repo_name (string, 5-100 chars) | path (string, 4-299 chars) | copies (990 distinct values) | size (string, 4-7 chars) | content (string, 666-1.03M chars) | license (15 distinct values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
uni-peter-zheng/tp-qemu | qemu/tests/client_guest_shutdown.py | 7 | 2069 | import time
from autotest.client.shared import error
from virttest import utils_misc
@error.context_aware
def run(test, params, env):
"""
KVM shutdown test:
For a test with two VMs: client & guest
1) Log into the VMs (guests) that represent the client & guest
2) Send a shutdown command to the guest, or issue a system_powerdown
monitor command (depending on the value of shutdown_method)
3) Wait until the guest is down
:param test: kvm test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment
"""
client_vm = env.get_vm(params["client_vm"])
client_vm.verify_alive()
guest_vm = env.get_vm(params["guest_vm"])
guest_vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
# shutdown both of the sessions
for vm in [client_vm, guest_vm]:
vm_session = vm.wait_for_login(timeout=timeout, username="root",
password="123456")
try:
error.base_context("shutting down the VM")
if params.get("shutdown_method") == "shell":
# Send a shutdown command to the guest's shell
vm_session.sendline(vm.get_params().get("shutdown_command"))
error.context("waiting VM to go down (shutdown shell cmd)")
elif params.get("shutdown_method") == "system_powerdown":
# Sleep for a while -- give the guest a chance to finish
# booting
time.sleep(float(params.get("sleep_before_powerdown", 10)))
# Send a system_powerdown monitor command
vm.monitor.cmd("system_powerdown")
error.context("waiting VM to go down "
"(system_powerdown monitor cmd)")
if not utils_misc.wait_for(vm.is_dead, 240, 0, 1):
vm.destroy(gracefully=False, free_mac_addresses=True)
raise error.TestFail("Guest refuses to go down")
finally:
vm_session.close()
| gpl-2.0 | -6,428,035,714,968,650,000 | 39.568627 | 76 | 0.602223 | false |
kareemallen/beets | test/test_mediafile.py | 2 | 30785 | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Automatically-generated blanket testing for the MediaFile metadata
layer.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import shutil
import tempfile
import datetime
import time
from test import _common
from test._common import unittest
from beets.mediafile import MediaFile, MediaField, Image, \
MP3DescStorageStyle, StorageStyle, MP4StorageStyle, \
ASFStorageStyle, ImageType, CoverArtField
from beets.library import Item
from beets.plugins import BeetsPlugin
class ArtTestMixin(object):
"""Test reads and writes of the ``art`` property.
"""
@property
def png_data(self):
if not self._png_data:
with open(os.path.join(_common.RSRC, 'image-2x3.png'), 'rb') as f:
self._png_data = f.read()
return self._png_data
_png_data = None
@property
def jpg_data(self):
if not self._jpg_data:
with open(os.path.join(_common.RSRC, 'image-2x3.jpg'), 'rb') as f:
self._jpg_data = f.read()
return self._jpg_data
_jpg_data = None
@property
def tiff_data(self):
if not self._jpg_data:
with open(os.path.join(_common.RSRC, 'image-2x3.tiff'), 'rb') as f:
self._jpg_data = f.read()
return self._jpg_data
_jpg_data = None
def test_set_png_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.png_data
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.art, self.png_data)
def test_set_jpg_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.jpg_data
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.art, self.jpg_data)
def test_delete_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.jpg_data
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNotNone(mediafile.art)
del mediafile.art
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNone(mediafile.art)
class ImageStructureTestMixin(ArtTestMixin):
"""Test reading and writing multiple image tags.
The tests use the `image` media file fixture. The tags of these files
include two images, one in the PNG format, the other in JPEG format. If
the tag format supports it, they also include additional metadata.
"""
def test_read_image_structures(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = next(i for i in mediafile.images
if i.mime_type == 'image/png')
self.assertEqual(image.data, self.png_data)
self.assertExtendedImageAttributes(image, desc='album cover',
type=ImageType.front)
image = next(i for i in mediafile.images
if i.mime_type == 'image/jpeg')
self.assertEqual(image.data, self.jpg_data)
self.assertExtendedImageAttributes(image, desc='the artist',
type=ImageType.artist)
def test_set_image_structure(self):
mediafile = self._mediafile_fixture('empty')
image = Image(data=self.png_data, desc='album cover',
type=ImageType.front)
mediafile.images = [image]
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(len(mediafile.images), 1)
image = mediafile.images[0]
self.assertEqual(image.data, self.png_data)
self.assertEqual(image.mime_type, 'image/png')
self.assertExtendedImageAttributes(image, desc='album cover',
type=ImageType.front)
def test_add_image_structure(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = Image(data=self.png_data, desc='the composer',
type=ImageType.composer)
mediafile.images += [image]
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(len(mediafile.images), 3)
images = (i for i in mediafile.images if i.desc == 'the composer')
image = next(images, None)
self.assertExtendedImageAttributes(
image, desc='the composer', type=ImageType.composer
)
def test_delete_image_structures(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
del mediafile.images
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(len(mediafile.images), 0)
def test_guess_cover(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
cover = CoverArtField.guess_cover_image(mediafile.images)
self.assertEqual(cover.desc, 'album cover')
self.assertEqual(mediafile.art, cover.data)
def assertExtendedImageAttributes(self, image, **kwargs):
"""Ignore extended image attributes in the base tests.
"""
pass
class ExtendedImageStructureTestMixin(ImageStructureTestMixin):
"""Checks for additional attributes in the image structure."""
def assertExtendedImageAttributes(self, image, desc=None, type=None):
self.assertEqual(image.desc, desc)
self.assertEqual(image.type, type)
def test_add_tiff_image(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = Image(data=self.tiff_data, desc='the composer',
type=ImageType.composer)
mediafile.images += [image]
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(len(mediafile.images), 3)
# WMA does not preserve the order, so we have to work around this
image = filter(lambda i: i.mime_type == 'image/tiff',
mediafile.images)[0]
self.assertExtendedImageAttributes(
image, desc='the composer', type=ImageType.composer)
class LazySaveTestMixin(object):
"""Mediafile should only write changes when tags have changed
"""
@unittest.skip('not yet implemented')
def test_unmodified(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.save()
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
@unittest.skip('not yet implemented')
def test_same_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.title = mediafile.title
mediafile.save()
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
def test_update_same_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.update({'title': mediafile.title})
mediafile.save()
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
@unittest.skip('not yet implemented')
def test_tag_value_change(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.title = mediafile.title
mediafile.album = 'another'
mediafile.save()
self.assertNotEqual(os.stat(mediafile.path).st_mtime, mtime)
def test_update_changed_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.update({'title': mediafile.title, 'album': 'another'})
mediafile.save()
self.assertNotEqual(os.stat(mediafile.path).st_mtime, mtime)
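# Helper used by the mtime checks above: it rewinds the file's mtime by
# roughly 10000 seconds so that a save() which actually rewrites the file
# shows up as a changed mtime.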
def _set_past_mtime(self, path):
mtime = round(time.time() - 10000)
os.utime(path, (mtime, mtime))
return mtime
class GenreListTestMixin(object):
"""Tests access to the ``genres`` property as a list.
"""
def test_read_genre_list(self):
mediafile = self._mediafile_fixture('full')
self.assertItemsEqual(mediafile.genres, ['the genre'])
def test_write_genre_list(self):
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertItemsEqual(mediafile.genres, ['one', 'two'])
def test_write_genre_list_get_first(self):
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.genre, 'one')
def test_append_genre_list(self):
mediafile = self._mediafile_fixture('full')
self.assertEqual(mediafile.genre, 'the genre')
mediafile.genres += [u'another']
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertItemsEqual(mediafile.genres, [u'the genre', u'another'])
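# Custom field shared by the ExtendedFieldTestMixin tests below: one logical
# 'customtag' field backed by a storage style for each container format
# (ID3 description frame, MP4 freeform atom, generic tag and ASF).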
field_extension = MediaField(
MP3DescStorageStyle(b'customtag'),
MP4StorageStyle(b'----:com.apple.iTunes:customtag'),
StorageStyle(b'customtag'),
ASFStorageStyle(b'customtag'),
)
class ExtendedFieldTestMixin(object):
def test_extended_field_write(self):
plugin = BeetsPlugin()
plugin.add_media_field('customtag', field_extension)
mediafile = self._mediafile_fixture('empty')
mediafile.customtag = 'F#'
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.customtag, 'F#')
delattr(MediaFile, 'customtag')
Item._media_fields.remove('customtag')
def test_write_extended_tag_from_item(self):
plugin = BeetsPlugin()
plugin.add_media_field('customtag', field_extension)
mediafile = self._mediafile_fixture('empty')
self.assertIsNone(mediafile.customtag)
item = Item(path=mediafile.path, customtag='Gb')
item.write()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.customtag, 'Gb')
delattr(MediaFile, 'customtag')
Item._media_fields.remove('customtag')
def test_read_flexible_attribute_from_file(self):
plugin = BeetsPlugin()
plugin.add_media_field('customtag', field_extension)
mediafile = self._mediafile_fixture('empty')
mediafile.update({'customtag': 'F#'})
mediafile.save()
item = Item.from_path(mediafile.path)
self.assertEqual(item['customtag'], 'F#')
delattr(MediaFile, 'customtag')
Item._media_fields.remove('customtag')
def test_invalid_descriptor(self):
with self.assertRaises(ValueError) as cm:
MediaFile.add_field('somekey', True)
self.assertIn('must be an instance of MediaField',
unicode(cm.exception))
def test_overwrite_property(self):
with self.assertRaises(ValueError) as cm:
MediaFile.add_field('artist', MediaField())
self.assertIn('property "artist" already exists',
unicode(cm.exception))
class ReadWriteTestBase(ArtTestMixin, GenreListTestMixin,
ExtendedFieldTestMixin):
"""Test writing and reading tags. Subclasses must set ``extension`` and
``audio_properties``.
"""
full_initial_tags = {
'title': u'full',
'artist': u'the artist',
'album': u'the album',
'genre': u'the genre',
'composer': u'the composer',
'grouping': u'the grouping',
'year': 2001,
'month': None,
'day': None,
'date': datetime.date(2001, 1, 1),
'track': 2,
'tracktotal': 3,
'disc': 4,
'disctotal': 5,
'lyrics': u'the lyrics',
'comments': u'the comments',
'bpm': 6,
'comp': True,
'mb_trackid': '8b882575-08a5-4452-a7a7-cbb8a1531f9e',
'mb_albumid': '9e873859-8aa4-4790-b985-5a953e8ef628',
'mb_artistid': '7cf0ea9d-86b9-4dad-ba9e-2355a64899ea',
'art': None,
'label': u'the label',
}
tag_fields = [
'title',
'artist',
'album',
'genre',
'composer',
'grouping',
'year',
'month',
'day',
'date',
'track',
'tracktotal',
'disc',
'disctotal',
'lyrics',
'comments',
'bpm',
'comp',
'mb_trackid',
'mb_albumid',
'mb_artistid',
'art',
'label',
'rg_track_peak',
'rg_track_gain',
'rg_album_peak',
'rg_album_gain',
'albumartist',
'mb_albumartistid',
'artist_sort',
'albumartist_sort',
'acoustid_fingerprint',
'acoustid_id',
'mb_releasegroupid',
'asin',
'catalognum',
'disctitle',
'script',
'language',
'country',
'albumstatus',
'media',
'albumdisambig',
'artist_credit',
'albumartist_credit',
'original_year',
'original_month',
'original_day',
'original_date',
'initial_key',
]
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self.temp_dir):
shutil.rmtree(self.temp_dir)
def test_read_audio_properties(self):
mediafile = self._mediafile_fixture('full')
for key, value in self.audio_properties.items():
if isinstance(value, float):
self.assertAlmostEqual(getattr(mediafile, key), value,
delta=0.1)
else:
self.assertEqual(getattr(mediafile, key), value)
def test_read_full(self):
mediafile = self._mediafile_fixture('full')
self.assertTags(mediafile, self.full_initial_tags)
def test_read_empty(self):
mediafile = self._mediafile_fixture('empty')
for field in self.tag_fields:
self.assertIsNone(getattr(mediafile, field))
def test_write_empty(self):
mediafile = self._mediafile_fixture('empty')
tags = self._generate_tags()
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertTags(mediafile, tags)
def test_update_empty(self):
mediafile = self._mediafile_fixture('empty')
tags = self._generate_tags()
mediafile.update(tags)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertTags(mediafile, tags)
def test_overwrite_full(self):
mediafile = self._mediafile_fixture('full')
tags = self._generate_tags()
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
# Make sure the tags are already set when writing a second time
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertTags(mediafile, tags)
def test_update_full(self):
mediafile = self._mediafile_fixture('full')
tags = self._generate_tags()
mediafile.update(tags)
mediafile.save()
# Make sure the tags are already set when writing a second time
mediafile.update(tags)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertTags(mediafile, tags)
def test_write_date_components(self):
mediafile = self._mediafile_fixture('full')
mediafile.year = 2001
mediafile.month = 1
mediafile.day = 2
mediafile.original_year = 1999
mediafile.original_month = 12
mediafile.original_day = 30
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.year, 2001)
self.assertEqual(mediafile.month, 1)
self.assertEqual(mediafile.day, 2)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 2))
self.assertEqual(mediafile.original_year, 1999)
self.assertEqual(mediafile.original_month, 12)
self.assertEqual(mediafile.original_day, 30)
self.assertEqual(mediafile.original_date, datetime.date(1999, 12, 30))
def test_write_incomplete_date_components(self):
mediafile = self._mediafile_fixture('empty')
mediafile.year = 2001
mediafile.month = None
mediafile.day = 2
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.year, 2001)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 1))
def test_write_dates(self):
mediafile = self._mediafile_fixture('full')
mediafile.date = datetime.date(2001, 1, 2)
mediafile.original_date = datetime.date(1999, 12, 30)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.year, 2001)
self.assertEqual(mediafile.month, 1)
self.assertEqual(mediafile.day, 2)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 2))
self.assertEqual(mediafile.original_year, 1999)
self.assertEqual(mediafile.original_month, 12)
self.assertEqual(mediafile.original_day, 30)
self.assertEqual(mediafile.original_date, datetime.date(1999, 12, 30))
def test_write_packed(self):
mediafile = self._mediafile_fixture('empty')
mediafile.tracktotal = 2
mediafile.track = 1
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.track, 1)
self.assertEqual(mediafile.tracktotal, 2)
def test_write_counters_without_total(self):
mediafile = self._mediafile_fixture('full')
self.assertEqual(mediafile.track, 2)
self.assertEqual(mediafile.tracktotal, 3)
self.assertEqual(mediafile.disc, 4)
self.assertEqual(mediafile.disctotal, 5)
mediafile.track = 10
delattr(mediafile, 'tracktotal')
mediafile.disc = 10
delattr(mediafile, 'disctotal')
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.track, 10)
self.assertEqual(mediafile.tracktotal, None)
self.assertEqual(mediafile.disc, 10)
self.assertEqual(mediafile.disctotal, None)
def test_unparseable_date(self):
mediafile = self._mediafile_fixture('unparseable')
self.assertIsNone(mediafile.date)
self.assertIsNone(mediafile.year)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
def test_delete_tag(self):
mediafile = self._mediafile_fixture('full')
keys = self.full_initial_tags.keys()
for key in set(keys) - set(['art', 'month', 'day']):
self.assertIsNotNone(getattr(mediafile, key))
for key in keys:
delattr(mediafile, key)
mediafile.save()
mediafile = MediaFile(mediafile.path)
for key in keys:
self.assertIsNone(getattr(mediafile, key))
def test_delete_packed_total(self):
mediafile = self._mediafile_fixture('full')
delattr(mediafile, 'tracktotal')
delattr(mediafile, 'disctotal')
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.track, self.full_initial_tags['track'])
self.assertEqual(mediafile.disc, self.full_initial_tags['disc'])
def test_delete_partial_date(self):
mediafile = self._mediafile_fixture('empty')
mediafile.date = datetime.date(2001, 12, 3)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
self.assertIsNotNone(mediafile.month)
self.assertIsNotNone(mediafile.day)
delattr(mediafile, 'month')
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
def test_delete_year(self):
mediafile = self._mediafile_fixture('full')
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
delattr(mediafile, 'year')
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNone(mediafile.date)
self.assertIsNone(mediafile.year)
def assertTags(self, mediafile, tags):
errors = []
for key, value in tags.items():
try:
value2 = getattr(mediafile, key)
except AttributeError:
errors.append('Tag %s does not exist' % key)
else:
if value2 != value:
errors.append('Tag %s: %r != %r' % (key, value2, value))
if any(errors):
errors = ['Tags did not match'] + errors
self.fail('\n '.join(errors))
def _mediafile_fixture(self, name):
name = name + '.' + self.extension
src = os.path.join(_common.RSRC, name)
target = os.path.join(self.temp_dir, name)
shutil.copy(src, target)
return MediaFile(target)
def _generate_tags(self, base=None):
"""Return dictionary of tags, mapping tag names to values.
"""
tags = {}
for key in self.tag_fields:
if key.startswith('rg_'):
# ReplayGain is float
tags[key] = 1.0
else:
tags[key] = b'value\u2010%s' % key
for key in ['disc', 'disctotal', 'track', 'tracktotal', 'bpm']:
tags[key] = 1
tags['art'] = self.jpg_data
tags['comp'] = True
date = datetime.date(2001, 4, 3)
tags['date'] = date
tags['year'] = date.year
tags['month'] = date.month
tags['day'] = date.day
original_date = datetime.date(1999, 5, 6)
tags['original_date'] = original_date
tags['original_year'] = original_date.year
tags['original_month'] = original_date.month
tags['original_day'] = original_date.day
return tags
class PartialTestMixin(object):
tags_without_total = {
'track': 2,
'tracktotal': 0,
'disc': 4,
'disctotal': 0,
}
def test_read_track_without_total(self):
mediafile = self._mediafile_fixture('partial')
self.assertEqual(mediafile.track, 2)
self.assertIsNone(mediafile.tracktotal)
self.assertEqual(mediafile.disc, 4)
self.assertIsNone(mediafile.disctotal)
class MP3Test(ReadWriteTestBase, PartialTestMixin,
ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'mp3'
audio_properties = {
'length': 1.0,
'bitrate': 80000,
'format': 'MP3',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_unknown_apic_type(self):
mediafile = self._mediafile_fixture('image_unknown_type')
self.assertEqual(mediafile.images[0].type, ImageType.other)
class MP4Test(ReadWriteTestBase, PartialTestMixin,
ImageStructureTestMixin, unittest.TestCase):
extension = 'm4a'
audio_properties = {
'length': 1.0,
'bitrate': 64000,
'format': 'AAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 2,
}
def test_add_tiff_image_fails(self):
mediafile = self._mediafile_fixture('empty')
with self.assertRaises(ValueError):
mediafile.images = [Image(data=self.tiff_data)]
def test_guess_cover(self):
# There is no metadata associated with images, so we pick one at random
pass
class AlacTest(ReadWriteTestBase, unittest.TestCase):
extension = 'alac.m4a'
audio_properties = {
'length': 1.0,
'bitrate': 21830,
# 'format': 'ALAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class MusepackTest(ReadWriteTestBase, unittest.TestCase):
extension = 'mpc'
audio_properties = {
'length': 1.0,
'bitrate': 23458,
'format': 'Musepack',
'samplerate': 44100,
'bitdepth': 0,
'channels': 2,
}
class WMATest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'wma'
audio_properties = {
'length': 1.0,
'bitrate': 128000,
'format': 'Windows Media',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_write_genre_list_get_first(self):
# WMA does not preserve list order
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIn(mediafile.genre, [u'one', u'two'])
def test_read_pure_tags(self):
mediafile = self._mediafile_fixture('pure')
self.assertEqual(mediafile.comments, 'the comments')
self.assertEqual(mediafile.title, 'the title')
self.assertEqual(mediafile.artist, 'the artist')
class OggTest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'ogg'
audio_properties = {
'length': 1.0,
'bitrate': 48000,
'format': 'OGG',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_read_date_from_year_tag(self):
mediafile = self._mediafile_fixture('year')
self.assertEqual(mediafile.year, 2000)
self.assertEqual(mediafile.date, datetime.date(2000, 1, 1))
def test_write_date_to_year_tag(self):
mediafile = self._mediafile_fixture('empty')
mediafile.year = 2000
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.mgfile['YEAR'], [u'2000'])
def test_legacy_coverart_tag(self):
mediafile = self._mediafile_fixture('coverart')
self.assertTrue('coverart' in mediafile.mgfile)
self.assertEqual(mediafile.art, self.png_data)
mediafile.art = self.png_data
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertFalse('coverart' in mediafile.mgfile)
def test_date_tag_with_slashes(self):
mediafile = self._mediafile_fixture('date_with_slashes')
self.assertEqual(mediafile.year, 2005)
self.assertEqual(mediafile.month, 6)
self.assertEqual(mediafile.day, 5)
class FlacTest(ReadWriteTestBase, PartialTestMixin,
ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'flac'
audio_properties = {
'length': 1.0,
'bitrate': 175120,
'format': 'FLAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class ApeTest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'ape'
audio_properties = {
'length': 1.0,
'bitrate': 112040,
'format': 'APE',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class WavpackTest(ReadWriteTestBase, unittest.TestCase):
extension = 'wv'
audio_properties = {
'length': 1.0,
'bitrate': 108744,
'format': 'WavPack',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
class OpusTest(ReadWriteTestBase, unittest.TestCase):
extension = 'opus'
audio_properties = {
'length': 1.0,
'bitrate': 57984,
'format': 'Opus',
'samplerate': 48000,
'bitdepth': 0,
'channels': 1,
}
class AIFFTest(ReadWriteTestBase, unittest.TestCase):
extension = 'aiff'
audio_properties = {
'length': 1.0,
'bitrate': 705600,
'format': 'AIFF',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
class MediaFieldTest(unittest.TestCase):
def test_properties_from_fields(self):
path = os.path.join(_common.RSRC, 'full.mp3')
mediafile = MediaFile(path)
for field in MediaFile.fields():
self.assertTrue(hasattr(mediafile, field))
def test_properties_from_readable_fields(self):
path = os.path.join(_common.RSRC, 'full.mp3')
mediafile = MediaFile(path)
for field in MediaFile.readable_fields():
self.assertTrue(hasattr(mediafile, field))
def test_known_fields(self):
fields = list(ReadWriteTestBase.tag_fields)
fields.extend(('encoder', 'images', 'genres', 'albumtype'))
self.assertItemsEqual(MediaFile.fields(), fields)
def test_fields_in_readable_fields(self):
readable = MediaFile.readable_fields()
for field in MediaFile.fields():
self.assertIn(field, readable)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| mit | 3,339,995,142,290,756,000 | 31.034339 | 79 | 0.608673 | false |
ironsmile/tank4eta | locals.py | 1 | 1245 | #!/usr/bin/env python
#-*- coding: utf8 -*-
import enum
import os
FRAMES = 120
MOVE_SPEED = 6
ENEMY_MOVE_SPEED = 3.5
BULLET_SPEED = 18
DIRECTION_NONE = -1
DIRECTION_DOWN = 1
DIRECTION_LEFT = 2
DIRECTION_UP = 4
DIRECTION_RIGHT = 8
AXIS_X = 0
AXIS_Y = 1
BACKGROUND_COLOUR = (46, 52, 54)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
SILVER = (160, 160, 160)
YELLOW = (255, 255, 0)
ORANGE = (252, 158, 27)
BLUE = (30, 50, 107)
MAPS_DIR = os.path.join('data', 'maps')
SOUNDS_DIR = os.path.join('data', 'sounds')
TEXTURES_DIR = os.path.join('data', 'textures')
JOY_CENTERED = (0, 0)
JOY_UP = (0, 1)
JOY_DOWN = (0, -1)
JOY_RIGHT = (1, 0)
JOY_LEFT = (-1, 0)
EVENT_FIRE = 0
EVENT_MOVE_LEFT = 1
EVENT_MOVE_RIGHT = 2
EVENT_MOVE_UP = 3
EVENT_MOVE_DOWN = 4
EVENT_STOP = 5
GAME_CONTINUE = 0
GAME_WON = 1
GAME_OVER = -1
FONT_SERIF_PATH = os.path.join('data', 'fonts', 'ubuntu', 'Ubuntu-R.ttf')
FONT_EASTERN_PATH = os.path.join('data', 'fonts', 'noto', 'NotoSerifCJKjp-Regular.otf')
class Terrain(enum.Enum):
'''
This class is used while populating the pathfinding matrix
'''
passable_see_through = 0
unpassable_no_see_through = 1
unpassable_see_through = 2
class ScreenSetting(enum.Enum):
windowed = 0
fullscreen = 1
| mit | -5,874,394,132,995,078,000 | 18.153846 | 87 | 0.640964 | false |
gem/oq-hazardlib | openquake/hmtk/tests/faults/mfd/test_anderson_luco_area_mmax.py | 1 | 10582 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (c) 2010-2017, GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# ([email protected]).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
'''
Module to test
:openquake.hmtk.faults.mfd.anderson_luco_area_mmax.AndersonLucoAreaMmax class
'''
import os
import unittest
import numpy as np
from math import log
from openquake.hazardlib.scalerel import WC1994
from openquake.hmtk.faults.mfd.anderson_luco_area_mmax import (Type1RecurrenceModel,
Type2RecurrenceModel, Type3RecurrenceModel, AndersonLucoAreaMmax)
MAGNITUDES = np.arange(5., 8.1, 0.1)
BASE_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
BUN07_FIG1 = np.genfromtxt(os.path.join(BASE_DATA_PATH,
'anderson_luco_area_mmax_results.dat'))
AL83_AREA_MMAX_INC = np.genfromtxt(os.path.join(BASE_DATA_PATH,
'anderson_luco_area_mmax_incremental.dat'))
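# Expected values: columns 1-3 of BUN07_FIG1 hold the cumulative rates for
# recurrence models 1, 2 and 3 (as plotted in Bungum, 2007, Figure 1), while
# AL83_AREA_MMAX_INC holds one column of incremental rates per calculator
# type, in the order 'First', 'Second', 'Third'.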
class TestType1Recurrence(unittest.TestCase):
'''
Tests the Recurrence function of the Anderson & Luco (1983) area Mmax
type 1 model
'''
def setUp(self):
'''
'''
self.magnitudes = MAGNITUDES
self.model = Type1RecurrenceModel()
self.mmax = 8.0
self.bbar = 1.0 * log(10.)
self.dbar = 1.5 * log(10.)
self.beta = None
def test_recurrence_model_type1(self):
'''
Tests the recurrence function
In all cases if bbar > dbar (1.5) then models will fail!
No comparison figures found in Anderson & Luco (1983) - but a comparison
figure is found in Bungum (2007)
'''
# Tests 1 - master case - reproduces the Model 1 line of Figure 1 in Bungum
# (2007)
# slip = 1 mm/yr, shear_modulus = 30 GPa, fault width = 60 km,
# disp_length_ratio =1E-5
self.beta = np.sqrt((1E-5 * (10. ** 16.05)) /
((30 * 1E10) * (60.0 * 1E5)))
expected_results = BUN07_FIG1[:, 1]
for iloc, mag in enumerate(self.magnitudes):
self.assertAlmostEqual(expected_results[iloc],
self.model.cumulative_value(1.0, self.mmax, mag,
self.bbar, self.dbar, self.beta), 7)
class TestType2Recurrence(unittest.TestCase):
'''
Tests the Recurrence function of the Anderson & Luco (1983) area Mmax
type 2 model
'''
def setUp(self):
'''
'''
self.magnitudes = MAGNITUDES
self.model = Type2RecurrenceModel()
self.mmax = 8.0
self.bbar = 1.0 * log(10.)
self.dbar = 1.5 * log(10.)
self.beta = None
def test_recurrence_model_type1(self):
'''
Tests the recurrence function
In all cases if bbar > dbar (1.5) then models will fail!
No comparison figures found in Anderson & Luco (1983) - but a comparison
figure is found in Bungum (2007)
'''
# Tests 1 - master case - reproduces the Model 2 line of Figure 1 in Bungum
# (2007)
# slip = 1 mm/yr, shear_modulus = 30 GPa, fault width = 60 km,
# disp_length_ratio =1E-5
self.beta = np.sqrt((1E-5 * (10. ** 16.05)) /
((30 * 1E10) * (60.0 * 1E5)))
expected_results = BUN07_FIG1[:, 2]
for iloc, mag in enumerate(self.magnitudes):
self.assertAlmostEqual(expected_results[iloc],
self.model.cumulative_value(1.0, self.mmax, mag,
self.bbar, self.dbar, self.beta), 7)
class TestType3Recurrence(unittest.TestCase):
'''
Tests the Recurrence function of the Anderson & Luco (1983) area Mmax
type 3 model
'''
def setUp(self):
'''
'''
self.magnitudes = MAGNITUDES
self.model = Type3RecurrenceModel()
self.mmax = 8.0
self.bbar = 1.0 * log(10.)
self.dbar = 1.5 * log(10.)
self.beta = None
def test_recurrence_model_type1(self):
'''
Tests the recurrence function
In all cases if bbar > dbar (1.5) then models will fail!
No comparison figures found in Anderson & Luco (1983) - but a comparison
figure is found in Bungum (2007)
'''
# Tests 1 - master case - reproduces the Model 3 line of Figure 1 in Bungum
# (2007)
# slip = 1 mm/yr, shear_modulus = 30 GPa, fault width = 60 km,
# disp_length_ratio =1E-5
self.beta = np.sqrt((1E-5 * (10. ** 16.05)) /
((30 * 1E10) * (60.0 * 1E5)))
expected_results = BUN07_FIG1[:, 3]
for iloc, mag in enumerate(self.magnitudes):
self.assertAlmostEqual(expected_results[iloc],
self.model.cumulative_value(1.0, self.mmax, mag,
self.bbar, self.dbar, self.beta), 7)
class TestAndersonLucoArbitrary(unittest.TestCase):
'''
Tests the Anderson & Luco area Mmax models
:class openquake.hmtk.faults.mfd.anderson_luco_area_mmax.AndersonLucoAreaMmax
'''
def setUp(self):
self.model = AndersonLucoAreaMmax()
self.config = {'Model_Type': 'First',
'MFD_spacing': 0.1,
'Model_Weight': 1.0,
'Minimum_Magnitude': 5.0,
'Maximum_Magnitude': None,
'b_value': [1.0, 0.1]}
self.msr = WC1994()
def test_case_setup(self):
'''
Tests the basic setup
'''
expected_dict = {'b_value': 1.0,
'b_value_sigma': 0.1,
'bin_width': 0.1,
'mfd_model': 'Anderson & Luco (Mmax) First',
'mfd_type': 'First',
'mfd_weight': 1.0,
'mmax': None,
'mmax_sigma': None,
'mmin': 5.0,
'occurrence_rate': None}
self.model.setUp(self.config)
self.assertDictEqual(expected_dict, self.model.__dict__)
def test_get_mmax(self):
'''
Tests the function to get Mmax
Values come from WC1994 (tested in openquake.hazardlib) - only
functionality is tested for here!
'''
# Case 1: Mmax and uncertainty specified in config
self.config['Maximum_Magnitude'] = 8.0
self.config['Maximum_Magnitude_Uncertainty'] = 0.2
self.model = AndersonLucoAreaMmax()
self.model.setUp(self.config)
self.model.get_mmax(self.config, self.msr, 0., 8500.)
self.assertAlmostEqual(self.model.mmax, 8.0)
self.assertAlmostEqual(self.model.mmax_sigma, 0.2)
# Case 2: Mmax and uncertainty not specified in config
self.config['Maximum_Magnitude'] = None
self.config['Maximum_Magnitude_Uncertainty'] = None
self.model = AndersonLucoAreaMmax()
self.model.setUp(self.config)
self.model.get_mmax(self.config, self.msr, 0., 8500.)
self.assertAlmostEqual(self.model.mmax, 7.9880073)
self.assertAlmostEqual(self.model.mmax_sigma, 0.23)
def test_get_mfd(self):
'''
Tests the function to get magnitude frequency distribution
'''
self.msr = WC1994()
# Test 1: For a fault with 5 mm/yr slip, and an area of 7500 km ** 2
self.msr = WC1994()
# Testing all three calculators!
for iloc, model_type in enumerate(['First', 'Second', 'Third']):
self.model = AndersonLucoAreaMmax()
self.config = {'Model_Type': model_type,
'MFD_spacing': 0.1,
'Model_Weight': 1.0,
'Minimum_Magnitude': 5.0,
'Maximum_Magnitude': None,
'b_value': [1.0, 0.1]}
self.model.setUp(self.config)
self.model.get_mmax(self.config, self.msr, 0., 7500.)
test_output = self.model.get_mfd(5.0, 37.5)
print(AL83_AREA_MMAX_INC[:, iloc], test_output[2])
np.testing.assert_array_almost_equal(AL83_AREA_MMAX_INC[:, iloc],
test_output[2])
# Test case when b-value greater than d-value (raises warning!)
self.model = AndersonLucoAreaMmax()
self.config = {'Model_Type': model_type,
'MFD_spacing': 0.1,
'Model_Weight': 1.0,
'Minimum_Magnitude': 5.0,
'Maximum_Magnitude': None,
'b_value': [2.0, 0.1]}
self.model.setUp(self.config)
self.model.get_mmax(self.config, self.msr, 0., 7500.)
self.model.get_mfd(5.0, 37.5)
self.assertTrue(np.all(np.isnan(self.model.occurrence_rate)))
| agpl-3.0 | -3,235,939,884,575,549,400 | 36.928315 | 84 | 0.591571 | false |
wetneb/dissemin | papers/search_indexes.py | 2 | 2586 | from haystack import indexes
from papers.utils import remove_diacritics
from .models import Paper
# from https://github.com/django-haystack/django-haystack/issues/204#issuecomment-544579
class IntegerMultiValueField(indexes.MultiValueField):
field_type = 'integer'
class PaperIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='title')
pubdate = indexes.DateField(model_attr='pubdate')
combined_status = indexes.CharField(model_attr='combined_status', faceted=True)
doctype = indexes.CharField(model_attr='doctype', faceted=True)
visible = indexes.BooleanField(model_attr='visible')
oa_status = indexes.CharField(model_attr='oa_status', faceted=True)
availability = indexes.CharField()
#: Names of the authors
authors_full = indexes.MultiValueField()
authors_last = indexes.MultiValueField()
#: IDs of researchers
researchers = IntegerMultiValueField()
#: ORCIDs of researchers
orcids = indexes.MultiValueField()
#: IDs of institutions of researchers
institutions = IntegerMultiValueField()
#: ID of publisher
publisher = indexes.IntegerField(null=True)
#: ID of journal
journal = indexes.IntegerField(null=True)
def get_model(self):
return Paper
def full_prepare(self, obj):
obj.cache_oairecords()
return super(PaperIndex, self).full_prepare(obj)
def get_updated_field(self):
return "last_modified"
def prepare_text(self, obj):
return remove_diacritics(obj.title+' '+(' '.join(
self.prepare_authors_full(obj))))
def prepare_authors_full(self, obj):
# the 'full' field is already clean (no diacritics)
return [a['name']['full'] for a in obj.authors_list]
def prepare_authors_last(self, obj):
return [remove_diacritics(a['name']['last']) for a in obj.authors_list]
def prepare_availability(self, obj):
return 'OK' if obj.pdf_url else 'NOK'
def prepare_researchers(self, obj):
return obj.researcher_ids
def prepare_orcids(self, obj):
return [orcid for orcid in obj.orcids() if orcid]
def prepare_institutions(self, obj):
return [x for x in [r.institution_id
for r in obj.researchers] if x is not None]
def prepare_publisher(self, obj):
for r in obj.oairecords:
if r.publisher_id:
return r.publisher_id
def prepare_journal(self, obj):
for r in obj.oairecords:
if r.journal_id:
return r.journal_id
| agpl-3.0 | -1,451,157,736,661,003,000 | 31.325 | 88 | 0.670147 | false |
gavinfish/leetcode-share | python/068 Text Justification.py | 1 | 2353 | '''
Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces ' ' when necessary so that each line has exactly L characters.
Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line does not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted between words.
For example,
words: ["This", "is", "an", "example", "of", "text", "justification."]
L: 16.
Return the formatted lines as:
[
"This is an",
"example of text",
"justification. "
]
Note: Each word is guaranteed not to exceed L in length.
A line other than the last line might contain only one word. What should you do in this case?
In this case, that line should be left-justified.
'''
class Solution(object):
def fullJustify(self, words, maxWidth):
"""
:type words: List[str]
:type maxWidth: int
:rtype: List[str]
"""
start = end = 0
result, curr_words_length = [], 0
for i, word in enumerate(words):
if len(word) + curr_words_length + end - start > maxWidth:
if end - start == 1:
result.append(words[start] + ' ' * (maxWidth - curr_words_length))
else:
total_space = maxWidth - curr_words_length
space, extra = divmod(total_space, end - start - 1)
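# divmod splits the padding into an even share per gap plus `extra` leftover
# spaces; the loop below gives those leftovers to the leftmost gaps, so slots
# on the left get at most one more space than slots on the right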
for j in range(extra):
words[start + j] += ' '
result.append((' ' * space).join(words[start:end]))
curr_words_length = 0
start = end = i
end += 1
curr_words_length += len(word)
result.append(' '.join(words[start:end]) + ' ' * (maxWidth - curr_words_length - (end - start - 1)))
return result
if __name__ == "__main__":
assert Solution().fullJustify(["This", "is", "an", "example", "of", "text", "justification."], 16) == [
"This is an",
"example of text",
"justification. "
] | mit | 9,116,631,088,883,721,000 | 39.586207 | 227 | 0.58946 | false |
vrsys/avangong | examples/billboard/billboard.py | 6 | 3837 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2009 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
import avango.osg
import avango.display
import sys
argv = avango.display.init(sys.argv)
view = avango.display.make_view()
view.EnableTrackball.value = True
class Rotation(avango.script.Script):
TimeIn = avango.SFFloat()
MatrixOut = avango.osg.SFMatrix()
def evaluate(self):
self.MatrixOut.value = avango.osg.make_rot_mat(self.TimeIn.value, 0, 1, 0)
text1 = avango.osg.nodes.Text(String = "LayerGeode",
Size = 0.2,
Alignment = 4,
Color = avango.osg.Vec4(0,0,0,1)
)
text2 = avango.osg.nodes.Text(String = "Billboard",
Size = 0.2,
Alignment = 4,
Color = avango.osg.Vec4(0,0,0,1)
)
panel1 = avango.osg.nodes.Panel(PanelColor = avango.osg.Vec4(1,1,0,0.5),
Width = 0.7,
Height = 0.25,
)
panel2 = avango.osg.nodes.Panel(PanelColor = avango.osg.Vec4(1,0,0,0.5),
Width = 0.7,
Height = 0.25,
)
layer_geode = avango.osg.nodes.LayerGeode(Drawables = [panel1, text1])
billboard = avango.osg.nodes.Billboard(Drawables = [panel2, text2])
billboard.Mode.value = 2 # AXIAL_ROT
billboard.Axis.value = avango.osg.Vec3(0.,1.,0.)
billboard.Normal.value = avango.osg.Vec3(0.0,0.0,1.0)
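# The two boards contrast the behaviours: the billboard re-orients itself
# about its Y axis to keep facing the viewer, while the plain LayerGeode
# turns rigidly with the rotating parent transform set up below.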
translation_layer_geode = avango.osg.nodes.MatrixTransform(
Children=[layer_geode],
Matrix = avango.osg.make_trans_mat(0.8,0.2,0))
translation_billboard = avango.osg.nodes.MatrixTransform(
Children=[billboard],
Matrix = avango.osg.make_trans_mat(0.8,-0.2,0))
rotation_boards = avango.osg.nodes.MatrixTransform(
Children=[translation_billboard, translation_layer_geode])
translation_world = avango.osg.nodes.MatrixTransform(
Children=[rotation_boards],
Matrix = avango.osg.make_trans_mat(0,1.7,-13))
time_sensor = avango.nodes.TimeSensor()
rotation = Rotation()
rotation_boards.Matrix.connect_from(rotation.MatrixOut)
rotation.TimeIn.connect_from(time_sensor.Time)
root = avango.osg.nodes.Group(Name="Root")
root.Children.value = [ translation_world ]
view.Root.value = root
avango.display.run()
| lgpl-3.0 | -1,461,746,403,414,869,200 | 42.602273 | 82 | 0.507167 | false |
Menooker/gem5_pcm | configs/example/memtest.py | 9 | 8083 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
import optparse
import sys
import m5
from m5.objects import *
parser = optparse.OptionParser()
parser.add_option("-a", "--atomic", action="store_true",
help="Use atomic (non-timing) mode")
parser.add_option("-b", "--blocking", action="store_true",
help="Use blocking caches")
parser.add_option("-l", "--maxloads", metavar="N", default=0,
help="Stop after N loads")
parser.add_option("-m", "--maxtick", type="int", default=m5.MaxTick,
metavar="T",
help="Stop after T ticks")
#
# The "tree" specification is a colon-separated list of one or more
# integers. The first integer is the number of caches/testers
# connected directly to main memory. The last integer in the list is
# the number of testers associated with the uppermost level of memory
# (L1 cache, if there are caches, or main memory if no caches). Thus
# if there is only one integer, there are no caches, and the integer
# specifies the number of testers connected directly to main memory.
# The other integers (if any) specify the number of caches at each
# level of the hierarchy between.
#
# Examples:
#
# "2:1" Two caches connected to memory with a single tester behind each
# (single-level hierarchy, two testers total)
#
# "2:2:1" Two-level hierarchy, 2 L1s behind each of 2 L2s, 4 testers total
#
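# For illustration: a spec such as "2:2:1" parses to [2, 2, 1]; the product of
# the entries (2 * 2 * 1 = 4) is the total number of testers instantiated by
# the code below.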
parser.add_option("-t", "--treespec", type="string", default="8:1",
help="Colon-separated multilevel tree specification, "
"see script comments for details "
"[default: %default]")
parser.add_option("--force-bus", action="store_true",
help="Use bus between levels even with single cache")
parser.add_option("-f", "--functional", type="int", default=0,
metavar="PCT",
help="Target percentage of functional accesses "
"[default: %default]")
parser.add_option("-u", "--uncacheable", type="int", default=0,
metavar="PCT",
help="Target percentage of uncacheable accesses "
"[default: %default]")
parser.add_option("--progress", type="int", default=1000,
metavar="NLOADS",
help="Progress message interval "
"[default: %default]")
parser.add_option("--sys-clock", action="store", type="string",
default='1GHz',
help = """Top-level clock for blocks running at system
speed""")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
block_size = 64
try:
treespec = [int(x) for x in options.treespec.split(':')]
numtesters = reduce(lambda x,y: x*y, treespec)
except:
print "Error parsing treespec option"
sys.exit(1)
if numtesters > block_size:
print "Error: Number of testers limited to %s because of false sharing" \
% (block_size)
sys.exit(1)
if len(treespec) < 1:
print "Error parsing treespec"
sys.exit(1)
# define prototype L1 cache
proto_l1 = BaseCache(size = '32kB', assoc = 4,
hit_latency = 1, response_latency = 1,
tgts_per_mshr = 8)
if options.blocking:
proto_l1.mshrs = 1
else:
proto_l1.mshrs = 4
# build a list of prototypes, one for each level of treespec, starting
# at the end (last entry is tester objects)
prototypes = [ MemTest(atomic=options.atomic, max_loads=options.maxloads,
percent_functional=options.functional,
percent_uncacheable=options.uncacheable,
progress_interval=options.progress) ]
# next comes L1 cache, if any
if len(treespec) > 1:
prototypes.insert(0, proto_l1)
# now add additional cache levels (if any) by scaling L1 params
for scale in treespec[:-2]:
# clone previous level and update params
prev = prototypes[0]
next = prev()
next.size = prev.size * scale
next.latency = prev.latency * 10
next.assoc = prev.assoc * scale
next.mshrs = prev.mshrs * scale
prototypes.insert(0, next)
# system simulated
system = System(funcmem = SimpleMemory(in_addr_map = False),
funcbus = NoncoherentBus(),
physmem = SimpleMemory(latency = "100ns"),
cache_line_size = block_size)
system.voltage_domain = VoltageDomain(voltage = '1V')
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
def make_level(spec, prototypes, attach_obj, attach_port):
fanout = spec[0]
parent = attach_obj # use attach obj as config parent too
if len(spec) > 1 and (fanout > 1 or options.force_bus):
port = getattr(attach_obj, attach_port)
new_bus = CoherentBus(width=16)
if (port.role == 'MASTER'):
new_bus.slave = port
attach_port = "master"
else:
new_bus.master = port
attach_port = "slave"
parent.cpu_side_bus = new_bus
attach_obj = new_bus
objs = [prototypes[0]() for i in xrange(fanout)]
if len(spec) > 1:
# we just built caches, more levels to go
parent.cache = objs
for cache in objs:
cache.mem_side = getattr(attach_obj, attach_port)
make_level(spec[1:], prototypes[1:], cache, "cpu_side")
else:
# we just built the MemTest objects
parent.cpu = objs
for t in objs:
t.test = getattr(attach_obj, attach_port)
t.functional = system.funcbus.slave
make_level(treespec, prototypes, system.physmem, "port")
# connect reference memory to funcbus
system.funcbus.master = system.funcmem.port
# -----------------------
# run simulation
# -----------------------
root = Root( full_system = False, system = system )
if options.atomic:
root.system.mem_mode = 'atomic'
else:
root.system.mem_mode = 'timing'
# The system port is never used in the tester so merely connect it
# to avoid problems
root.system.system_port = root.system.funcbus.slave
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
# instantiate configuration
m5.instantiate()
# simulate until program terminates
exit_event = m5.simulate(options.maxtick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
| bsd-3-clause | 7,269,950,286,770,779,000 | 36.595349 | 78 | 0.650254 | false |
HealthCatalystSLC/healthcareai-py | healthcareai/datasets/base.py | 3 | 11739 | from os.path import dirname
from os.path import join
import pandas as pd
def load_data(data_file_name):
"""Loads data from module_path/data/data_file_name
Args:
data_file_name (str) : Name of csv file to be loaded from
module_path/data/data_file_name. Example: 'diabetes.csv'
Returns:
Pandas.core.frame.DataFrame: A pandas dataframe containing the loaded data.
Examples:
>>> load_data('diabetes.csv')
"""
file_path = join(dirname(__file__), 'data', data_file_name)
return pd.read_csv(file_path, na_values=['None'])
def load_acute_inflammations():
"""
Loads the Acute Inflammations dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Acute+Inflammations
Note: The dataset contains the following columns:
`PatientID`: Patient Identifier
`Temperature`: Temperature of patient { 35C-42C }
`Nausea`: Occurrence of nausea { 1, 0 }
`LumbarPain`: Lumbar pain { 1, 0 }
`UrinePushing`: Urine pushing (continuous need for urination) { 1, 0 }
`MicturitionPain`: Micturition pains { 1, 0 }
`UrethralBurning`: Burning of urethra, itch, swelling of urethra outlet { 1, 0 }
`Inflammation`: Inflammation of urinary bladder { 1, 0 }
`Nephritis`: Nephritis of renal pelvis origin { 1, 0 }
"""
return load_data('acute_inflammations.csv')
def load_cervical_cancer():
"""
Loads the Cervical Cancer (Risk Factors) dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Cervical+cancer+%28Risk+Factors%29
Note: The dataset contains the following columns:
`PatientID`: Patient Identifier
Age
Number of sexual partners
First sexual intercourse (age)
Num of pregnancies
Smokes
Smokes (years)
Smokes (packs/year)
Hormonal Contraceptives
Hormonal Contraceptives (years)
IUD
IUD (years)
STDs
STDs (number)
STDs:condylomatosis
STDs:cervical condylomatosis
STDs:vaginal condylomatosis
STDs:vulvo-perineal condylomatosis
STDs:syphilis
STDs:pelvic inflammatory disease
STDs:genital herpes
STDs:molluscum contagiosum
STDs:AIDS
STDs:HIV
STDs:Hepatitis B
STDs:HPV
STDs: Number of diagnosis
STDs: Time since first diagnosis
STDs: Time since last diagnosis
Dx:Cancer
Dx:CIN
Dx:HPV
Dx
Hinselmann: target variable
Schiller: target variable
Cytology: target variable
Biopsy: target variable
"""
return load_data('cervical_cancer.csv')
def load_diabetes():
"""
Loads the healthcare.ai sample diabetes dataset
Note: The dataset contains the following columns:
PatientEncounterID
PatientID
SystolicBPNBR
LDLNBR
A1CNBR
GenderFLG
ThirtyDayReadmitFLG
"""
return load_data('diabetes.csv')
def load_diagnostic_breast_cancer():
"""
Loads the Wisconsin Diagnostic Breast Cancer dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29
Note: The dataset contains the following columns:
`ID`: ID number
`Diagnosis`: Diagnosis (M = malignant, B = benign)
Ten real-valued features are computed for each cell nucleus:
`Radius`: radius (mean of distances from center to points on the perimeter)
`Texture`: texture (standard deviation of gray-scale values)
`Perimeter`: perimeter
`Area`: area
`Smoothness`: smoothness (local variation in radius lengths)
`Compactness`: compactness (perimeter^2 / area - 1.0)
`Concavity`: concavity (severity of concave portions of the contour)
`ConcavePoints`: concave points (number of concave portions of the contour)
`Symmetry`: symmetry
`FractalDimension`: fractal dimension ("coastline approximation" - 1)
For each of these ten features, the mean, standard error, and "worst"
or largest (mean of the three largest values) of these features were
computed for each image, resulting in 30 features. Features ending with
"M" indicate Mean Radius. Features ending with "S" indicate Standard
Error. Features ending with "W" indicate Worst Radius.
"""
return load_data('diagnostic_breast_cancer.csv')
def load_fertility():
"""
Loads the Fertility dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Fertility
Note: The dataset contains the following columns:
`PatientID`: Patient Identifier
`Season`: Season in which the analysis was performed. 1) winter,
2) spring, 3) Summer, 4) fall. (-1, -0.33, 0.33, 1)
`Age`: Age at the time of analysis. 18-36 (0, 1)
`ChildishDiseases`: Childish diseases (ie , chicken pox, measles, mumps,
polio) 1) yes, 2) no. (0, 1)
`Trauma`: Accident or serious trauma 1) yes, 2) no. (0, 1)
`SurgicalIntervention`: Surgical intervention 1) yes, 2) no. (0, 1)
`HighFevers`: High fevers in the last year 1) less than three months
ago, 2) more than three months ago, 3) no. (-1, 0, 1)
`AlcoholConsumption`: Frequency of alcohol consumption 1) several times
a day, 2) every day, 3) several times a week, 4) once a week,
5) hardly ever or never (0, 1)
`SmokingHabit`: Smoking habit 1) never, 2) occasional 3) daily.
(-1, 0, 1)
`SittingHours`: Number of hours spent sitting per day ene-16 (0, 1)
`Diagnosis`: Diagnosis normal (N), altered (O)
"""
return load_data('fertility.csv')
def load_heart_disease():
"""
Loads the Statlog (Heart) dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Statlog+%28Heart%29
Note: The dataset contains the following columns:
`PatientID`: Patient Identifier
`Age`: age
`Sex`: sex
`ChestPainType`: chest pain type (4 values)
`BloodPressure`: resting blood pressure
`Cholesterol`: serum cholesterol in mg/dl
`BloodSugar`: fasting blood sugar > 120 mg/dl
`EC_Results`: resting electrocardiographic results (values 0,1,2)
`MaxHeartRate`: maximum heart rate achieved
`Angina`: exercise induced angina
`OldPeak`: oldpeak = ST depression induced by exercise relative to rest
`PeakSlope`: the slope of the peak exercise ST segment
`MajorVessels`: number of major vessels (0-3) colored by flourosopy
`Thal`: thal: 3 = normal; 6 = fixed defect; 7 = reversable defect
`Outcome`: Absence (1) or presence (2) of heart disease
"""
return load_data('heart_disease.csv')
def load_mammographic_masses():
"""
Loads the Mammographic Mass dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Mammographic+Mass
Note: The dataset contains the following columns:
`PatientID`: Patient Identifier
`BiRadsAssessment`: BI-RADS assessment: 1 to 5 (ordinal,
non-predictive)
`Age`: patient's age in years (integer)
`Shape`: mass shape: round=1 oval=2 lobular=3 irregular=4 (nominal)
`Margin`: mass margin: circumscribed=1 microlobulated=2 obscured=3
ill-defined=4 spiculated=5 (nominal)
`Density`: mass density high=1 iso=2 low=3 fat-containing=4 (ordinal)
`Severity`: benign=0 or malignant=1 (binominal, goal field!)
"""
return load_data('mammographic_masses.csv')
def load_pima_indians_diabetes():
"""
Loads the PIMA Indians Diabetes dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Pima+Indians+Diabetes
Note: The dataset contains the following columns:
`PatientID`: Patient Identifier
`Pregnancies`: Number of times pregnant
`PlasmaGlucose`: Plasma glucose concentration a 2 hours in an oral
glucose tolerance test
`DiastolicBP`: Diastolic blood pressure (mm Hg)
`TricepSkinfoldThickness`: Triceps skin fold thickness (mm)
`Insulin`: 2-Hour serum insulin (mu U/ml)
`BMI`: Body mass index (weight in kg/(height in m)^2)
`DiabetesPedigreeFunction`: Diabetes pedigree function
`Age`: Age (years)
`Diabetes`: Class variable (Y or N)
"""
return load_data('pima_indians_diabetes.csv')
def load_prognostic_breast_cancer():
"""
Loads the Wisconsin Prognostic Breast Cancer dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Prognostic%29
Note: The dataset contains the following columns:
`ID`: ID number
`Outcome`: Outcome (R = recur, N = nonrecur)
`Time`: Time (recurrence time if field 2 = R, disease-free time if field 2 = N)
`TumorSize`: diameter of the excised tumor in centimeters
`LymphNodeStatus`: number of positive axillary lymph nodes observed at time of surgery
Ten real-valued features are computed for each cell nucleus:
`Radius`: radius (mean of distances from center to points on the perimeter)
`Texture`: texture (standard deviation of gray-scale values)
`Perimeter`: perimeter
`Area`: area
`Smoothness`: smoothness (local variation in radius lengths)
`Compactness`: compactness (perimeter^2 / area - 1.0)
`Concavity`: concavity (severity of concave portions of the contour)
`ConcavePoints`: concave points (number of concave portions of the contour)
`Symmetry`: symmetry
`FractalDimension`: fractal dimension ("coastline approximation" - 1)
For each of these ten features, the mean, standard error, and "worst"
or largest (mean of the three largest values) of these features were
computed for each image, resulting in 30 features. Features ending with
"M" indicate Mean Radius. Features ending with "S" indicate Standard
Error. Features ending with "W" indicate Worst Radius.
"""
return load_data('prognostic_breast_cancer.csv')
def load_thoracic_surgery():
"""
Loads the Thoracic Surgery dataset from the UCI ML Library
URL: https://archive.ics.uci.edu/ml/datasets/Thoracic+Surgery+Data
Note: The dataset contains the following columns:
`PatientID`: Patient Identifier
`DGN`: Diagnosis - specific combination of ICD-10 codes for primary and secondary as well
            as multiple tumours if any (DGN3,DGN2,DGN4,DGN6,DGN5,DGN8,DGN1)
`PRE4`: Forced vital capacity - FVC (numeric)
`PRE5`: Volume that has been exhaled at the end of the first second of forced
expiration - FEV1 (numeric)
`PRE6`: Performance status - Zubrod scale (PRZ2,PRZ1,PRZ0)
`PRE7`: Pain before surgery (T,F)
`PRE8`: Haemoptysis before surgery (T,F)
`PRE9`: Dyspnoea before surgery (T,F)
`PRE10`: Cough before surgery (T,F)
`PRE11`: Weakness before surgery (T,F)
`PRE14`: T in clinical TNM - size of the original tumour, from OC11 (smallest) to OC14
(largest) (OC11,OC14,OC12,OC13)
`PRE17`: Type 2 DM - diabetes mellitus (T,F)
`PRE19`: MI up to 6 months (T,F)
`PRE25`: PAD - peripheral arterial diseases (T,F)
`PRE30`: Smoking (T,F)
`PRE32`: Asthma (T,F)
`AGE`: Age at surgery (numeric)
`Risk1Y`: 1 year survival period - (T)rue value if died (T,F)
"""
return load_data('thoracic_surgery.csv')
| mit | 4,831,914,556,192,065,000 | 38.26087 | 97 | 0.651333 | false |
Quikling/gpdb | src/test/tinc/tincrepo/mpp/lib/regress/regress_gpfilespace.py | 9 | 2937 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest2 as unittest
from mpp.lib.PSQL import PSQL
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.gpfilespace import HAWQGpfilespace
class GpFilespaceRegressionTests(unittest.TestCase):
def __init__(self, methodName):
self.gpfs = Gpfilespace()
self.gpfs_h = HAWQGpfilespace()
super(GpFilespaceRegressionTests, self).__init__(methodName)
def tearDown(self):
PSQL.run_sql_command('Drop filespace test_fs_a;')
def test_create_filespace(self):
self.gpfs.create_filespace('test_fs_a')
fs_list = PSQL.run_sql_command("select fsname from pg_filespace where fsname<>'pg_system';", flags = '-q -t')
self.assertTrue('test_fs_a' in fs_list)
    def test_drop_filespace(self):
self.gpfs.create_filespace('test_fs_b')
self.assertTrue(self.gpfs.drop_filespace('test_fs_b'))
def test_fs_exists(self):
self.gpfs.create_filespace('test_fs_a')
self.assertTrue(self.gpfs.exists('test_fs_a'))
def test_showtempfiles(self):
result = self.gpfs.showtempfiles()
show = False
for line in result.stdout.splitlines():
if 'Current Filespace for TEMPORARY_FILES' in line:
show = True
self.assertTrue(show)
def test_get_filespace_location(self):
result = self.gpfs.get_filespace_location()
self.assertTrue(len(result) >0)
def test_get_filespace_directory(self):
result = self.gpfs.get_filespace_directory()
self.assertTrue(len(result) >0)
def test_get_hosts_for_filespace(self):
self.gpfs.create_filespace('test_fs_a')
fs_location = PSQL.run_sql_command("select fselocation from pg_filespace_entry where fselocation like '%test_fs_a%' and fsedbid=2;", flags = '-q -t')
result = self.gpfs.get_hosts_for_filespace(fs_location.strip())
self.assertEquals(result[0]['location'],fs_location.strip())
def test_create_filespace_hawq(self):
self.gpfs_h.create_filespace('test_fs_hq')
fs_list = PSQL.run_sql_command("select fsname from pg_filespace where fsname<>'pg_system';", flags = '-q -t')
self.assertTrue('test_fs_hq' in fs_list)
#self.gpfs.drop_filespace('test_fs_hq')
| apache-2.0 | 7,208,309,053,970,767,000 | 39.232877 | 158 | 0.685734 | false |
godotgildor/freebayes | python/allelebayes.py | 2 | 17483 | # calculates data likelihoods for sets of alleles
import multiset
import sys
import cjson
import phred
import json
import math
import operator
from logsumexp import logsumexp
from dirichlet import dirichlet_maximum_likelihood_ratio, dirichlet, multinomial, multinomialln
from factorialln import factorialln
"""
This module attempts to find the best method to approximate the integration of
data likelihoods for the bayesian variant caller we're currently working on.
stdin should be a stream of newline-delimited json records each encoding a list
of alleles which have been parsed out of alignment records. alleles.cpp in
this distribution provides such a stream.
Erik Garrison <[email protected]> 2010-07-15
"""
#potential_alleles = [
# {'type':'reference'},
# {'type':'snp', 'alt':'A'},
# {'type':'snp', 'alt':'T'},
# {'type':'snp', 'alt':'G'},
# {'type':'snp', 'alt':'C'}
# ]
def list_genotypes_to_count_genotypes(genotypes):
count_genotypes = []
for genotype in genotypes:
counts = {}
for allele in genotype:
if counts.has_key(allele):
counts[allele] += 1
else:
counts[allele] = 1
count_genotypes.append(counts.items())
return count_genotypes
"""
ploidy = 2
potential_alleles = ['A','T','G','C']
# genotypes are expressed as sets of allele frequencies
genotypes = list_genotypes_to_count_genotypes(list(multiset.multichoose(ploidy, potential_alleles)))
"""
# TODO
# update this so that we aren't just using the 'alternate' field from the alleles
# and are also incorporating the type of allele (ins, deletion, ref, snp)
def group_alleles(alleles):
groups = {}
for allele in alleles:
alt = allele['alt']
if groups.has_key(alt):
groups[alt].append(allele)
else:
groups[alt] = [allele]
return groups
def alleles_quality_to_lnprob(alleles):
for allele in alleles:
allele['quality'] = phred.phred2ln(allele['quality'])
return alleles
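# Reference for the conversion above (added comment): a phred score Q encodes
# an error probability p_err = 10 ** (-Q / 10), so its natural-log form is
# ln(p_err) = -Q * math.log(10) / 10. For example Q = 20 gives p_err = 0.01
# and ln(p_err) ~= -4.605; phred.phred2ln is assumed to perform exactly this
# conversion.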
def product(l):
return reduce(operator.mul, l)
def observed_alleles_in_genotype(genotype, allele_groups):
in_genotype = {}
not_in_genotype = {}
for key in allele_groups.keys():
found = False
for allele, count in genotype:
if allele == key:
in_genotype[key] = allele_groups[key]
found = True
break
if not found:
not_in_genotype[key] = allele_groups[key]
return in_genotype, not_in_genotype
#def scaled_sampling_prob(genotype, alleles):
# """The probability of drawing the observations in the allele_groups out of
# the given genotype, scaled by the number of possible multiset permutations
# of the genotype (we scale because we don't phase our genotypes under
# evaluation)."""
# allele_groups = group_alleles(alleles)
# if len(allele_groups.items()) == 0:
# return 0
# genotype_allele_frequencies = [x[1] for x in genotype]
# multiplicity = sum(genotype_allele_frequencies)
# genotype_allele_probabilities = [float(x)/multiplicity for x in genotype_allele_frequencies]
# observed_allele_frequencies = [len(x) for x in allele_groups.items()]
# observation_product = 1
# for allele, count in genotype:
# if allele_groups.has_key(allele):
# observation_product *= math.pow(float(count) / multiplicity, len(allele_groups[allele]))
# return float(math.pow(math.factorial(multiplicity), 2)) \
# / (product([math.factorial(x) for x in genotype_allele_frequencies]) *
# sum([math.factorial(x) for x in observed_allele_frequencies])) \
# * observation_product
#
# TODO XXX
# Yes, this is the sampling probability. It is the multinomial sampling
# probability, which is the specific probability of a specific set of
# categorical outcomes. Unfortunately, this is not what we really want here.
# What we want is the prior probability that a given set of draws come out of a
# given multiset (genotype, in our case). I believe that this is given by the
# Dirichlet distribution. Investigate.
def sampling_prob(genotype, alleles):
"""The specific probability of drawing the observations in alleles out of the given
genotype, follows the multinomial probability distribution."""
allele_groups = group_alleles(alleles)
multiplicity = sum([x[1] for x in genotype])
print genotype, multiplicity, alleles
for allele, count in genotype:
if allele_groups.has_key(allele):
print allele, count, math.pow(float(count) / multiplicity, len(allele_groups[allele]))
print product([math.factorial(len(obs)) for obs in allele_groups.values()])
print allele_groups.values()
return float(math.factorial(len(alleles))) \
/ product([math.factorial(len(obs)) for obs in allele_groups.values()]) \
* product([math.pow(float(count) / multiplicity, len(allele_groups[allele])) \
for allele, count in genotype if allele_groups.has_key(allele)])
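# Worked example for sampling_prob (added comment, illustrative numbers only):
# for the diploid genotype [('A', 1), ('T', 1)] each allele has probability
# 0.5, so observing the reads A, A, T has probability
#     3! / (2! * 1!) * 0.5**2 * 0.5**1 = 3 * 0.125 = 0.375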
def likelihood_given_true_alleles(observed_alleles, true_alleles):
prob = 0
for o, t in zip(observed_alleles, true_alleles):
if o['alt'] == t['alt']:
prob += math.log(1 - math.exp(o['quality']))
else:
prob += o['quality']
return prob
def data_likelihood_exact(genotype, observed_alleles):
"""'Exact' data likelihood, sum of sampling probability * join Q score for
the observed alleles over all possible underlying 'true allele'
combinations."""
#print "probability that observations", [o['alt'] for o in observed_alleles], "arise from genotype", genotype
observation_count = len(observed_alleles)
ploidy = sum([count for allele, count in genotype])
allele_probs = [count / float(ploidy) for allele, count in genotype]
probs = []
# for all true allele combinations X permutations
for true_allele_combination in multiset.multichoose(observation_count, [x[0] for x in genotype]):
for true_allele_permutation in multiset.permutations(true_allele_combination):
# this mapping allows us to use sampling_prob the same way as we do when we use JSON allele observation records
true_alleles = [{'alt':allele} for allele in true_allele_permutation]
allele_groups = group_alleles(true_alleles)
observations = []
for allele, count in genotype:
if allele_groups.has_key(allele):
observations.append(len(allele_groups[allele]))
else:
observations.append(0)
#sprob = dirichlet_maximum_likelihood_ratio(allele_probs, observations) # distribution parameter here
lnsampling_prob = multinomialln(allele_probs, observations)
prob = lnsampling_prob + likelihood_given_true_alleles(observed_alleles, true_alleles)
#print math.exp(prob), sprob, genotype, true_allele_permutation
#print genotype, math.exp(prob), sprob, true_allele_permutation, [o['alt'] for o in observed_alleles]
probs.append(prob)
# sum the individual probability of all combinations
p = logsumexp(probs)
#print math.exp(p)
return p
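# Summary of the quantity computed above (added comment): in symbols,
#     P(obs | G) = sum over assignments T of true alleles drawn from G of
#                  Multinomial(counts(T); allele frequencies of G)
#                  * prod_i P(obs_i | T_i)
# where P(obs_i | T_i) is (1 - p_err_i) when the observed base matches the
# assumed true base and p_err_i otherwise, exactly as computed by
# likelihood_given_true_alleles(); the sum is accumulated in log space with
# logsumexp to avoid underflow.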
def data_likelihood_estimate(genotype, alleles):
"""Estimates the data likelihood, which is a sum over all possible error
profiles, or underlying 'true alleles', motivating the observations."""
# for up to error_depth errors
pass
def genotype_combination_sampling_probability(genotype_combination, observed_alleles):
multiplicity = math.log(ploidy * len(genotype_combination))
result = 1 - multiplicity
allele_groups = group_alleles(observed_alleles)
for allele, observations in allele_groups.iteritems():
result += math.log(math.factorial(len(observations)))
# scale by product of multiset permutations of all genotypes in combo
for combo in genotype_combination:
for genotype in combo:
m_i = sum([a[1] for a in genotype])
result += math.log(math.factorial(m_i))
result -= sum([math.log(math.factorial(allele[1])) for allele in genotype])
return result
def count_frequencies(genotype_combo):
counts = {}
alleles = {}
for genotype in genotype_combo:
for allele, count in genotype:
if alleles.has_key(allele):
alleles[allele] += count
else:
alleles[allele] = count
for allele, count in alleles.iteritems():
if counts.has_key(count):
counts[count] += 1
else:
counts[count] = 1
return counts
def allele_frequency_probability(allele_frequency_counts, theta=0.001):
"""Implements Ewens' Sampling Formula. allele_frequency_counts is a
dictionary mapping count -> number of alleles with this count in the
population."""
M = sum([frequency * count for frequency, count in allele_frequency_counts.iteritems()])
return math.factorial(M) \
/ (theta * product([theta + h for h in range(1, M)])) \
* product([math.pow(theta, count) / math.pow(frequency, count) * math.factorial(count) \
for frequency, count in allele_frequency_counts.iteritems()])
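# For reference (added comment), Ewens' sampling formula is usually stated as
#     P(a_1, ..., a_n) = n! / (theta * (theta + 1) * ... * (theta + n - 1))
#                        * prod_j theta**a_j / (j**a_j * a_j!)
# where a_j is the number of distinct alleles observed exactly j times and
# n = sum_j j * a_j; this standard statement is given here alongside the
# prototype implementation above, whose allele_frequency_counts maps each
# frequency j to its count a_j.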
def powln(n, m):
"""Power of number in log space"""
return sum([n] * m)
def allele_frequency_probabilityln(allele_frequency_counts, theta=0.001):
"""Log space version to avoid inevitable overflows with coverage >100.
Implements Ewens' Sampling Formula. allele_frequency_counts is a
dictionary mapping count -> number of alleles with this count in the
population."""
thetaln = math.log(theta)
M = sum([frequency * count for frequency, count in allele_frequency_counts.iteritems()])
return factorialln(M) \
- (thetaln + sum([math.log(theta + h) for h in range(1, M)])) \
+ sum([powln(thetaln, count) - powln(math.log(frequency), count) + factorialln(count) \
for frequency, count in allele_frequency_counts.iteritems()])
def genotype_probabilities(genotypes, alleles):
return [[str(genotype), data_likelihood_exact(genotype, alleles)] for genotype in genotypes]
def genotype_probabilities_heuristic(genotypes, alleles):
groups = group_alleles(alleles)
# group genotypes relative to the groups of observed alleles
# take the first member of each group and apply our data likelihood calculation
# then apply it to the rest
    if len(groups.keys()) == 1:
# we can cleanly do all-right, part-right, all-wrong
pass
    if len(groups.keys()) == 2:
# we can do all-right, two types of 'part-right', and all-wrong
pass
def multiset_banded_genotype_combinations(sample_genotypes, bandwidth):
for index_combo in multiset.multichoose(len(samples), range(bandwidth)):
for index_permutation in multiset.permutations(index_combo):
yield [genotypes[index] for index, genotypes in zip(index_permutation, sample_genotypes)]
# TODO you should implement gabor's banding solution; the above multiset method
# is comically large and produces incorrect results despite the computational load
def banded_genotype_combinations(sample_genotypes, bandwidth, band_depth):
# always provide the 'best' case
yield [(sample, genotypes[0]) for sample, genotypes in sample_genotypes]
for i in range(1, bandwidth):
        for j in range(1, band_depth): # band_depth is the depth to which we explore the bandwidth... TODO explain better
indexes = j * [i] + (len(sample_genotypes) - j) * [0]
for index_permutation in multiset.permutations(indexes):
yield [(sample, genotypes[index]) for index, (sample, genotypes) in zip(index_permutation, sample_genotypes)]
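# Illustration of the banding above (added comment): with three samples,
# bandwidth=2 and band_depth=2 the generator first yields the combination in
# which every sample keeps its best-ranked genotype (all indexes 0), then the
# combinations in which exactly one sample switches to its second-best
# genotype (permutations of the index pattern [1, 0, 0]).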
def genotype_str(genotype):
return reduce(operator.add, [allele * count for allele, count in genotype])
if __name__ == '__main__':
ploidy = 2 # assume ploidy 2 for all individuals and all positions
potential_alleles = ['A','T','G','C']
# genotypes are expressed as sets of allele frequencies
genotypes = list_genotypes_to_count_genotypes(list(multiset.multichoose(ploidy, potential_alleles)))
for line in sys.stdin:
position = cjson.decode(line)
#print position['position']
samples = position['samples']
position['coverage'] = sum([len(sample['alleles']) for samplename, sample in samples.iteritems()])
#potential_alleles = ['A','T','G','C']
potential_alleles = set()
for samplename, sample in samples.items():
# only process snps and reference alleles
alleles = [allele for allele in sample['alleles'] if allele['type'] in ['reference', 'snp']]
alleles = alleles_quality_to_lnprob(alleles)
sample['alleles'] = alleles
potential_alleles = potential_alleles.union(set([allele['alt'] for allele in alleles]))
position['filtered coverage'] = sum([len(sample['alleles']) for samplename, sample in samples.iteritems()])
# genotypes are expressed as sets of allele frequencies
#genotypes = list_genotypes_to_count_genotypes(list(multiset.multichoose(ploidy, list(potential_alleles))))
for samplename, sample in samples.items():
alleles = sample['alleles']
groups = group_alleles(alleles)
sample['genotypes'] = [[genotype, data_likelihood_exact(genotype, alleles)] for genotype in genotypes]
#sample['genotypes_estimate'] = [[str(genotype), data_likelihood_estimate(genotype, alleles)] for genotype in genotypes]
# estimate the posterior over all genotype combinations within some indexed bandwidth of optimal
# TODO preserve sample names in the genotype comos
sample_genotypes = [(name, sorted(sample['genotypes'], key=lambda genotype: genotype[1], reverse=True)) for name, sample in samples.iteritems()]
genotype_combo_probs = []
#for combo in multiset_banded_genotype_combinations(sample_genotypes, 2):
#for combo in banded_genotype_combinations(sample_genotypes, min(len(genotypes), 2), len(samples)):
# now marginals time...
marginals = {}
for name, sample in samples.iteritems():
marginals[name] = {}
combos_tested = 0
for combo in banded_genotype_combinations(sample_genotypes, min(len(genotypes), 2), 2):
combos_tested += 1
probability_observations_given_genotypes = sum([prob for name, (genotype, prob) in combo])
frequency_counts = count_frequencies([genotype for name, (genotype, prob) in combo])
prior_probability_of_genotype = allele_frequency_probabilityln(frequency_counts)
combo_prob = prior_probability_of_genotype + probability_observations_given_genotypes
for name, (genotype, prob) in combo:
gstr = genotype_str(genotype)
if marginals[name].has_key(gstr):
marginals[name][gstr].append(combo_prob)
else:
marginals[name][gstr] = [combo_prob]
genotype_combo_probs.append([combo, combo_prob])
genotype_combo_probs = sorted(genotype_combo_probs, key=lambda c: c[1], reverse=True)
#for line in [json.dumps({'prob':prior_probability_of_genotype, 'combo':combo}) for combo, prior_probability_of_genotype in genotype_combo_probs]:
# print line
# sum, use to normalize
# apply bayes rule
#print genotype_combo_probs
#print [prob for combo, prob in genotype_combo_probs]
#for combo, prob in genotype_combo_probs:
# print prob
posterior_normalizer = logsumexp([prob for combo, prob in genotype_combo_probs])
# handle marginals
for sample, genotype_probs in marginals.iteritems():
for genotype, probs in genotype_probs.iteritems():
marginals[sample][genotype] = logsumexp(probs) - posterior_normalizer
best_genotype_combo = genotype_combo_probs[0][0]
best_genotype_combo_prob = genotype_combo_probs[0][1]
#best_genotype_probability = math.exp(sum([prob for name, (genotype, prob) in best_genotype_combo]) \
# + allele_frequency_probabilityln(count_frequencies([genotype for name, (genotype, prob) in best_genotype_combo])) \
# - posterior_normalizer)
best_genotype_probability = math.exp(best_genotype_combo_prob - posterior_normalizer)
position['best_genotype_combo'] = [[name, genotype_str(genotype), math.exp(marginals[name][genotype_str(genotype)])]
for name, (genotype, prob) in best_genotype_combo]
position['best_genotype_combo_prob'] = best_genotype_probability
position['posterior_normalizer'] = math.exp(posterior_normalizer)
position['combos_tested'] = combos_tested
#position['genotype_combo_probs'] = genotype_combo_probs
# TODO estimate marginal probabilities of genotypings
# here we cast everything into float-space
for samplename, sample in samples.items():
sample['genotypes'] = sorted([[genotype_str(genotype), math.exp(prob)] for genotype, prob in sample['genotypes']],
key=lambda c: c[1], reverse=True)
print cjson.encode(position)
#print position['position']
| mit | -3,796,656,571,261,696,000 | 45.871314 | 154 | 0.66293 | false |
kaixinjxq/crosswalk-test-suite | tools/build/varshop.py | 21 | 1935 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Hao, Yunfei <[email protected]>
class GlobalVars:
BUILD_PARAMETERS = None
BUILD_ROOT = None
BUILD_ROOT_SRC = None
    BUILD_TIME = None
    CROSSWALK_BRANCH = None
    CROSSWALK_VERSION = None
    DEFAULT_CMD_TIMEOUT = None
    PKG_ARCHS = None
    PKG_MODES = None
def setValue(key, value):
exec 'GlobalVars.%s = value' % key
def getValue(key):
exec 'rlt = GlobalVars.%s' % key
return rlt
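# Illustrative usage (added comment): build scripts that import this module
# can share state through GlobalVars, e.g.
#     import varshop
#     varshop.setValue('BUILD_TIME', '20150101')
#     varshop.getValue('BUILD_TIME')    # -> '20150101'
# getValue() raises AttributeError for a name that was neither declared on
# GlobalVars nor previously set, since the lookup is a plain attribute access.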
| bsd-3-clause | 8,230,647,700,913,999,000 | 38.489796 | 80 | 0.750904 | false |
google/ftc-object-detection | training/camera_cv.py | 1 | 6053 | #!/usr/bin/env python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import numpy as np
import os
import errno
import sys
from object_detector import ObjectDetector as TFObjectDetector
from object_detector_lite import ObjectDetector as LiteObjectDetector
import cv2
description_text = """\
Use this script to visualize network output on each frame of a video.
Once you've trained a network, you may want to intuitively understand its
performance on different videos, especially looking at frame to frame
performance in a single video. This script enables that visualization for both
TensorFlow and TFLite model formats. Additionally, this script lets you save a
video with each frame annotated with output from the network, as well as save
individual annotated frames if desired.
"""
epilog_text = """\
example:
./camera_cv.py --movie [movie.mp4] --path_to_model [model.pb]
"""
parser = argparse.ArgumentParser(
description=description_text,
epilog=epilog_text,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--movie", type=str, default="",
help="Movie file to run prediction on")
parser.add_argument("--write_images", default=False, action="store_true",
help="Whether to write each frame as a separate image")
parser.add_argument("--write_movie", default=False, action="store_true",
help="Whether to write an annotated movie")
parser.add_argument("--tflite", default=False, action="store_true",
help="Whether model is tflite")
parser.add_argument("--path_to_model", type=str,
default="output_inference_graph/frozen_inference_graph.pb",
help="Directory containing frozen checkpoint file or .tflite model")
parser.add_argument("--path_to_labels", type=str,
default="train_data/label.pbtxt",
help="Text proto (TF) or text (tflite) file containing label map")
parser.add_argument("--num_classes", type=int, default=2,
help="Number of classes")
parser.add_argument("--threshold", type=float, default=0.6,
help="Threshold for displaying detections")
parser.add_argument("--box_priors", type=str,
default="box_priors.txt",
help="Path to box_priors.txt file containing priors (only required for TFLite)")
args = parser.parse_args()
if args.movie is not "" and not os.path.exists(args.movie):
print("Movie file %s missing" % args.movie)
sys.exit(1)
if args.movie is not "":
cam = cv2.VideoCapture(args.movie)
else:
cam = cv2.VideoCapture(0)
args.movie = "movie.mkv"
width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
if args.tflite:
objdet = LiteObjectDetector(args.path_to_model, args.path_to_labels,
args.box_priors)
else:
objdet = TFObjectDetector(args.path_to_model, args.path_to_labels,
args.num_classes)
movie_name = os.path.splitext(os.path.basename(args.movie))[0]
if args.write_movie:
out_path = os.path.join(os.path.dirname(args.movie), movie_name + "_boxes")
movie_path = "%s.mkv" % out_path
print("Writing movie to", movie_path)
writer = cv2.VideoWriter(
movie_path,
cv2.VideoWriter_fourcc(*"MJPG"),
int(cam.get(cv2.CAP_PROP_FPS)),
(width, height)
)
# Quit if there was a problem
if not writer.isOpened():
print("Unable to open video!")
sys.exit()
if args.write_images:
movie_dir = os.path.dirname(args.movie)
images_dir = os.path.join(movie_dir, "%s_images" % movie_name)
print("Writing images to %s" % images_dir)
try:
os.makedirs(images_dir)
except OSError as e:
if e.errno == errno.EEXIST:
print("Directory exists already, continuing!")
else:
raise
counter = 0
ret, frame = cam.read()
while ret == True:
img = frame.copy() # Aliased, but lets us turn off transformations as necessary.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
h, w, _ = img.shape
expand = np.expand_dims(img, axis=0)
result = objdet.detect(expand)
boxes = []
for i in range(result['num_detections']):
if result['detection_scores'][i] > args.threshold:
class_ = result['detection_classes'][i]
box = result['detection_boxes'][i]
score = result['detection_scores'][i]
y1, x1 = int(box[0] * h), int(box[1] * w)
y2, x2 = int(box[2] * h), int(box[3] * w)
if args.tflite:
x1, y1, x2, y2 = y1, x1, y2, x2
boxes.append((class_, score, x1, y1, x2, y2))
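    # (Added note) The detector returns boxes normalized to [0, 1] in
    # [ymin, xmin, ymax, xmax] order, so they are scaled by the frame size
    # above; e.g. [0.1, 0.2, 0.5, 0.6] on a 640x480 frame becomes y1=48,
    # x1=128, y2=240, x2=384. The TFLite branch swaps the pairs to compensate
    # for that detector's coordinate order.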
for box in boxes:
class_, score, x1, y1, x2, y2 = box
w1 = x2-x1
h1 = y2-y1
cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 2)
cv2.putText(img, "%s: %5.2f" % (class_-1, score), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imshow('image', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if args.write_movie:
writer.write(img)
if args.write_images:
print("[%d] Writing original to %s" % (counter, images_dir))
cv2.imwrite(os.path.join(images_dir, "orig_%05d.png" % counter), frame)
print("[%d] Writing boxes to %s" % (counter, images_dir))
cv2.imwrite(os.path.join(images_dir, "box_%05d.png" % counter), img)
counter += 1
ret, frame = cam.read()
if args.write_movie:
writer.release()
| apache-2.0 | -2,610,202,393,842,050,600 | 35.908537 | 110 | 0.651743 | false |
ww9rivers/splunk-sdk-python | splunklib/modularinput/event_writer.py | 3 | 2790 | # Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import sys
from .event import ET
try:
from splunklib.six.moves import cStringIO as StringIO
except ImportError:
from splunklib.six import StringIO
class EventWriter(object):
"""``EventWriter`` writes events and error messages to Splunk from a modular input.
    Its two important methods are ``write_event``, which takes an ``Event`` object,
and ``log``, which takes a severity and an error message.
"""
# Severities that Splunk understands for log messages from modular inputs.
# Do not change these
DEBUG = "DEBUG"
INFO = "INFO"
WARN = "WARN"
ERROR = "ERROR"
FATAL = "FATAL"
def __init__(self, output = sys.stdout, error = sys.stderr):
"""
:param output: Where to write the output; defaults to sys.stdout.
:param error: Where to write any errors; defaults to sys.stderr.
"""
self._out = output
self._err = error
# has the opening <stream> tag been written yet?
self.header_written = False
def write_event(self, event):
"""Writes an ``Event`` object to Splunk.
:param event: An ``Event`` object.
"""
if not self.header_written:
self._out.write(b"<stream>")
self.header_written = True
event.write_to(self._out)
def log(self, severity, message):
"""Logs messages about the state of this modular input to Splunk.
These messages will show up in Splunk's internal logs.
:param severity: ``string``, severity of message, see severities defined as class constants.
:param message: ``string``, message to log.
"""
self._err.write(("%s %s\n" % (severity, message)).encode('utf-8'))
self._err.flush()
def write_xml_document(self, document):
"""Writes a string representation of an
``ElementTree`` object to the output stream.
:param document: An ``ElementTree`` object.
"""
self._out.write(ET.tostring(document))
self._out.flush()
def close(self):
"""Write the closing </stream> tag to make this XML well formed."""
self._out.write(b"</stream>")
| apache-2.0 | -1,290,993,148,854,081,300 | 31.823529 | 100 | 0.646237 | false |
sovietspy2/uzletiProject | python/Lib/doctest.py | 3 | 102700 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
# Create a new flag unless `name` is already known.
return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
if module_relative:
package = _normalize_module(package, 3)
filename = _module_relative_path(package, filename)
if hasattr(package, '__loader__'):
if hasattr(package.__loader__, 'get_data'):
file_contents = package.__loader__.get_data(filename)
# get_data() opens files as 'rb', so one must do the equivalent
# conversion as universal newlines would do.
return file_contents.replace(os.linesep, '\n'), filename
return open(filename).read(), filename
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
self.__debugger_used = False
pdb.Pdb.__init__(self, stdout=out)
def set_trace(self, frame=None):
self.__debugger_used = True
if frame is None:
frame = sys._getframe().f_back
pdb.Pdb.set_trace(self, frame)
def set_continue(self):
# Calling set_continue unconditionally would break unit test
# coverage reporting, as Bdb.set_continue calls sys.settrace(None).
if self.__debugger_used:
pdb.Pdb.set_continue(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
      I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
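    # For instance (explanatory comment), applied to the docstring fragment
    #
    #     >>> if x > 0:
    #     ...     print x
    #     12
    #
    # the match's `source` group holds the ">>>" line plus the "..."
    # continuation line, `indent` holds the spaces before the first ">>>",
    # and `want` holds the expected-output line "12".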
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
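    # Example (explanatory comment): given the example
    #
    #     >>> print range(20)    # doctest: +ELLIPSIS
    #     [0, 1, ..., 19]
    #
    # _find_options returns {ELLIPSIS: True}, so the runner enables the
    # ELLIPSIS flag for this one example only.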
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
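# A minimal usage sketch (added for illustration; not part of the original
# module).  It drives the parser directly on a throwaway docstring:
#
#     parser = DocTestParser()
#     examples = parser.get_examples(">>> 1 + 1\n2\n")
#     for ex in examples:
#         print ex.source, ex.want        # "1 + 1\n" and "2\n"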
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
        # a module, then module=obj).  Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
        # Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
# Sort the tests by alpha order of names, for consistency in
# verbose-mode output. This was a feature of doctest in Pythons
# <= 2.3 that got lost by accident in 2.4. It was repaired in
# 2.4.4 and 2.5.
tests.sort()
return tests
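    # A brief usage sketch (illustrative only; `mymodule` is a stand-in for
    # any imported module):
    #
    #     finder = DocTestFinder()
    #     for test in finder.find(mymodule):
    #         print test.name, len(test.examples)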
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
            return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> tests.sort(key = lambda test: test.name)
>>> for test in tests:
... print test.name, '->', runner.run(test)
_TestClass -> TestResults(failed=0, attempted=2)
_TestClass.__init__ -> TestResults(failed=0, attempted=2)
_TestClass.get -> TestResults(failed=0, attempted=2)
_TestClass.square -> TestResults(failed=0, attempted=1)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
`TestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# If 'SKIP' is set, then skip this example.
if self.optionflags & SKIP:
continue
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return TestResults(failures, tries)
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename, module_globals=None):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
return self.save_linecache_getlines(filename, module_globals)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return TestResults(totalf, totalt)
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
print "*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
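# The DocTestRunner docstring above notes that display output can be tailored
# either by passing an `out` callable to run() or by overriding the report_*
# methods.  A minimal illustrative subclass (not part of the original module;
# `some_test` stands for any DocTest object):
#
#     class TerseRunner(DocTestRunner):
#         def report_failure(self, out, test, example, got):
#             out("FAIL %s (example at line %s)\n"
#                 % (test.name, example.lineno + 1))
#
#     TerseRunner(verbose=False).run(some_test)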
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
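    # Illustrative only (not in the original source): with ELLIPSIS set, a
    # "..." in the expected output matches any substring of the actual output.
    #
    #     checker = OutputChecker()
    #     checker.check_output('[0, ..., 9]\n',
    #                          '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n',
    #                          ELLIPSIS)       # -> True
    #     checker.check_output('[0, ..., 9]\n',
    #                          '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n',
    #                          0)              # -> False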
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
- example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
TestResults(failed=0, attempted=1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, report=True,
optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
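# The usual client-side idiom (a hedged sketch of how *other* modules invoke
# testmod; it is not code belonging to this module):
#
#     if __name__ == "__main__":
#         import doctest
#         doctest.testmod()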
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser(),
encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
SKIP
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg "encoding" specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
text, filename = _load_testfile(filename, package, module_relative)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
if encoding is not None:
text = text.decode(encoding)
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return TestResults(runner.failures, runner.tries)
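# A hedged usage sketch (the file name is hypothetical): check a
# module-relative text file, eliding long output and relaxing whitespace:
#
#     import doctest
#     doctest.testfile("docs/example.txt",
#                      optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)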
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
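# Illustrative call (`my_function` is a placeholder for any object whose
# docstring contains examples):
#
#     run_docstring_examples(my_function, globals(), verbose=True,
#                            name="my_function")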
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.optionflags = optionflags
self.testfinder = DocTestFinder()
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return TestResults(f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return TestResults(f,t)
def rundict(self, d, name, module=None):
import types
m = types.ModuleType(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import types
m = types.ModuleType(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> import doctest
>>> old = doctest._unittest_reportflags
>>> doctest.set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> doctest.set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test, clear_globs=False)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
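# A minimal integration sketch (the module name is hypothetical); via the
# unittest load_tests protocol, the docstring tests join a normal test run:
#
#     import unittest, doctest
#     import mypackage.mymodule
#
#     def load_tests(loader, tests, ignore):
#         tests.addTests(doctest.DocTestSuite(mypackage.mymodule))
#         return tests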
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(),
encoding=None, **options):
if globs is None:
globs = {}
else:
globs = globs.copy()
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
doc, path = _load_testfile(path, package, module_relative)
if "__file__" not in globs:
globs["__file__"] = path
# Find the file and read it.
name = os.path.basename(path)
# If an encoding is specified, use it to convert the file to unicode
if encoding is not None:
doc = doc.decode(encoding)
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
encoding
An encoding that will be used to convert the files to unicode.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
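# Illustrative only; the paths are hypothetical module-relative files:
#
#     suite = DocFileSuite('docs/usage.txt', 'docs/api.txt',
#                          optionflags=REPORT_NDIFF)
#     unittest.TextTestRunner().run(suite)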
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
<BLANKLINE>
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
# Add a courtesy newline to prevent exec from choking (see bug #1172785)
return '\n'.join(output) + '\n'
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-']
if testfiles:
for filename in testfiles:
if filename.endswith(".py"):
# It is a module -- insert its dir into sys.path and try to
# import it. If it is part of a package, that possibly won't work
# because of package imports.
dirname, filename = os.path.split(filename)
sys.path.insert(0, dirname)
m = __import__(filename[:-3])
del sys.path[0]
failures, _ = testmod(m)
else:
failures, _ = testfile(filename, module_relative=False)
if failures:
return 1
else:
r = unittest.TextTestRunner()
r.run(DocTestSuite())
return 0
if __name__ == "__main__":
sys.exit(_test())
| gpl-3.0 | 6,232,259,507,086,020,000 | 36.435629 | 81 | 0.555706 | false |
ofayans/freeipa | ipaclient/remote_plugins/2_164/hostgroup.py | 16 | 20373 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Groups of hosts.
Manage groups of hosts. This is useful for applying access control to a
number of hosts by using Host-based Access Control.
EXAMPLES:
Add a new host group:
ipa hostgroup-add --desc="Baltimore hosts" baltimore
Add another new host group:
ipa hostgroup-add --desc="Maryland hosts" maryland
Add members to the hostgroup (using Bash brace expansion):
ipa hostgroup-add-member --hosts={box1,box2,box3} baltimore
Add a hostgroup as a member of another hostgroup:
ipa hostgroup-add-member --hostgroups=baltimore maryland
Remove a host from the hostgroup:
ipa hostgroup-remove-member --hosts=box2 baltimore
Display a host group:
ipa hostgroup-show baltimore
Delete a hostgroup:
ipa hostgroup-del baltimore
""")
register = Registry()
@register()
class hostgroup(Object):
takes_params = (
parameters.Str(
'cn',
primary_key=True,
label=_(u'Host-group'),
doc=_(u'Name of host-group'),
),
parameters.Str(
'description',
required=False,
label=_(u'Description'),
doc=_(u'A description of this host-group'),
),
parameters.Str(
'member_host',
required=False,
label=_(u'Member hosts'),
),
parameters.Str(
'member_hostgroup',
required=False,
label=_(u'Member host-groups'),
),
parameters.Str(
'memberof_hostgroup',
required=False,
label=_(u'Member of host-groups'),
),
parameters.Str(
'memberof_netgroup',
required=False,
label=_(u'Member of netgroups'),
),
parameters.Str(
'memberof_sudorule',
required=False,
label=_(u'Member of Sudo rule'),
),
parameters.Str(
'memberof_hbacrule',
required=False,
label=_(u'Member of HBAC rule'),
),
parameters.Str(
'memberindirect_host',
required=False,
label=_(u'Indirect Member hosts'),
),
parameters.Str(
'memberindirect_hostgroup',
required=False,
label=_(u'Indirect Member host-groups'),
),
parameters.Str(
'memberofindirect_hostgroup',
required=False,
label=_(u'Indirect Member of host-group'),
),
parameters.Str(
'memberofindirect_sudorule',
required=False,
label=_(u'Indirect Member of Sudo rule'),
),
parameters.Str(
'memberofindirect_hbacrule',
required=False,
label=_(u'Indirect Member of HBAC rule'),
),
)
@register()
class hostgroup_add(Method):
__doc__ = _("Add a new hostgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='hostgroup_name',
label=_(u'Host-group'),
doc=_(u'Name of host-group'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'A description of this host-group'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class hostgroup_add_member(Method):
__doc__ = _("Add members to a hostgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='hostgroup_name',
label=_(u'Host-group'),
doc=_(u'Name of host-group'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'host',
required=False,
multivalue=True,
cli_name='hosts',
label=_(u'member host'),
doc=_(u'hosts to add'),
alwaysask=True,
),
parameters.Str(
'hostgroup',
required=False,
multivalue=True,
cli_name='hostgroups',
label=_(u'member host group'),
doc=_(u'host groups to add'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be added'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members added'),
),
)
@register()
class hostgroup_del(Method):
__doc__ = _("Delete a hostgroup.")
takes_args = (
parameters.Str(
'cn',
multivalue=True,
cli_name='hostgroup_name',
label=_(u'Host-group'),
doc=_(u'Name of host-group'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class hostgroup_find(Method):
__doc__ = _("Search for hostgroups.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'cn',
required=False,
cli_name='hostgroup_name',
label=_(u'Host-group'),
doc=_(u'Name of host-group'),
no_convert=True,
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'A description of this host-group'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds (0 is unlimited)'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned (0 is unlimited)'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("hostgroup-name")'),
default=False,
autofill=True,
),
parameters.Str(
'host',
required=False,
multivalue=True,
cli_name='hosts',
label=_(u'host'),
doc=_(u'Search for host groups with these member hosts.'),
),
parameters.Str(
'no_host',
required=False,
multivalue=True,
cli_name='no_hosts',
label=_(u'host'),
doc=_(u'Search for host groups without these member hosts.'),
),
parameters.Str(
'hostgroup',
required=False,
multivalue=True,
cli_name='hostgroups',
label=_(u'host group'),
doc=_(u'Search for host groups with these member host groups.'),
),
parameters.Str(
'no_hostgroup',
required=False,
multivalue=True,
cli_name='no_hostgroups',
label=_(u'host group'),
doc=_(u'Search for host groups without these member host groups.'),
),
parameters.Str(
'in_hostgroup',
required=False,
multivalue=True,
cli_name='in_hostgroups',
label=_(u'host group'),
doc=_(u'Search for host groups with these member of host groups.'),
),
parameters.Str(
'not_in_hostgroup',
required=False,
multivalue=True,
cli_name='not_in_hostgroups',
label=_(u'host group'),
doc=_(u'Search for host groups without these member of host groups.'),
),
parameters.Str(
'in_netgroup',
required=False,
multivalue=True,
cli_name='in_netgroups',
label=_(u'netgroup'),
doc=_(u'Search for host groups with these member of netgroups.'),
),
parameters.Str(
'not_in_netgroup',
required=False,
multivalue=True,
cli_name='not_in_netgroups',
label=_(u'netgroup'),
doc=_(u'Search for host groups without these member of netgroups.'),
),
parameters.Str(
'in_hbacrule',
required=False,
multivalue=True,
cli_name='in_hbacrules',
label=_(u'HBAC rule'),
doc=_(u'Search for host groups with these member of HBAC rules.'),
),
parameters.Str(
'not_in_hbacrule',
required=False,
multivalue=True,
cli_name='not_in_hbacrules',
label=_(u'HBAC rule'),
doc=_(u'Search for host groups without these member of HBAC rules.'),
),
parameters.Str(
'in_sudorule',
required=False,
multivalue=True,
cli_name='in_sudorules',
label=_(u'sudo rule'),
doc=_(u'Search for host groups with these member of sudo rules.'),
),
parameters.Str(
'not_in_sudorule',
required=False,
multivalue=True,
cli_name='not_in_sudorules',
label=_(u'sudo rule'),
doc=_(u'Search for host groups without these member of sudo rules.'),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class hostgroup_mod(Method):
__doc__ = _("Modify a hostgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='hostgroup_name',
label=_(u'Host-group'),
doc=_(u'Name of host-group'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'A description of this host-group'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class hostgroup_remove_member(Method):
__doc__ = _("Remove members from a hostgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='hostgroup_name',
label=_(u'Host-group'),
doc=_(u'Name of host-group'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'host',
required=False,
multivalue=True,
cli_name='hosts',
label=_(u'member host'),
doc=_(u'hosts to remove'),
alwaysask=True,
),
parameters.Str(
'hostgroup',
required=False,
multivalue=True,
cli_name='hostgroups',
label=_(u'member host group'),
doc=_(u'host groups to remove'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be removed'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members removed'),
),
)
@register()
class hostgroup_show(Method):
__doc__ = _("Display information about a hostgroup.")
takes_args = (
parameters.Str(
'cn',
cli_name='hostgroup_name',
label=_(u'Host-group'),
doc=_(u'Name of host-group'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
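# Usage sketch (not part of the generated plugin): these Command classes are
# normally reached through a bootstrapped ipalib api object on a configured
# IPA client rather than instantiated directly.  Roughly:
#
#     from ipalib import api
#     api.bootstrap(context='cli')
#     api.finalize()
#     # ... connect the RPC backend to the IPA server, then:
#     result = api.Command.hostgroup_add(u'baltimore',
#                                        description=u'Baltimore hosts')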
| gpl-3.0 | 8,230,526,775,095,126,000 | 27.734838 | 162 | 0.490846 | false |
Pikecillo/genna | external/PyXML-0.8.4/test/dom/html/test.py | 1 | 1659 | fileList = ['Collection',
'Element',
'HTML',
'HEAD',
'LINK',
'TITLE',
'META',
'BASE',
'ISINDEX',
'STYLE',
'BODY',
'FORM',
'SELECT',
'OPTGROUP',
'OPTION',
'INPUT',
'TEXTAREA',
'BUTTON',
'LABEL',
'FIELDSET',
'LEGEND',
'UL',
'OL',
'DL',
'DIR',
'MENU',
'LI',
'BLOCKQUOTE',
'DIV',
'P',
'H',
'Q',
'PRE',
'BR',
'BASEFONT',
'FONT',
'HR',
'MOD',
'A',
'IMG',
'OBJECT',
'PARAM',
'APPLET',
'MAP',
'AREA',
'SCRIPT',
'CAPTION',
'COL',
'TD',
'TR',
'SECTION',
'TABLE',
'FRAMESET',
'FRAME',
'IFRAME',
'DOCUMENT',
'HTML_DOM_IMPLEMENTATION',
]
import string
def test(files):
print 'Testing HTML Level 1'
for file in files:
print '**********Testing HTML %s**********' % file
exec 'import test_%s;_mod = test_%s' % (string.lower(file),string.lower(file));
_mod.test();
if __name__ == '__main__':
import sys
if len(sys.argv) <2:
test(fileList)
else:
test(sys.argv[1:]);
| gpl-2.0 | -6,646,811,134,404,993,000 | 20.828947 | 87 | 0.295359 | false |
mvasilkov/kivy-2014 | rockivy/util.py | 2 | 1225 | from __future__ import division
from collections import namedtuple
import json
from os.path import join as path_join, realpath, dirname
from kivy.core.image import Image
PATH = realpath(path_join(dirname(__file__), '..', 'media'))
Quad = namedtuple('Quad', 'x y size tex')
def blending_is_broken():
# https://github.com/kivy/kivy/issues/2182
test = Image(path_join(PATH, 'test.png')).texture
res = test.pixels[0]
if isinstance(res, str):
res = ord(res)
return res < 240
def load_tex_uv(atlas_name):
with open(path_join(PATH, atlas_name), 'rb') as istream:
atlas_obj = json.loads(istream.read().decode('utf-8'))
tex_name, mapping = atlas_obj.popitem()
tex = Image(path_join(PATH, tex_name)).texture
tex_width, tex_height = tex.size
res = {}
for name, val in mapping.items():
x1, y1 = val[:2]
x2, y2 = x1 + val[2], y1 + val[3]
res[name] = (x1 / tex_width, 1 - y1 / tex_height,
x2 / tex_width, 1 - y2 / tex_height,
val[2] * 0.5, val[3] * 0.5)
return tex, res
def mutate(t, changes):
res = list(t)
for idx, val in changes.items():
res[idx] = val
return tuple(res)
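# Example (added for illustration): replace selected positions of a tuple.
#     mutate((1, 2, 3), {0: 'a', 2: 'c'})  ->  ('a', 2, 'c')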
| mit | 1,914,641,888,001,158,400 | 24.520833 | 62 | 0.590204 | false |
apark263/tensorflow | tensorflow/contrib/distributions/python/ops/negative_binomial.py | 34 | 7811 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Negative Binomial distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
class NegativeBinomial(distribution.Distribution):
"""NegativeBinomial distribution.
The NegativeBinomial distribution is related to the experiment of performing
Bernoulli trials in sequence. Given a Bernoulli trial with probability `p` of
success, the NegativeBinomial distribution represents the distribution over
the number of successes `s` that occur until we observe `f` failures.
The probability mass function (pmf) is,
```none
pmf(s; f, p) = p**s (1 - p)**f / Z
Z = s! (f - 1)! / (s + f - 1)!
```
where:
* `total_count = f`,
* `probs = p`,
  * `Z` is the normalizing constant, and,
* `n!` is the factorial of `n`.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
total_count,
logits=None,
probs=None,
validate_args=False,
allow_nan_stats=True,
name="NegativeBinomial"):
"""Construct NegativeBinomial distributions.
Args:
total_count: Non-negative floating-point `Tensor` with shape
broadcastable to `[B1,..., Bb]` with `b >= 0` and the same dtype as
`probs` or `logits`. Defines this as a batch of `N1 x ... x Nm`
different Negative Binomial distributions. In practice, this represents
the number of negative Bernoulli trials to stop at (the `total_count`
of failures), but this is still a valid distribution when
`total_count` is a non-integer.
logits: Floating-point `Tensor` with shape broadcastable to
`[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
Each entry represents logits for the probability of success for
independent Negative Binomial distributions and must be in the open
interval `(-inf, inf)`. Only one of `logits` or `probs` should be
specified.
probs: Positive floating-point `Tensor` with shape broadcastable to
`[B1, ..., Bb]` where `b >= 0` indicates the number of batch dimensions.
Each entry represents the probability of success for independent
Negative Binomial distributions and must be in the open interval
`(0, 1)`. Only one of `logits` or `probs` should be specified.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[total_count, logits, probs]) as name:
self._logits, self._probs = distribution_util.get_logits_and_probs(
logits, probs, validate_args=validate_args, name=name)
with ops.control_dependencies(
[check_ops.assert_positive(total_count)] if validate_args else []):
self._total_count = array_ops.identity(total_count)
super(NegativeBinomial, self).__init__(
dtype=self._probs.dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._total_count, self._probs, self._logits],
name=name)
@property
def total_count(self):
"""Number of negative trials."""
return self._total_count
@property
def logits(self):
"""Log-odds of a `1` outcome (vs `0`)."""
return self._logits
@property
def probs(self):
"""Probability of a `1` outcome (vs `0`)."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.total_count),
array_ops.shape(self.probs))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.total_count.get_shape(),
self.probs.get_shape())
def _event_shape_tensor(self):
return array_ops.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Here we use the fact that if:
# lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
# then X ~ Poisson(lam) is Negative Binomially distributed.
rate = random_ops.random_gamma(
shape=[n],
alpha=self.total_count,
beta=math_ops.exp(-self.logits),
dtype=self.dtype,
seed=seed)
return random_ops.random_poisson(
rate,
shape=[],
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, "negative_binom"))
def _cdf(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return math_ops.betainc(self.total_count, 1. + x,
math_ops.sigmoid(-self.logits))
def _log_prob(self, x):
return (self._log_unnormalized_prob(x)
- self._log_normalization(x))
def _log_unnormalized_prob(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return (self.total_count * math_ops.log_sigmoid(-self.logits)
+ x * math_ops.log_sigmoid(self.logits))
def _log_normalization(self, x):
if self.validate_args:
x = distribution_util.embed_check_nonnegative_integer_form(x)
return (-math_ops.lgamma(self.total_count + x)
+ math_ops.lgamma(1. + x)
+ math_ops.lgamma(self.total_count))
def _mean(self):
return self.total_count * math_ops.exp(self.logits)
def _mode(self):
adjusted_count = array_ops.where(
1. < self.total_count,
self.total_count - 1.,
array_ops.zeros_like(self.total_count))
return math_ops.floor(adjusted_count * math_ops.exp(self.logits))
def _variance(self):
return self._mean() / math_ops.sigmoid(-self.logits)
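# Minimal usage sketch (illustrative only; assumes a TF 1.x graph/session set-up):
#
#     import tensorflow as tf
#     from tensorflow.contrib.distributions import NegativeBinomial
#
#     dist = NegativeBinomial(total_count=5., probs=0.3)
#     samples = dist.sample(10)
#     log_probs = dist.log_prob([0., 1., 2.])
#     with tf.Session() as sess:
#         print(sess.run([samples, log_probs, dist.mean()]))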
| apache-2.0 | -7,202,898,354,091,818,000 | 38.055 | 80 | 0.664192 | false |
twosigma/marbles | marbles/mixins/setup.py | 1 | 2571 | #
# Copyright (c) 2018 Two Sigma Open Source, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import os.path
from setuptools import setup
url = 'https://github.com/twosigma/marbles'
setup_dir = os.path.dirname(__file__)
with open(os.path.join(setup_dir, 'marbles/mixins/VERSION')) as vfile:
__version__ = vfile.read().strip()
with open(os.path.join(setup_dir, 'classifiers.txt'), 'r') as f:
classifiers = [line.strip() for line in f.readlines()]
with open(os.path.join(setup_dir, 'README.rst'), 'r') as f:
long_description = f.read()
setup(
name='marbles.mixins',
version=__version__,
namespace_packages=[
'marbles'
],
packages=[
'marbles.mixins'
],
package_dir={
'marbles.mixins': 'marbles/mixins'
},
package_data={
'marbles.mixins': ['VERSION'],
},
test_suite='tests',
install_requires=[
'pandas<2,>=0.19.1'
],
extras_require={
'dev': [
'coverage',
'flake8',
]
},
license='MIT',
description=('Semantically-rich assertions for use '
'in marbles and unittest test cases'),
long_description=long_description,
long_description_content_type='text/x-rst',
author='Jane Adams, Leif Walsh',
author_email='[email protected], [email protected]',
url=url,
project_urls={
'Documentation': 'https://marbles.readthedocs.io',
'Source': url,
'Tracker': '{url}/issues'.format(url=url)
},
classifiers=classifiers
)
| mit | 4,601,538,994,261,230,600 | 31.544304 | 79 | 0.666667 | false |
eevee/floof | migration/versions/025_Add_remark_to_Artwork.py | 1 | 1530 | from sqlalchemy import *
from migrate import *
import migrate.changeset # monkeypatches Column
from floof.model import now
from floof.model.types import TZDateTime
from sqlalchemy.ext.declarative import declarative_base
TableBase = declarative_base()
# Modified tables
class Artwork(TableBase):
__tablename__ = 'artwork'
id = Column(Integer, primary_key=True, nullable=False)
resource_id = Column(Integer, nullable=False)
media_type = Column(Enum(u'image', u'text', u'audio', u'video', name='artwork_media_type'), nullable=False)
title = Column(Unicode(133), nullable=False)
hash = Column(Unicode(256), nullable=False, unique=True, index=True)
uploader_user_id = Column(Integer, nullable=False)
uploaded_time = Column(TZDateTime, nullable=False, index=True, default=now)
created_time = Column(TZDateTime, nullable=False, index=True, default=now)
original_filename = Column(Unicode(255), nullable=False)
mime_type = Column(Unicode(255), nullable=False)
file_size = Column(Integer, nullable=False)
rating_count = Column(Integer, nullable=False, default=0)
rating_sum = Column(Float, nullable=False, default=0)
rating_score = Column(Float, nullable=True, default=None)
remark = Column(UnicodeText, nullable=False, server_default=u'')
def upgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
Artwork.__table__.c.remark.create()
def downgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
Artwork.__table__.c.remark.drop()
| isc | -7,214,060,915,947,126,000 | 40.351351 | 111 | 0.731373 | false |
ryancoleman/autodock-vina | boost_1_54_0/tools/build/v2/test/indirect_conditional.py | 44 | 2692 | #!/usr/bin/python
# Copyright (C) 2006. Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
def test_basic():
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
exe a1 : a1.cpp : <conditional>@a1-rule ;
rule a1-rule ( properties * )
{
if <variant>debug in $(properties)
{
return <define>OK ;
}
}
exe a2 : a2.cpp : <conditional>@$(__name__).a2-rule
<variant>debug:<optimization>speed ;
rule a2-rule ( properties * )
{
if <optimization>speed in $(properties)
{
return <define>OK ;
}
}
exe a3 : a3.cpp :
<conditional>@$(__name__).a3-rule-1
<conditional>@$(__name__).a3-rule-2 ;
rule a3-rule-1 ( properties * )
{
if <optimization>speed in $(properties)
{
return <define>OK ;
}
}
rule a3-rule-2 ( properties * )
{
if <variant>debug in $(properties)
{
return <optimization>speed ;
}
}
""")
t.write("a1.cpp", "#ifdef OK\nint main() {}\n#endif\n")
t.write("a2.cpp", "#ifdef OK\nint main() {}\n#endif\n")
t.write("a3.cpp", "#ifdef OK\nint main() {}\n#endif\n")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/a1.exe")
t.expect_addition("bin/$toolset/debug/optimization-speed/a2.exe")
t.expect_addition("bin/$toolset/debug/optimization-speed/a3.exe")
t.cleanup()
def test_glob_in_indirect_conditional():
"""
Regression test: project-rules.glob rule run from inside an indirect
conditional should report an error as it depends on the 'currently loaded
project' concept and indirect conditional rules get called only after all
the project modules have already finished loading.
"""
t = BoostBuild.Tester(use_test_config=False)
t.write("jamroot.jam", """\
use-project /library-example/foo : util/foo ;
build-project app ;
""")
t.write("app/app.cpp", "int main() {}\n");
t.write("app/jamfile.jam", "exe app : app.cpp /library-example/foo//bar ;")
t.write("util/foo/bar.cpp", """\
#ifdef _WIN32
__declspec(dllexport)
#endif
void foo() {}
""")
t.write("util/foo/jamfile.jam", """\
rule print-my-sources ( properties * )
{
ECHO My sources: ;
ECHO [ glob *.cpp ] ;
}
lib bar : bar.cpp : <conditional>@print-my-sources ;
""")
t.run_build_system(status=1)
t.expect_output_lines(["My sources:", "bar.cpp"], False)
t.expect_output_lines("error: Reference to the project currently being "
"loaded requested when there was no project module being loaded.")
t.cleanup()
test_basic()
test_glob_in_indirect_conditional()
| apache-2.0 | 9,192,750,638,607,795,000 | 24.638095 | 79 | 0.635215 | false |
tundebabzy/frappe | frappe/patches/v8_0/setup_email_inbox.py | 16 | 1282 | import frappe, json
from frappe.core.doctype.user.user import ask_pass_update, setup_user_email_inbox
def execute():
"""
	deprecate the email inbox page if it exists
remove desktop icon for email inbox page if exists
patch to remove Custom DocPerm for communication
"""
if frappe.db.exists("Page", "email_inbox"):
frappe.delete_doc("Page", "email_inbox")
desktop_icon = frappe.db.get_value("Desktop Icon", {
"module_name": "Email",
"type": "Page",
"link": "email_inbox"
})
if desktop_icon:
frappe.delete_doc("Desktop Icon", desktop_icon)
frappe.db.sql("""update `tabCustom DocPerm` set `write`=0, email=1 where parent='Communication'""")
setup_inbox_from_email_account()
def setup_inbox_from_email_account():
""" add user inbox child table entry for existing email account in not exists """
frappe.reload_doc("core", "doctype", "user_email")
frappe.reload_doc("email", "doctype", "email_account")
email_accounts = frappe.get_all("Email Account", filters={"enable_incoming": 1},
fields=["name", "email_id", "awaiting_password", "enable_outgoing"])
for email_account in email_accounts:
setup_user_email_inbox(email_account.get("name"), email_account.get("awaiting_password"),
email_account.get("email_id"), email_account.get("enabled_outgoing")) | mit | -5,010,783,102,921,261,000 | 32.763158 | 100 | 0.710608 | false |
davzhang/helix-python-binding | org/apache/helix/messaging/AsyncCallback.py | 1 | 4472 | # package org.apache.helix.messaging
#from org.apache.helix.messaging import *
#from java.util import ArrayList
#from java.util import Date
#from java.util import List
#from java.util import Timer
#from java.util import TimerTask
#from org.apache.log4j import Logger
import time
from org.apache.helix.model.Message import Message
from org.apache.helix.util.logger import get_logger
#from org.apache.helix.util.UserExceptions import IllegalArgumentException
from org.apache.helix.util.misc import enum
class AsyncCallback:
"""
Java modifiers:
private static
Type:
Logger
"""
_logger = get_logger(__name__)
"""
Parameters:
long timeout
"""
def __init__(self, timeout=-1):
self._logger.info("Setting time out to " + timeout + " ms")
self._timeout = timeout
self._startTimeStamp = 0
self._messagesSent = []
self._messageReplied = []
self._timedOut = False
        self._isInterrupted = False
        # the Timer is created lazily in startTimer(); initialise the slot here
        # so onReply() can safely test it before any timer exists
        self._timer = None
# Timer self._timer = null;
# List<Message> self._messagesSent;
# protected final List<Message> self._messageReplied = new ArrayList<Message>();
# boolean self._timedOut = false;
# boolean self._isInterrupted = false;
def setTimeout(self, timeout):
"""
Returns void
Parameters:
timeout: long
Java modifiers:
final
"""
self._logger.info("Setting time out to " + timeout + " ms")
self._timeout = timeout
def getMessageReplied(self):
"""
Returns List<Message>
"""
return self._messageReplied
def isInterrupted(self):
"""
Returns boolean
"""
return self._isInterrupted
def setInterrupted(self, b):
"""
Returns void
Parameters:
b: boolean
"""
        self._isInterrupted = b
def onReply(self, message):
"""
Returns void
Parameters:
message: Message
Java modifiers:
final synchronized
"""
self._logger.info("OnReply msg " + message.getMsgId())
if not self.isDone():
            self._messageReplied.append(message)
try:
self.onReplyMessage(message)
except Exception, e:
self._logger.error(e)
if self.isDone():
if self._timer != None:
self._timer.cancel()
self.notifyAll()
def isDone(self):
"""
Returns boolean
"""
        return len(self._messageReplied) == len(self._messagesSent)
def isTimedOut(self):
"""
Returns boolean
"""
return self._timedOut
def setMessagesSent(self, generatedMessage):
"""
Returns void
Parameters:
generatedMessage: List<Message>
Java modifiers:
final
"""
self._messagesSent = generatedMessage
def startTimer(self):
"""
Returns void
Java modifiers:
final
"""
if self._timer == None and self._timeout > 0:
if self._startTimeStamp == 0:
# self._startTimeStamp = Date().getTime()
self._startTimeStamp = time.time()
#TODO: fix timer
# self._timer = Timer(True)
# self._timer.schedule(TimeoutTask(self), self._timeout)
def onTimeOut(self):
"""
Returns void
Java modifiers:
abstract
"""
pass
def onReplyMessage(self, message):
"""
Returns void
Parameters:
message: Message
Java modifiers:
abstract
"""
pass
# class TimeoutTask(TimerTask):
#
#
#
# """
#
# Parameters:
# AsyncCallback asyncCallback
# """
# def __init__(self, asyncCallback):
# self._callback = asyncCallback
#
#
# def run(self):
# """
# Returns void
# @Override
#
#
# """
# try:
# #TODO
## synchronized (_callback)
# self._callback.self._timedOut = True
# self._callback.notifyAll()
# self._callback.onTimeOut()
#
# except Exception, e:
# self._logger.error(e)
#
#
#
#
#
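# Illustrative concrete subclass (added as an example; not part of the original
# Helix port).  A usable callback has to implement the two abstract hooks:
#
#     class PrintingCallback(AsyncCallback):
#         def onTimeOut(self):
#             print("request timed out after %s ms" % self._timeout)
#
#         def onReplyMessage(self, message):
#             print("reply received: %s" % message.getMsgId())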
| apache-2.0 | 2,761,119,437,093,431,300 | 19.420091 | 87 | 0.522361 | false |
dT9r/Neo | tests/shrooms_test/shrooms.py | 2 | 3077 | import random
import numpy as np
import matplotlib.pyplot as plt
from lib.layers.linear import Linear
from lib.layers.softmax import SoftmaxCrossEntropyLayer
from lib.layers.tanh import Tanh
from lib.loss_functions.cross_entropy import SoftmaxCrossEntropyLoss
from lib.loss_functions.mse import MSELoss
from lib.models.sequential_model import SequentialModel
from lib.parameter_updates import SGD, Momentum
from lib.preprocessing import mean_subtraction, normalization
test_train_ratio = 1.0 / 5
def parse_line(line):
x = []
chars = line.strip('\n').split(",")
for c in chars[1:]:
x += [ord(c)]
y = ord(chars[0]) == ord('p')
return np.array([x]).T, np.array([[float(y)], [float(not y)]])
data_file = open("data.txt", mode='r').readlines()
random.shuffle(data_file)
x_data, y_data = parse_line(data_file[0])
# parse the data file
for line in data_file[1:]:
x, y = parse_line(line)
y_data = np.concatenate((y_data, y), axis=1)
x_data = np.concatenate((x_data, x), axis=1)
data_size = len(x_data[0])
# normalize the data
x_data = mean_subtraction(x_data)
x_data = normalization(x_data)
# split the data
test_part = int(test_train_ratio * data_size)
# test_part = data_size - 4500
x_train = np.array(x_data[:, test_part:])
y_train = np.array(y_data[:, test_part:])
x_test = np.array(x_data[:, :test_part])
y_test = np.array(y_data[:, :test_part])
# ============ Training done here! =============
# -------------- 96 % model ------------
# rate = 0.007
# model = SequentialModel(rate, MSELoss())
# model.add_layer(Linear(22, 30, parameter_update=SGD()))
# model.add_layer(Tanh())
# model.add_layer(Linear(30, 2, parameter_update=SGD()))
# model.add_layer(Tanh())
# ------------ 97 % model ---------------
# rate = 0.009
# model = SequentialModel(rate, MSELoss())
# model.add_layer(Linear(22, 30, parameter_update=Momentum()))
# model.add_layer(Tanh())
# model.add_layer(Linear(30, 30, parameter_update=Momentum()))
# model.add_layer(Tanh())
# model.add_layer(Linear(30, 2, parameter_update=Momentum()))
# model.add_layer(Tanh())
# ----------- 99 % model !!! the BEST --------
rate = 0.001
model = SequentialModel(rate, SoftmaxCrossEntropyLoss())
model.add_layer(Linear(22, 30, parameter_update=Momentum()))
model.add_layer(Tanh())
model.add_layer(Linear(30, 30, parameter_update=Momentum()))
model.add_layer(Tanh())
model.add_layer(Linear(30, 2, parameter_update=Momentum()))
model.add_layer(SoftmaxCrossEntropyLayer())
# print(x_train.shape)
# print(y_train.shape)
errors = model.train(x_train, y_train, batch_size=20, error=True)
correct = 0.0
for i in range(x_test.shape[1]):
case = np.array([x_test[:, i]]).T
pred = model.forward(case)
target = y_test[:, i]
# print(pred)
if pred[0] > 0.5 and target[0] == 1:
correct += 1
elif pred[0] < 0.5 and target[0] == 0:
correct += 1
print("[*] Test data size: {}".format(x_test.shape[1]))
print("[*] Test Result: {}".format(correct / x_test.shape[1]))
error_x = np.arange(0, len(errors), 1)
plt.plot(error_x, errors)
plt.show()
| mit | -8,628,914,272,379,935,000 | 26.720721 | 68 | 0.650309 | false |
mlperf/training_results_v0.7 | NVIDIA/benchmarks/maskrcnn/implementations/pytorch/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py | 4 | 1574 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
from torch.utils.data.sampler import BatchSampler
class IterationBasedBatchSampler(BatchSampler):
"""
Wraps a BatchSampler, resampling from it until
a specified number of iterations have been sampled
"""
def __init__(self, batch_sampler, num_iterations, start_iter=0, random_number_generator=None):
self.batch_sampler = batch_sampler
self.num_iterations = num_iterations
self.start_iter = start_iter
self.random_number_generator = random_number_generator
def __iter__(self):
iteration = self.start_iter
while iteration <= self.num_iterations:
# if the underlying sampler has a set_epoch method, like
# DistributedSampler, used for making each process see
# a different split of the dataset, then set it
if hasattr(self.batch_sampler.sampler, "set_epoch"):
if self.random_number_generator is not None:
iteration_seed = self.random_number_generator.randint(0, 2 ** 32 - 1)
self.batch_sampler.sampler.set_epoch(iteration_seed)
else:
self.batch_sampler.sampler.set_epoch(iteration)
for batch in self.batch_sampler:
iteration += 1
if iteration > self.num_iterations:
break
yield batch
def __len__(self):
return self.num_iterations
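# Usage sketch (illustrative only; assumes the standard torch samplers):
#
#     from torch.utils.data.sampler import BatchSampler, SequentialSampler
#
#     base = BatchSampler(SequentialSampler(range(1000)), batch_size=8, drop_last=False)
#     batch_sampler = IterationBasedBatchSampler(base, num_iterations=90000)
#     # iterating now re-cycles the underlying sampler until 90000 batches have been yielded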
| apache-2.0 | 7,547,716,869,282,416,000 | 41.540541 | 98 | 0.626429 | false |
robbi/pyload | module/plugins/accounts/RapideoPl.py | 8 | 2553 | # -*- coding: utf-8 -*-
import datetime
import hashlib
import time
from ..internal.misc import json
from ..internal.MultiAccount import MultiAccount
class RapideoPl(MultiAccount):
__name__ = "RapideoPl"
__type__ = "account"
__version__ = "0.10"
__status__ = "testing"
__config__ = [("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
("mh_list", "str", "Hoster list (comma separated)", ""),
("mh_interval", "int", "Reload interval in hours", 12)]
__description__ = "Rapideo.pl account plugin"
__license__ = "GPLv3"
__authors__ = [("goddie", "[email protected]")]
API_URL = "http://enc.rapideo.pl"
API_QUERY = {'site': "newrd",
'username': "",
'password': "",
'output': "json",
'loc': "1",
'info': "1"}
def grab_hosters(self, user, password, data):
html = self.load("https://www.rapideo.pl/clipboard.php?json=3").strip()
hostings = json.loads(html)
hostings_domains = [
domain for row in hostings for domain in row['domains']
if row['sdownload'] == "0"
]
self.log_debug(hostings_domains)
return hostings_domains
def grab_info(self, user, password, data):
try:
result = json.loads(self.run_auth_query())
except Exception:
#@TODO: return or let it be thrown?
return
premium = False
valid_untill = -1
if "expire" in result.keys() and result['expire']:
premium = True
valid_untill = time.mktime(datetime.datetime.fromtimestamp(
int(result['expire'])).timetuple())
traffic_left = result['balance']
return {'validuntil': valid_untill,
'trafficleft': traffic_left,
'premium': premium}
def signin(self, user, password, data):
data['usr'] = user
data['pwd'] = hashlib.md5(password).hexdigest()
try:
response = json.loads(self.run_auth_query())
except Exception:
self.fail_login()
if "errno" in response.keys():
self.fail_login()
def create_auth_query(self):
query = self.API_QUERY
query['username'] = self.info['data']['usr']
query['password'] = self.info['data']['pwd']
return query
def run_auth_query(self):
return self.load(self.API_URL,
post=self.create_auth_query())
| gpl-3.0 | 631,308,957,204,029,600 | 28.686047 | 85 | 0.534273 | false |
davicustodio/geonode | geonode/base/admin.py | 12 | 3893 | from django.contrib import admin
from django.conf import settings
import autocomplete_light
from autocomplete_light.contrib.taggit_field import TaggitField, TaggitWidget
from modeltranslation.admin import TranslationAdmin
from geonode.base.models import (TopicCategory, SpatialRepresentationType, Region, RestrictionCodeType,
ContactRole, Link, License)
class MediaTranslationAdmin(TranslationAdmin):
class Media:
js = (
'modeltranslation/js/tabbed_translation_fields.js',
)
css = {
'screen': ('modeltranslation/css/tabbed_translation_fields.css',),
}
class LicenseAdmin(MediaTranslationAdmin):
model = License
list_display = ('id', 'name')
list_display_links = ('name',)
class TopicCategoryAdmin(MediaTranslationAdmin):
model = TopicCategory
list_display_links = ('identifier',)
list_display = ('identifier', 'description', 'gn_description', 'is_choice')
if settings.MODIFY_TOPICCATEGORY is False:
exclude = ('identifier', 'description',)
def has_add_permission(self, request):
# the records are from the standard TC 211 list, so no way to add
if settings.MODIFY_TOPICCATEGORY:
return True
else:
return False
def has_delete_permission(self, request, obj=None):
# the records are from the standard TC 211 list, so no way to remove
if settings.MODIFY_TOPICCATEGORY:
return True
else:
return False
class RegionAdmin(MediaTranslationAdmin):
model = Region
list_display_links = ('name',)
list_display = ('code', 'name', 'parent')
search_fields = ('code', 'name',)
group_fieldsets = True
class SpatialRepresentationTypeAdmin(MediaTranslationAdmin):
model = SpatialRepresentationType
list_display_links = ('identifier',)
list_display = ('identifier', 'description', 'gn_description', 'is_choice')
def has_add_permission(self, request):
# the records are from the standard TC 211 list, so no way to add
return False
def has_delete_permission(self, request, obj=None):
# the records are from the standard TC 211 list, so no way to remove
return False
class RestrictionCodeTypeAdmin(MediaTranslationAdmin):
model = RestrictionCodeType
list_display_links = ('identifier',)
list_display = ('identifier', 'description', 'gn_description', 'is_choice')
def has_add_permission(self, request):
# the records are from the standard TC 211 list, so no way to add
return False
def has_delete_permission(self, request, obj=None):
# the records are from the standard TC 211 list, so no way to remove
return False
class ContactRoleAdmin(admin.ModelAdmin):
model = ContactRole
list_display_links = ('id',)
list_display = ('id', 'contact', 'resource', 'role')
list_editable = ('contact', 'resource', 'role')
form = autocomplete_light.modelform_factory(ContactRole)
class LinkAdmin(admin.ModelAdmin):
model = Link
list_display_links = ('id',)
list_display = ('id', 'resource', 'extension', 'link_type', 'name', 'mime')
list_filter = ('resource', 'extension', 'link_type', 'mime')
search_fields = ('name', 'resource__title',)
form = autocomplete_light.modelform_factory(Link)
admin.site.register(TopicCategory, TopicCategoryAdmin)
admin.site.register(Region, RegionAdmin)
admin.site.register(SpatialRepresentationType, SpatialRepresentationTypeAdmin)
admin.site.register(RestrictionCodeType, RestrictionCodeTypeAdmin)
admin.site.register(ContactRole, ContactRoleAdmin)
admin.site.register(Link, LinkAdmin)
admin.site.register(License, LicenseAdmin)
class ResourceBaseAdminForm(autocomplete_light.ModelForm):
keywords = TaggitField(widget=TaggitWidget('TagAutocomplete'), required=False)
| gpl-3.0 | -3,501,797,586,066,749,400 | 33.451327 | 103 | 0.692525 | false |
arthurdk/gk-analysis | GKFetcher.py | 1 | 6220 | from bs4 import BeautifulSoup
import urllib2
import urllib
from GKReview import *
import pickle
import os.path
import re
import sys
from PIL import Image
import numpy as np
from os import path
db_file = "gk-list.db"
db_file2 = "gk-full.db"
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def fetch_parse_test_content(url):
"""
Spaghetti code retrieving content from a single test
:param url:
:return: the test content (string)
"""
content = ""
try:
html = urllib2.urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
content = soup.find("div", {"id": "articleId"}).getText()
# conclusion = soup.find("div", {"id": "story-conclusion"}).getText()
# summary = soup.find("div", {"class": "summary"}).getText() # Pro and Cons
    except (AttributeError, urllib2.HTTPError):
print("An error happened, review (content) skipped.")
pass
return content.encode('utf-8')
def fetch_parse_page(url):
"""
Spaghetti code retrieving a list of reviews
:param url:
:return: list of reviews
"""
gk_reviews = []
nb_try = 0
while(nb_try < 3):
try:
html = urllib2.urlopen(url).read()
soup = BeautifulSoup(html, 'html.parser')
for article in soup.find_all('article'):
try:
review_content = article.findAll("div", {"class": "review-content"})
rating = article.findAll("div", {"class": "rating"})[0].getText()
if len(review_content) > 0 and represents_int(rating):
review_soup = review_content[0]
info_div = review_soup.findAll("p", {"class": "title"})[0]
review_link = "http://www.gamekult.com" + info_div.findAll("a")[0].get('href')
title = info_div.getText()
# Shitty regex hackline
raw_reviewer = review_soup.findAll("p", {"class": "byline"})[0].getText()
raw_reviewer = raw_reviewer.split("Par ")[1].split(", le")
reviewer = raw_reviewer[0]
match = re.search(r'(\d+/\d+/\d+)', raw_reviewer[1])
date = match.group(1)
review = GKReview(title=title, reviewer=reviewer, review_link=review_link,
rating=rating,
date=date)
gk_reviews.append(review)
except:
print("Failed to parse test from list")
print(article.prettify())
print "Unexpected error:", sys.exc_info()[0]
pass
nb_try = 3
        except urllib2.HTTPError:
            # count the failed attempt, otherwise the retry loop never terminates
            nb_try += 1
            print("Entire page skipped due to server error")
            if nb_try < 3:
                print("Retrying...")
return gk_reviews
def fetch_parse_nth_first_page(nb_page, force_download, cache):
"""
Spaghetti code retrieving a list of reviews for the n-th last page of tests
:param nb_page: number of page to process
:param force_download:
:param cache: true if cache allowed
:return: list of reviews
"""
reviews = []
# Fetch test listing if needed
if not os.path.isfile(db_file) or force_download:
page = 1
condition = True
while condition:
url = "http://www.gamekult.com/jeux/test-jeux-video.html?p=%d" % page
fetched = fetch_parse_page(url)
reviews = reviews + fetched
print("Processed page %s" % page)
page += 1
condition = len(fetched) > 0 and page <= nb_page
if cache:
with open(db_file, 'wb') as output:
pickle.dump(reviews, output, protocol=pickle.HIGHEST_PROTOCOL)
elif os.path.isfile(db_file):
        with open(db_file, 'rb') as data_in:
reviews = pickle.load(data_in)
return reviews
def image_url_fetcher(url):
response = urllib.urlopen(url)
content = response.read()
outf = open("tmp-mask", 'wb')
outf.write(content)
outf.close()
d = path.dirname(__file__)
return image_fetcher(path.join(d, "tmp-mask"))
def image_fetcher(filepath):
return np.array(Image.open(filepath))
def fetch_parse_full_tests(force_download, cache, nb_page):
"""
Fetch & Parse both lists and content
:param force_download:
:param cache: true if cache allowed
:param nb_page:
:return: list of reviews
"""
reviews = []
# Fetch tests content if needed
if not os.path.isfile(db_file2) or force_download:
print("Fetching data..")
reviews = fetch_parse_nth_first_page(nb_page, force_download=force_download, cache=cache)
for index, review in enumerate(reviews):
review.content = fetch_parse_test_content(review.link)
print("%d/%d" % (index + 1, len(reviews)))
if cache:
with open(db_file2, 'wb') as output:
pickle.dump(reviews, output, protocol=pickle.HIGHEST_PROTOCOL)
elif os.path.isfile(db_file2):
print("Nothing to fetch, data loaded from disk.")
        with open(db_file2, 'rb') as data_in:
reviews = pickle.load(data_in)
return reviews
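# Example call (illustrative): fetch the five most recent listing pages and their
# test contents, caching the result on disk for later runs.
#     reviews = fetch_parse_full_tests(force_download=False, cache=True, nb_page=5)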
'''
def fetch_translation(reviews):
for review in reviews:
try:
params = {"lang": "fr-en", "text": review.content,
"key": "my_key",
'format': "plain"}
url = "https://translate.yandex.net/api/v1.5/tr.json/translate?%s" % (urllib.urlencode(params))
data = urllib2.urlopen(url).read()
review.translation = data
except:
print("A translation error happened.")
pass
return reviews
'''
# For manual testing purpose
def main():
reviews = fetch_parse_nth_first_page(10)
for review in reviews:
print review.link
review.content = fetch_parse_test_content(review.link)
print(review.content)
break
if __name__ == "__main__":
main()
| mit | 17,952,096,803,976,520 | 32.085106 | 107 | 0.557235 | false |
MaximKsh/web_1sem | repository/askkashirin/askservice/migrations/0001_initial.py | 1 | 4667 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-15 15:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('rating', models.IntegerField(verbose_name='Рейтинг')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Автор')),
],
options={
'verbose_name': 'Лайк',
'verbose_name_plural': 'Лайки',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creation_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
],
options={
'verbose_name': 'Сообщение',
'verbose_name_plural': 'Сообщения',
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('avatarFilename', models.CharField(blank=True, max_length=255, verbose_name='Имя файла аватара')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Профиль',
'verbose_name_plural': 'Профили',
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(max_length=255, verbose_name='Тег')),
],
options={
'verbose_name': 'Тег',
'verbose_name_plural': 'Теги',
},
),
migrations.CreateModel(
name='Answer',
fields=[
('post_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='askservice.Post')),
('content', models.TextField(verbose_name='Содержание')),
('approved', models.BooleanField(default=False, verbose_name='Правильный')),
],
options={
'verbose_name': 'Ответ',
'verbose_name_plural': 'Ответы',
},
bases=('askservice.post',),
),
migrations.CreateModel(
name='Question',
fields=[
('post_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='askservice.Post')),
('title', models.CharField(max_length=255, verbose_name='Название')),
('content', models.TextField(blank=True, verbose_name='Содержание')),
],
options={
'verbose_name': 'Вопрос',
'verbose_name_plural': 'Вопросы',
},
bases=('askservice.post',),
),
migrations.AddField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='like',
name='post',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='askservice.Post', verbose_name='К сообщению'),
),
migrations.AddField(
model_name='answer',
name='to_question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='askservice.Question', verbose_name='На вопрос'),
),
]
| unlicense | 2,540,442,368,261,894,000 | 40.915888 | 190 | 0.550279 | false |
lpantano/bcbio-nextgen | bcbio/bam/coverage.py | 7 | 6031 | """
calculate coverage across a list of regions
"""
import os
import six
import matplotlib as mpl
mpl.use('Agg', force=True)
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib
import seaborn as sns
import pandas as pd
import pybedtools
from bcbio.utils import rbind, file_exists
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
import bcbio.pipeline.datadict as dd
from pylab import stem, setp
from collections import defaultdict
from itertools import repeat
def _calc_regional_coverage(in_bam, chrom, start, end, samplename, work_dir):
"""
given a BAM and a region, calculate the coverage for each base in that
region. returns a pandas dataframe of the format:
chrom position coverage name
where the samplename column is the coverage at chrom:position
"""
region_bt = pybedtools.BedTool("%s\t%s\t%s\n" % (chrom, start, end), from_string=True).saveas()
region_file = region_bt.fn
coords = "%s:%s-%s" % (chrom, start, end)
tx_tmp_file = os.path.join(work_dir, "coverage-%s-%s.txt" % (samplename, coords.replace(":", "_")))
cmd = ("samtools view -b {in_bam} {coords} | "
"bedtools coverage -a {region_file} -b - -d > {tx_tmp_file}")
do.run(cmd.format(**locals()), "Plotting coverage for %s %s" % (samplename, coords))
names = ["chom", "start", "end", "offset", "coverage"]
df = pd.io.parsers.read_table(tx_tmp_file, sep="\t", header=None,
names=names).dropna()
os.remove(tx_tmp_file)
df["sample"] = samplename
df["chrom"] = chrom
df["position"] = df["start"] + df["offset"] - 1
return df[["chrom", "position", "coverage", "sample"]]
def _combine_regional_coverage(in_bams, samplenames, chrom, start, end, work_dir):
"""
given a list of bam files, sample names and a region, calculate the
coverage in the region for each of the samples and return a tidy pandas
dataframe of the format:
chrom position coverage name
"""
dfs = [_calc_regional_coverage(bam, chrom, start, end, sample, work_dir) for bam, sample
in zip(in_bams, samplenames)]
return rbind(dfs)
def _get_caller_colormap(callers):
colors = matplotlib.colors.ColorConverter.colors.keys()
return {caller: colors[index] for index, caller in enumerate(callers)}
def _get_caller_heights(callers, plot):
max_y = plot.get_ylim()[1] * 0.2
spacing = max_y / len(callers)
return {caller: spacing + spacing * index for index, caller in enumerate(callers)}
def _get_stems_by_callers(intervals):
stems = defaultdict(list)
for interval in intervals:
pos = interval.start
caller = interval.fields[3]
stems[caller].append(pos)
return stems
def _add_stems_to_plot(interval, stem_bed, samples, plot):
stems = _get_stems_by_callers(stem_bed.tabix_intervals(interval))
callers = sorted(stems.keys())
caller_colormap = _get_caller_colormap(callers)
caller_heights = _get_caller_heights(callers, plot)
for caller in callers:
stem_color = caller_colormap[caller]
caller_stems = stems[caller]
stem_heights = list(repeat(caller_heights[caller], len(caller_stems)))
markerline, _, baseline = stem(caller_stems, stem_heights, '-.',
label=caller)
setp(markerline, 'markerfacecolor', stem_color)
setp(baseline, 'color', 'r', 'linewidth', 0)
plt.legend()
def _split_regions(chrom, start, end):
"""Split regions longer than 100kb into smaller sections.
"""
window_size = 1e5
if end - start < window_size * 5:
return [(chrom, start, end)]
else:
out = []
for r in pybedtools.BedTool().window_maker(w=window_size,
b=pybedtools.BedTool("%s\t%s\t%s" % (chrom, start, end),
from_string=True)):
out.append((r.chrom, r.start, r.end))
return out
def plot_multiple_regions_coverage(samples, out_file, region_bed=None, stem_bed=None):
"""
given a list of bcbio samples and a bed file or BedTool of regions,
makes a plot of the coverage in the regions for the set of samples
if given a bed file or BedTool of locations in stem_bed with a label,
plots lollipops at those locations
"""
PAD = 100
if file_exists(out_file):
return out_file
in_bams = [dd.get_align_bam(x) for x in samples]
samplenames = [dd.get_sample_name(x) for x in samples]
if isinstance(region_bed, six.string_types):
region_bed = pybedtools.BedTool(region_bed)
if isinstance(stem_bed, six.string_types):
stem_bed = pybedtools.BedTool(stem_bed)
if stem_bed is not None: # tabix indexed bedtools eval to false
stem_bed = stem_bed.tabix()
plt.clf()
plt.cla()
with file_transaction(out_file) as tx_out_file:
with PdfPages(tx_out_file) as pdf_out:
sns.despine()
for line in region_bed:
for chrom, start, end in _split_regions(line.chrom, max(line.start - PAD, 0),
line.end + PAD):
df = _combine_regional_coverage(in_bams, samplenames, chrom,
start, end, os.path.dirname(tx_out_file))
plot = sns.tsplot(df, time="position", unit="chrom",
value="coverage", condition="sample")
if stem_bed is not None: # tabix indexed bedtools eval to false
interval = pybedtools.Interval(chrom, start, end)
_add_stems_to_plot(interval, stem_bed, samples, plot)
plt.title("{chrom}:{start}-{end}".format(**locals()))
pdf_out.savefig(plot.get_figure())
plt.close()
return out_file
| mit | -3,364,233,992,817,136,000 | 40.881944 | 107 | 0.613 | false |
WimPessemier/uaf | examples/pyuaf/client/how_to_create_a_client.py | 3 | 3439 | # examples/pyuaf/client/how_to_create_a_client.py
"""
EXAMPLE: how to create a client
====================================================================================================
The Client constructor signature looks like this:
pyuaf.client.Client.__init__(settings=None, loggingCallback=None)
with:
- 'settings': optional: could be
- a pyuaf.client.settings.ClientSettings instance
- or simply a string (the name of the client)
- or None (so default pyuaf.client.settings.ClientSettings() are used).
- 'loggingCallback': optional: a callback function to catch log messages of type pyuaf.util.LogMessage.
See the PyUAF HTML documentation for more info.
"""
import time, os
import pyuaf
from pyuaf.client import Client
from pyuaf.client.settings import ClientSettings
from pyuaf.util import loglevels, Address, NodeId
# we can create some ClientSettings:
settings = ClientSettings()
settings.applicationName = "MyClient"
settings.discoveryUrls.append("opc.tcp://localhost:4841")
settings.logToStdOutLevel = loglevels.Info # print Info, Warning and Error logging to the console
settings.logToCallbackLevel = loglevels.Debug # send Debug, Info, Warning and Error logging to the callback
# And if you want to catch the logging output, you may also define a callback.
# In this case we define a callback to write the logging output to a file in the user's home directory.
# (but this callback is optional of course, only define one if you want to do something more with the
# logging output than simply printing it to the console (i.e. sending it to the stdout))
f = open(os.path.expanduser("~/my_logging_output.txt"), "w")
def callback(msg):
logDetailsString = ""
logDetailsString += time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime(msg.ctime))
logDetailsString += ".%.3d " %msg.msec
logDetailsString += "%-10s " %msg.applicationName
logDetailsString += "%-20s " %msg.loggerName
logDetailsString += "%-8s " %loglevels.toString(msg.level)
# msg.message may contain multiple lines, so we prepend the other logging details in
# front of each line:
for line in msg.message.splitlines():
f.write("%s: %s\n" %(logDetailsString, line))
# Example 0: create a client with the default settings and no logging callback
# --------------------------------------------------------------------------------------------------
client0 = Client()
# Example 1: create a client by specifying the settings
# --------------------------------------------------------------------------------------------------
client1 = Client(settings)
# Example 2: create a client by specifying the settings
# --------------------------------------------------------------------------------------------------
client2 = Client(settings, callback)
# Example 3: create a client by specifying only the logging callback
# --------------------------------------------------------------------------------------------------
client3 = Client(loggingCallback=callback)
# you can still provide settings and register a logging callback afterwards, but you'll need to use:
# client0.setClientSettings(settings)
# client0.registerLoggingCallback(callback)
# read the Value attribute of some non-existing node in order to have some Error output sent to the callback:
client2.read([Address(NodeId("InvalidIdentifier","InvalidNamespace"),"InvalidServerURI")])
f.close() | lgpl-3.0 | 8,404,964,052,538,288,000 | 44.263158 | 109 | 0.633324 | false |
umitproject/packet-manipulator | umit/pm/backend/scapy/context/sequence.py | 2 | 12945 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008, 2009 Adriano Monteiro Marques
#
# Author: Francesco Piccinno <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from threading import Lock
from umit.pm.core.i18n import _
from umit.pm.core.logger import log
from umit.pm.core.atoms import with_decorator
from umit.pm.backend.scapy.serialize import load_sequence, save_sequence
from umit.pm.backend.scapy.utils import execute_sequence
def register_sequence_context(BaseSequenceContext):
class SequenceContext(BaseSequenceContext):
file_types = [(_('Scapy sequence'), '*.pms'),
(_('Flat sequence (pcap)'), '*.pcap'),
(_('Flat sequence (pcap + gz)'), '*.pcap.gz')]
def __init__(self, seq, count=1, inter=0, iface=None, strict=True, \
report_recv=False, report_sent=True, capmethod=0, \
scallback=None, rcallback=None, sudata=None, rudata=None):
BaseSequenceContext.__init__(self, seq, count, inter, iface,
strict, report_recv, report_sent,
capmethod,
scallback, rcallback, sudata, rudata)
self.sequencer = None
self.internal = False
self.lock = Lock()
if count:
self.summary = _('Executing sequence (%d packets %d times)') % \
(self.tot_packet_count, count)
else:
self.summary = _('Looping sequence (%d packets)') % \
self.tot_packet_count
def load(self, operation=None):
log.debug("Loading sequence from %s" % self.cap_file)
if self.cap_file:
self.seq = None
try:
plen = 0
fsize = None
loader = load_sequence(self.cap_file)
for tree, tlen, perc, size in loader.parse_async():
if operation and tlen % 10 == 0 :
if not fsize:
if size >= 1024 ** 3:
fsize = "%.1f GB" % (size / (1024.0 ** 3))
elif size >= 1024 ** 2:
fsize = "%.1f MB" % (size / (1024.0 ** 2))
else:
fsize = "%.1f KB" % (size / 1024.0)
operation.summary = \
_('Loading sequence %s - %d packets (%s)') % \
(self.cap_file, tlen, fsize)
operation.percentage = perc
self.seq = tree
plen = tlen
self.tot_loop_count = loader.attr_loopcnt
self.inter = loader.attr_inter
self.strict = loader.attr_strict
self.report_sent = loader.attr_sent
self.report_recv = loader.attr_recv
self.title = self.cap_file
self.summary = \
_('Sequence %s loaded - %d packets (%s)') % \
(self.cap_file, plen, fsize)
if operation:
operation.summary = self.summary
operation.percentage = 100.0
except Exception, err:
self.seq = None
self.summary = str(err)
if operation:
operation.summary = str(err)
operation.percentage = 100.0
if self.seq is not None:
self.status = self.SAVED
return True
self.status = self.NOT_SAVED
return False
def save(self, operation=None):
if self.cap_file.lower().endswith('.pcap') or \
self.cap_file.lower().endswith('.pcap.gz'):
log.debug("Saving sequence as pcap file to %s" % self.cap_file)
old = []
old, self.data = self.data, old
for node in self.seq.get_children():
self.data.append(node.data.packet)
import umit.pm.backend
ret = umit.pm.backend.StaticContext.save(self, operation)
old, self.data = self.data, old
return ret
log.debug("Saving sequence to %s" % self.cap_file)
if self.cap_file and self.seq is not None:
try:
idx = 0
size = 0
for idx, perc, size in save_sequence(self.cap_file,
self.seq, self.strict,
self.report_recv,
self.report_sent,
self.tot_loop_count,
self.inter):
if operation and idx % 10 == 0 :
if size >= 1024 ** 3:
fsize = "%.1f GB" % (size / (1024.0 ** 3))
elif size >= 1024 ** 2:
fsize = "%.1f MB" % (size / (1024.0 ** 2))
else:
fsize = "%.1f KB" % (size / 1024.0)
operation.summary = \
_('Saving sequence to %s - %d packets (%s)') % \
(self.cap_file, idx, fsize)
operation.percentage = perc
if size >= 1024 ** 3:
fsize = "%.1f GB" % (size / (1024.0 ** 3))
elif size >= 1024 ** 2:
fsize = "%.1f MB" % (size / (1024.0 ** 2))
else:
fsize = "%.1f KB" % (size / 1024.0)
self.title = self.cap_file
self.summary = \
_('Sequence %s saved - %d packets (%s)') % \
(self.cap_file, idx, fsize)
if operation:
operation.summary = self.summary[:]
operation.percentage = 100.0
self.status = self.SAVED
return True
except Exception, err:
self.summary = str(err)
if operation:
operation.summary = str(err)
operation.percentage = 100.0
self.status = self.NOT_SAVED
return False
self.status = self.NOT_SAVED
return False
@with_decorator
def get_all_data(self):
return BaseSequenceContext.get_all_data(self)
@with_decorator
def get_data(self):
return BaseSequenceContext.get_data(self)
@with_decorator
def set_data(self, val):
return BaseSequenceContext.set_data(self)
# We really need this lock here?
@with_decorator
def get_sequence(self):
return BaseSequenceContext.get_sequence(self)
@with_decorator
def set_sequence(self, val):
return BaseSequenceContext.set_sequence(self, val)
def _start(self):
if not self.tot_loop_count or \
self.tot_packet_count - self.packet_count > 0 or \
self.tot_loop_count - self.loop_count > 0:
self.internal = True
self.state = self.RUNNING
self.sequencer = execute_sequence(
self.seq,
self.tot_loop_count - self.loop_count,
self.inter, self.iface, self.strict,
self.capmethod,
self.__send_callback, self.__recv_callback,
self.sudata, self.rudata, self.__exc_callback
)
return True
return False
def _resume(self):
if self.sequencer and self.sequencer.isAlive():
return False
return self._start()
def _restart(self):
if self.sequencer and self.sequencer.isAlive():
return False
self.packet_count = 0
self.loop_count = 0
self.percentage = 0.0
self.answers = 0
self.received = 0
return self._start()
def _stop(self):
self.internal = False
if self.sequencer:
self.sequencer.stop()
else:
self.state = self.NOT_RUNNING
return True
_pause = _stop
def __exc_callback(self, exc):
self.internal = False
self.state = self.NOT_RUNNING
self.summary = str(exc)
def __send_callback(self, packet, want_reply, udata):
if not packet:
self.loop_count += 1
if self.tot_loop_count:
self.summary = _('Running sequence %d of %d times') % \
(self.loop_count, self.tot_loop_count)
else:
                self.summary = _('Sequence has run %d times') % \
self.loop_count
else:
self.packet_count += 1
if want_reply:
self.summary = _('Sending packet %s and waiting a reply') \
% packet.summary()
else:
self.summary = _('Sending packet %s') % packet.summary()
if self.report_sent:
self.data.append(packet)
pkts = self.packet_count % self.tot_packet_count
if self.packet_count >= self.tot_packet_count and pkts == 0 and \
not self.tot_loop_count:
pkts = 1
else:
pkts /= float(self.tot_packet_count)
# Calculate percentage using also the loop counter if we
# are not in infinite loop.
if self.tot_loop_count:
self.percentage = \
((pkts * (1.0 / self.tot_loop_count)) * \
(float(self.loop_count) /
float(self.tot_loop_count))) * 100.0
else:
self.percentage = pkts * 100.0
if self.scallback:
            # FIXME: clean up udata handling (also in other files)
self.scallback(packet, want_reply, self.loop_count,
self.packet_count, udata)
if not self.internal:
self.state = self.NOT_RUNNING
return self.state == self.NOT_RUNNING or \
self.state == self.PAUSED
def __recv_callback(self, packet, reply, is_reply, udata):
if reply is None:
if self.loop_count == self.tot_loop_count:
self.internal = False
self.summary = _('Sequence finished with %d packets sent '
'and %d received') % (self.packet_count,
self.received)
else:
self.summary = _('Looping sequence')
else:
self.received += 1
self.summary = _('Received %s') % reply.summary()
if is_reply or self.report_recv:
self.data.append(reply)
if self.rcallback:
self.rcallback(packet, reply, udata)
if not self.internal:
self.state = self.NOT_RUNNING
return self.state == self.NOT_RUNNING or \
self.state == self.PAUSED
def join(self):
if self.sequencer and self.sequencer.isAlive():
self.sequencer.stop()
return SequenceContext
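# Illustrative sketch (assumption, not part of the original module): the
# factory above is normally fed the backend's BaseSequenceContext class; the
# constructor arguments below are hypothetical examples.
def _example_build_context(base_cls, seq):
    SequenceContextCls = register_sequence_context(base_cls)
    return SequenceContextCls(seq, count=1, inter=0.1, iface="eth0")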
| gpl-2.0 | -7,979,146,477,143,538,000 | 35.36236 | 80 | 0.455388 | false |
adamrvfisher/TechnicalAnalysisLibrary | OptimizedShortVol.py | 1 | 2647 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 28 23:23:57 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
from pandas_datareader import data
import random as rand
import pandas as pd
import time as t
empty = []
asone = pd.DataFrame()
start = t.time()
iterations = range(0,1000)
s = data.DataReader('^VIX', 'yahoo', start='1/1/1900', end='01/01/2050')
s2 = data.DataReader('^VXV', 'yahoo', start='1/1/1900', end='01/01/2050')
s3 = data.DataReader('VXX', 'yahoo', start='1/1/1900', end='01/01/2050')
s3['LogRet'] = np.log(s3['Adj Close']/s3['Adj Close'].shift(1))
s3['LogRet'] = s3['LogRet'].fillna(0)
s3['Meter'] = s['Close']/s2['Close']  # VIX/VXV ratio: short- vs medium-term implied volatility
s3['Meter'] = s3['Meter'].fillna(0)
s3['Meter'].plot(grid=True, figsize=(8, 5))
for i in iterations:
    # randomly sample candidate entry/exit thresholds for the VIX/VXV ratio
    a = rand.random()*2
    b = rand.random()*2
s3['Touch'] = np.where(s3['Meter'] < a, -1, 0) # short signal
s3['Touch'] = np.where(s3['Meter'] > b, 0, s3['Touch']) #flat signal
s3['Sustain'] = np.where(s3['Touch'].shift(1) == -1, -1, 0) #short
s3['Sustain'] = np.where(s3['Sustain'].shift(1) == -1, -1, #stays
s3['Sustain']) #short
    s3['Sustain'] = np.where(s3['Touch'].shift(1) == 0, 0, s3['Sustain']) #flat
s3['Sustain'] = np.where(s3['Sustain'].shift(1) == 0, 0, #stays
s3['Sustain']) #flat
# s3['Sustain'] = np.where(s3['Meter'] < .8, 0, s3['Sustain']) #cover short
s3['Regime'] = s3['Touch'] + s3['Sustain']
s3['Strategy'] = (s3['Regime']).shift(1)*s3['LogRet']
s3['Strategy'] = s3['Strategy'].fillna(0)
    endgains = 1     # compounded growth of the strategy
    endreturns = 1   # compounded growth of a constant short-VXX benchmark
# returnstream = []
# gainstream = []
for g in s3['LogRet']:
slate = endreturns * (1+-g)
# returnstream.append(slate)
endreturns = slate
for h in s3['Strategy']:
otherslate = endgains * (1+h)
# gainstream.append(otherslate)
endgains = otherslate
if endreturns > endgains:
continue
empty.append(a)
empty.append(b)
empty.append(endreturns)
empty.append(endgains)
emptyseries = pd.Series(empty)
asone[i] = emptyseries.values
empty[:] = []
end = t.time()
z = asone.iloc[3]
w = np.percentile(z, 99.2)
v = [] #this variable stores the Nth percentile of top performers
u = pd.DataFrame() #this variable stores your financial advisors
for i in z:
if i > w:
v.append(i)
for i in v:
r = asone.columns[(asone == i).iloc[3]]
u = pd.concat([u,asone[r]], axis = 1)
y = max(z)
x = asone.columns[(asone == y).iloc[3]] #this is the column number
print(asone[x]) #this is the dataframe index based on column number
print(end-start) | apache-2.0 | -4,576,178,473,117,482,500 | 34.783784 | 78 | 0.585946 | false |
kimhungGCZ/combinedAL | nab/detectors/relative_entropy/relative_entropy_detector.py | 1 | 7088 | # ----------------------------------------------------------------------
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy
from scipy import stats
from nab.detectors.base import AnomalyDetector
class RelativeEntropyDetector(AnomalyDetector):
""" This detector is an implementation of online anomaly detection using
Relative Entropy statistic with multiple hypotheses as described in
Figure 1 of Wang, Chengwei, et al. "Statistical Techniques for Online
Anomaly Detection in Data Centers", Proceedings of the 8th ACM
international conference on Autonomic computing. ACM, 2011.
The algorithm is based on a hypothesis testing approach that compares
observed data against multiple null hypotheses, representing frequencies of
quantized data over a window. If the observed data is unseen and does not
agree with any existing hypothesis, it is declared anomalous and a new
hypothesis is created. Otherwise, it is declared non-anomalous, provided that
the accepted hypothesis occurs frequently enough. Decision to accept/reject a
null hypothesis is based on relative entropy compared against a threshold
of acceptable false negative probability determined by the chi-squared
distribution. Step-by-step details are given in code comments and parameters
have been tuned for best performance of NAB.
"""
def __init__(self, *args, **kwargs):
""" Variable names are kept consistent with algorithm's pseudo code in
the paper."""
super(RelativeEntropyDetector, self).__init__(*args, **kwargs)
# Timeseries of the metric on which anomaly needs to be detected
self.util = []
# Number of bins into which util is to be quantized
self.N_bins = 5.0
# Window size
self.W = 52
# Threshold against which the test statistic is compared. It is set to
# the point in the chi-squared cdf with N-bins -1 degrees of freedom that
# corresponds to 0.99.
self.T = stats.chi2.isf(0.01, self.N_bins - 1)
    # Threshold to determine if a hypothesis has occurred frequently enough
self.c_th = 1
# Tracks the current number of null hypothesis
self.m = 0
# Step size in time series quantization
self.stepSize = (self.inputMax - self.inputMin) / self.N_bins
# List of lists where P[i] indicates the empirical frequency of the ith
# hypothesis.
self.P = []
# List where c[i] tracks the number of windows that agree with P[i]
self.c = []
def handleRecord(self, inputData):
""" Returns a list of [anomalyScore] that takes a binary value of 0 or 1.
The anomalyScore is determined based on the agreement of the observed data
with existing hypotheses that occur frequently enough. Threshold to
accept/reject a null hypothesis and declare an anomaly is determined by
comparing the relative entropy of the observed data and all null
hypothesis against the point on chi-squared distribution that
corresponds to 0.99 (probability of incorrectly rejecting a
null-hypothesis).
"""
anomalyScore = 0.0
self.util.append(inputData["value"])
    # This check is for files where self.inputMin == self.inputMax, i.e.
    # all data points are identical and stepSize is 0, e.g. the
    # artificialNoAnomaly/art_flatline.csv file. Every point in such files
    # is declared non-anomalous.
    if self.stepSize != 0.0:
      # All points in the first window are declared non-anomalous and
      # anomaly detection begins when the number of data points seen is
      # greater than the window length.
if len(self.util) >= self.W:
# Extracting current window
util_current = self.util[-self.W:]
# Quantize window data points into discretized bin values
B_current = [math.ceil((c - self.inputMin) / self.stepSize) for c in
util_current]
# Create a histogram of empirical frequencies for the current window
# using B_current
P_hat = numpy.histogram(B_current,
bins=int(self.N_bins),
range=(0,self.N_bins),
density=True)[0]
# This is for the first null hypothesis
if self.m == 0:
self.P.append(P_hat)
self.c.append(1)
self.m = 1
else:
index = self.getAgreementHypothesis(P_hat)
# Check if any null hypothesis is accepted or rejected
if index != -1:
# If hypothesis accepted, update counter for hypothesis that tracks
# number of windows that have agreed to it so far.
self.c[index] += 1
# Check if hypothesis accepted occurs at least as frequently as
            # the given threshold. If not, classify the data point as anomalous.
if self.c[index] <= self.c_th:
anomalyScore = 1.0
else:
# If all null hypothesis rejected, create new hypothesis based
# on current window and update variables tracking hypothesis counts.
anomalyScore = 1.0
self.P.append(P_hat)
self.c.append(1)
self.m += 1
return [anomalyScore]
def getAgreementHypothesis(self,P_hat):
"""This function computes multinomial goodness-of-fit test. It calculates
the relative entropy test statistic between P_hat and all `m` null
hypothesis and compares it against the threshold `T` based on cdf of
chi-squared distribution. The test relies on the observation that if the
    null hypothesis P is true, then as the number of samples grows the relative
    entropy converges to a chi-squared distribution with K-1 degrees of
freedom.
The function returns the index of hypothesis that agrees with minimum
relative entropy. If all hypotheses disagree, the function returns -1.
@param P_hat (list) Empirical frequencies of the current window.
@return index (int) Index of the hypothesis with the minimum test
statistic.
"""
index = -1
minEntropy = float("inf")
for i in xrange(self.m):
entropy = 2 * self.W * stats.entropy(P_hat,self.P[i])
if entropy < self.T and entropy < minEntropy:
minEntropy = entropy
index = i
return index
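# Illustrative usage sketch (assumption: the detector instance is built by the
# NAB harness, whose base class supplies inputMin/inputMax); `values` is any
# iterable of floats.
def _example_handle_records(detector, values):
    return [detector.handleRecord({"value": v})[0] for v in values]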
| agpl-3.0 | -3,531,688,756,232,306,000 | 37.945055 | 80 | 0.670147 | false |
MediaKraken/mkarchive | subprogram_reactor.py | 1 | 6621 | '''
Copyright (C) 2015 Quinn D Granfor <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import logging # pylint: disable=W0611
from twisted.internet import ssl
from twisted.internet import reactor
from twisted.internet.protocol import Factory
import pika
from pika import exceptions
from pika.adapters import twisted_connection
from twisted.internet import defer, reactor, protocol,task
from network import network_base as network_base
from network import network_base_amqp as network_amqp
from common import common_config_ini
from common import common_docker
from common import common_logging
from common import common_signal
import time
import subprocess
import json
import uuid
# maps username -> list of (container_name, device, target, media_path) tuples
mk_containers = {}
docker_inst = common_docker.CommonDocker()
@defer.inlineCallbacks
def run(connection):
channel = yield connection.channel()
exchange = yield channel.exchange_declare(exchange='mkque_ex', type='direct', durable=True)
queue = yield channel.queue_declare(queue='mkque', durable=True)
yield channel.queue_bind(exchange='mkque_ex', queue='mkque')
yield channel.basic_qos(prefetch_count=1)
queue_object, consumer_tag = yield channel.basic_consume(queue='mkque', no_ack=False)
l = task.LoopingCall(read, queue_object)
l.start(0.01)
@defer.inlineCallbacks
def read(queue_object):
global mk_containers
logging.info('here I am in consume - read')
ch, method, properties, body = yield queue_object.get()
if body:
logging.info("body %s", body)
#network_base.NetworkEvents.ampq_message_received(body)
json_message = json.loads(body)
logging.info('json body %s', json_message)
if json_message['Type'] == 'Pause':
if json_message['Sub'] == 'Cast':
pass
elif json_message['Type'] == 'Play':
# to address the 30 char name limit for container
name_container = ((json_message['User'] + '_' + str(uuid.uuid4()).replace('-',''))[-30:])
logging.info('cont %s', name_container)
# TODO only for now until I get the device for websessions (cookie perhaps?)
if 'Device' in json_message:
define_new_container = (name_container, json_message['Device'],
json_message['Target'], json_message['Data'])
else:
define_new_container = (name_container, None,
json_message['Target'], json_message['Data'])
logging.info('def %s', define_new_container)
if json_message['User'] in mk_containers:
user_activity_list = mk_containers[json_message['User']]
user_activity_list.append(define_new_container)
mk_containers[json_message['User']] = user_activity_list
else:
# "double list" so each one is it's own instance
mk_containers[json_message['User']] = (define_new_container)
logging.info('dict %s', mk_containers)
if json_message['Sub'] == 'Cast':
# should only need to check for subs on initial play command
if 'Subtitle' in json_message:
subtitle_command = ' -subtitles ' + json_message['Subtitle']\
+ ' -subtitles_language ' + json_message['Language']
else:
subtitle_command = ''
logging.info('b4 cast run')
try:
docker_inst.com_docker_run_container(container_name=name_container,
container_command=('python /mediakraken/stream2chromecast/stream2chromecast.py'
+ ' -devicename ' + json_message['Target']
+ subtitle_command + ' -transcodeopts \'-c:v copy -c:a ac3'
+ ' -movflags faststart+empty_moov\' -transcode \'' + json_message['Data'] + '\''))
except Exception as e:
logging.error('cast ex %s', str(e))
logging.info('after cast run')
else:
logging.info('b4 run')
docker_inst.com_docker_run_container(container_name=name_container,
container_command=(
'ffmpeg -i \'' + json_message['Data'] + '\''))
logging.info('after run')
elif json_message['Type'] == 'Stop':
pass
yield ch.basic_ack(delivery_tag=method.delivery_tag)
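# Illustrative sketch (assumption, derived from the handler above): message
# bodies on the 'mkque' queue are JSON documents shaped roughly like this;
# every value below is a hypothetical example.
EXAMPLE_PLAY_MESSAGE = json.dumps({
    'Type': 'Play',
    'Sub': 'Cast',
    'User': 'demo_user',
    'Device': 'chromecast-1234',
    'Target': 'Living Room TV',
    'Data': '/mediakraken/media/example.mkv',
})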
class MediaKrakenServerApp(Factory):
def __init__(self):
# start logging
common_logging.com_logging_start('./log/MediaKraken_Subprogram_Reactor')
# set other data
self.server_start_time = time.mktime(time.gmtime())
self.users = {} # maps user names to network instances
self.option_config_json, self.db_connection = common_config_ini.com_config_read()
logging.info("Ready for connections!")
def buildProtocol(self, addr):
return network_base.NetworkEvents(self.users, self.db_connection)
if __name__ == '__main__':
# set signal exit breaks
common_signal.com_signal_set_break()
# fire off wait for it script to allow rabbitmq connection
wait_pid = subprocess.Popen(['/mediakraken/wait-for-it-ash.sh', '-h', 'mkrabbitmq', '-p', ' 5672'], shell=False)
wait_pid.wait()
# pika rabbitmq connection
parameters = pika.ConnectionParameters(credentials=pika.PlainCredentials('guest', 'guest'))
cc = protocol.ClientCreator(reactor, twisted_connection.TwistedProtocolConnection, parameters)
d = cc.connectTCP('mkrabbitmq', 5672)
d.addCallback(lambda protocol: protocol.ready)
d.addCallback(run)
# setup for the ssl keys
reactor.listenSSL(8903, MediaKrakenServerApp(),
ssl.DefaultOpenSSLContextFactory('./key/privkey.pem', './key/cacert.pem'))
reactor.run()
| gpl-2.0 | 8,269,457,984,934,147,000 | 43.736486 | 116 | 0.632533 | false |
vbshah1992/microblog | flask/lib/python2.7/site-packages/whoosh/lang/snowball/spanish.py | 74 | 10978 | from .bases import _StandardStemmer
from whoosh.compat import u
class SpanishStemmer(_StandardStemmer):
"""
The Spanish Snowball stemmer.
:cvar __vowels: The Spanish vowels.
:type __vowels: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2a_suffixes: Suffixes to be deleted in step 2a of the algorithm.
:type __step2a_suffixes: tuple
:cvar __step2b_suffixes: Suffixes to be deleted in step 2b of the algorithm.
:type __step2b_suffixes: tuple
:cvar __step3_suffixes: Suffixes to be deleted in step 3 of the algorithm.
:type __step3_suffixes: tuple
:note: A detailed description of the Spanish
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/spanish/stemmer.html
"""
__vowels = u("aeiou\xE1\xE9\xED\xF3\xFA\xFC")
__step0_suffixes = ("selas", "selos", "sela", "selo", "las",
"les", "los", "nos", "me", "se", "la", "le",
"lo")
__step1_suffixes = ('amientos', 'imientos', 'amiento', 'imiento',
'aciones', 'uciones', 'adoras', 'adores',
'ancias', u('log\xEDas'), 'encias', 'amente',
'idades', 'anzas', 'ismos', 'ables', 'ibles',
'istas', 'adora', u('aci\xF3n'), 'antes',
'ancia', u('log\xEDa'), u('uci\xf3n'), 'encia',
'mente', 'anza', 'icos', 'icas', 'ismo',
'able', 'ible', 'ista', 'osos', 'osas',
'ador', 'ante', 'idad', 'ivas', 'ivos',
'ico',
'ica', 'oso', 'osa', 'iva', 'ivo')
__step2a_suffixes = ('yeron', 'yendo', 'yamos', 'yais', 'yan',
'yen', 'yas', 'yes', 'ya', 'ye', 'yo',
u('y\xF3'))
__step2b_suffixes = (u('ar\xEDamos'), u('er\xEDamos'), u('ir\xEDamos'),
u('i\xE9ramos'), u('i\xE9semos'), u('ar\xEDais'),
'aremos', u('er\xEDais'), 'eremos',
u('ir\xEDais'), 'iremos', 'ierais', 'ieseis',
'asteis', 'isteis', u('\xE1bamos'),
u('\xE1ramos'), u('\xE1semos'), u('ar\xEDan'),
u('ar\xEDas'), u('ar\xE9is'), u('er\xEDan'),
u('er\xEDas'), u('er\xE9is'), u('ir\xEDan'),
u('ir\xEDas'), u('ir\xE9is'),
'ieran', 'iesen', 'ieron', 'iendo', 'ieras',
'ieses', 'abais', 'arais', 'aseis',
u('\xE9amos'), u('ar\xE1n'), u('ar\xE1s'),
u('ar\xEDa'), u('er\xE1n'), u('er\xE1s'),
u('er\xEDa'), u('ir\xE1n'), u('ir\xE1s'),
u('ir\xEDa'), 'iera', 'iese', 'aste', 'iste',
'aban', 'aran', 'asen', 'aron', 'ando',
'abas', 'adas', 'idas', 'aras', 'ases',
u('\xEDais'), 'ados', 'idos', 'amos', 'imos',
'emos', u('ar\xE1'), u('ar\xE9'), u('er\xE1'),
u('er\xE9'), u('ir\xE1'), u('ir\xE9'), 'aba',
'ada', 'ida', 'ara', 'ase', u('\xEDan'),
'ado', 'ido', u('\xEDas'), u('\xE1is'),
u('\xE9is'), u('\xEDa'), 'ad', 'ed', 'id',
'an', u('i\xF3'), 'ar', 'er', 'ir', 'as',
u('\xEDs'), 'en', 'es')
__step3_suffixes = ("os", "a", "e", "o", u("\xE1"),
u("\xE9"), u("\xED"), u("\xF3"))
def stem(self, word):
"""
Stem a Spanish word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
step1_success = False
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 0: Attached pronoun
for suffix in self.__step0_suffixes:
if word.endswith(suffix):
if rv.endswith(suffix):
if rv[:-len(suffix)].endswith((u("i\xE9ndo"),
u("\xE1ndo"),
u("\xE1r"), u("\xE9r"),
u("\xEDr"))):
word = (word[:-len(suffix)].replace(u("\xE1"), "a")
.replace(u("\xE9"), "e")
.replace(u("\xED"), "i"))
r1 = (r1[:-len(suffix)].replace(u("\xE1"), "a")
.replace(u("\xE9"), "e")
.replace(u("\xED"), "i"))
r2 = (r2[:-len(suffix)].replace(u("\xE1"), "a")
.replace(u("\xE9"), "e")
.replace(u("\xED"), "i"))
rv = (rv[:-len(suffix)].replace(u("\xE1"), "a")
.replace(u("\xE9"), "e")
.replace(u("\xED"), "i"))
elif rv[:-len(suffix)].endswith(("ando", "iendo",
"ar", "er", "ir")):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
elif (rv[:-len(suffix)].endswith("yendo") and
word[:-len(suffix)].endswith("uyendo")):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "amente" and r1.endswith(suffix):
step1_success = True
word = word[:-6]
r2 = r2[:-6]
rv = rv[:-6]
if r2.endswith("iv"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(("os", "ic", "ad")):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(suffix):
step1_success = True
if suffix in ("adora", "ador", u("aci\xF3n"), "adoras",
"adores", "aciones", "ante", "antes",
"ancia", "ancias"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
elif suffix in (u("log\xEDa"), u("log\xEDas")):
word = word.replace(suffix, "log")
rv = rv.replace(suffix, "log")
elif suffix in (u("uci\xF3n"), "uciones"):
word = word.replace(suffix, "u")
rv = rv.replace(suffix, "u")
elif suffix in ("encia", "encias"):
word = word.replace(suffix, "ente")
rv = rv.replace(suffix, "ente")
elif suffix == "mente":
word = word[:-5]
r2 = r2[:-5]
rv = rv[:-5]
if r2.endswith(("ante", "able", "ible")):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("idad", "idades"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
for pre_suff in ("abil", "ic", "iv"):
if r2.endswith(pre_suff):
word = word[:-len(pre_suff)]
rv = rv[:-len(pre_suff)]
elif suffix in ("ivo", "iva", "ivos", "ivas"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2a: Verb suffixes beginning 'y'
if not step1_success:
for suffix in self.__step2a_suffixes:
if (rv.endswith(suffix) and
word[-len(suffix) - 1:-len(suffix)] == "u"):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2b: Other verb suffixes
for suffix in self.__step2b_suffixes:
if rv.endswith(suffix):
if suffix in ("en", "es", u("\xE9is"), "emos"):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
if word.endswith("gu"):
word = word[:-1]
if rv.endswith("gu"):
rv = rv[:-1]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 3: Residual suffix
for suffix in self.__step3_suffixes:
if rv.endswith(suffix):
if suffix in ("e", u("\xE9")):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
if word[-2:] == "gu" and rv[-1] == "u":
word = word[:-1]
else:
word = word[:-len(suffix)]
break
word = (word.replace(u("\xE1"), "a").replace(u("\xE9"), "e")
.replace(u("\xED"), "i").replace(u("\xF3"), "o")
.replace(u("\xFA"), "u"))
return word
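# Illustrative usage sketch, not part of the original module:
def _example_stem():
    stemmer = SpanishStemmer()
    # returns shortened stems; verb and plural endings are stripped
    return [stemmer.stem(word) for word in (u("corriendo"), u("canciones"))]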
| bsd-3-clause | -8,039,430,257,987,905,000 | 43.266129 | 80 | 0.363181 | false |
ghislainv/deforestprob | forestatrisk/data.py | 1 | 8624 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# author :Ghislain Vieilledent
# email :[email protected], [email protected]
# web :https://ecology.ghislainv.fr
# python_version :>=2.7
# license :GPLv3
# ==============================================================================
# Import
from __future__ import division, print_function # Python 3 compatibility
import numpy as np
import os
from osgeo import ogr
import ee_hansen
import ee_roadless
from zipfile import ZipFile # To unzip files
import pandas as pd
import pkg_resources
from .miscellaneous import make_dir
try:
from urllib.request import urlretrieve # To download files from internet
except ImportError:
from urllib import urlretrieve # urllib with Python 2
# Extent of a shapefile
def extent_shp(inShapefile):
"""Compute the extent of a shapefile.
This function computes the extent (xmin, xmax, ymin, ymax) of a
shapefile.
:param inShapefile: Path to the input shapefile.
:return: The extent as a tuple (xmin, ymin, xmax, ymax)
"""
inDriver = ogr.GetDriverByName("ESRI Shapefile")
inDataSource = inDriver.Open(inShapefile, 0)
inLayer = inDataSource.GetLayer()
extent = inLayer.GetExtent()
extent = (extent[0], extent[2], extent[1], extent[3])
return(extent) # (xmin, ymin, xmax, ymax)
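# Illustrative usage sketch (hypothetical shapefile path):
def _example_extent(shapefile="data_raw/ctry_PROJ.shp"):
    xmin, ymin, xmax, ymax = extent_shp(shapefile)
    return xmin, ymin, xmax, ymax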
# country
def country(iso3, monthyear, proj="EPSG:3395",
data_country=True,
keep_data_raw=False,
fcc_source="gfc", perc=50,
gs_bucket=None):
"""Function formating the country data.
This function downloads, computes and formats the country data.
:param iso3: Country ISO 3166-1 alpha-3 code.
:param proj: Projection definition (EPSG, PROJ.4, WKT) as in
GDAL/OGR. Default to "EPSG:3395" (World Mercator).
:param monthyear: Date (month and year) for WDPA data
(e.g. "Aug2017").
:param data_country: Boolean for running data_country.sh to
compute country landscape variables. Default to "True".
:param keep_data_raw: Boolean to keep the data_raw folder. Default
to "False".
:param fcc_source: Source for forest-cover change data. Can be
"gfc" (Global Forest Change 2015 Hansen data) or
"roadless". Default to "gfc".
:param perc: Tree cover percentage threshold to define forest
    (only used if fcc_source="gfc").
:param gs_bucket: Name of the google storage bucket to use.
"""
# Identify continent and country from iso3
print("Identify continent and country from iso3")
# Geofabrik data
file_geofab = pkg_resources.resource_filename("forestatrisk",
"data/ctry_geofab.csv")
data_geofab = pd.read_csv(file_geofab, sep=";", header=0)
# Country
ctry_link_geofab = data_geofab.ctry_link[data_geofab.iso3 == iso3]
ctry_link_geofab = ctry_link_geofab.iloc[0]
# Continent
continent = data_geofab.continent[data_geofab.iso3 == iso3]
continent = continent.iloc[0].lower()
# Create data_raw directory
print("Create data_raw directory")
make_dir("data_raw")
# Download the zipfile from gadm.org
print("Download data")
url = "http://biogeo.ucdavis.edu/data/gadm2.8/shp/" + iso3 + "_adm_shp.zip"
fname = "data_raw/" + iso3 + "_adm_shp.zip"
urlretrieve(url, fname)
# Extract files from zip
print("Extract files from zip")
destDir = "data_raw"
f = ZipFile(fname)
f.extractall(destDir)
f.close()
print("Files extracted")
# Reproject
cmd = "ogr2ogr -overwrite -s_srs EPSG:4326 -t_srs '" + proj + "' -f 'ESRI Shapefile' \
-lco ENCODING=UTF-8 data_raw/ctry_PROJ.shp data_raw/" + iso3 + "_adm0.shp"
os.system(cmd)
# Compute extent
print("Compute extent")
extent_latlong = extent_shp("data_raw/" + iso3 + "_adm0.shp")
extent_proj = extent_shp("data_raw/ctry_PROJ.shp")
# Region with buffer of 5km
print("Region with buffer of 5km")
xmin_reg = np.floor(extent_proj[0] - 5000)
ymin_reg = np.floor(extent_proj[1] - 5000)
xmax_reg = np.ceil(extent_proj[2] + 5000)
ymax_reg = np.ceil(extent_proj[3] + 5000)
extent_reg = (xmin_reg, ymin_reg, xmax_reg, ymax_reg)
extent = " ".join(map(str, extent_reg))
# Tiles for SRTM data (see http://dwtkns.com/srtm/)
print("Tiles for SRTM data")
# SRTM tiles are 5x5 degrees
# x: -180/+180
# y: +60/-60
xmin_latlong = np.floor(extent_latlong[0])
ymin_latlong = np.floor(extent_latlong[1])
xmax_latlong = np.ceil(extent_latlong[2])
ymax_latlong = np.ceil(extent_latlong[3])
# Compute SRTM tile numbers
tile_left = np.int(np.ceil((xmin_latlong + 180.0) / 5.0))
tile_right = np.int(np.ceil((xmax_latlong + 180.0) / 5.0))
if (tile_right == tile_left):
# Trick to make curl globbing work in data_country.sh
tile_right = tile_left + 1
tile_top = np.int(np.ceil((-ymax_latlong + 60.0) / 5.0))
tile_bottom = np.int(np.ceil((-ymin_latlong + 60.0) / 5.0))
if (tile_bottom == tile_top):
tile_bottom = tile_top + 1
# Format variables, zfill is for having 01 and not 1
tiles_long = str(tile_left).zfill(2) + "-" + str(tile_right).zfill(2)
tiles_lat = str(tile_top).zfill(2) + "-" + str(tile_bottom).zfill(2)
# Google EarthEngine task
if (fcc_source == "gfc"):
# Check data availability
data_availability = ee_hansen.check(gs_bucket, iso3)
# If not available, run GEE
if data_availability is False:
print("Run Google Earth Engine")
task = ee_hansen.run_task(perc=perc, iso3=iso3,
extent_latlong=extent_latlong,
scale=30,
proj=proj,
gs_bucket=gs_bucket)
print("GEE running on the following extent:")
print(str(extent_latlong))
# Google EarthEngine task
if (fcc_source == "roadless"):
# Check data availability
data_availability = ee_roadless.check(gs_bucket, iso3)
# If not available, run GEE
if data_availability is False:
print("Run Google Earth Engine")
task = ee_roadless.run_task(iso3=iso3,
extent_latlong=extent_latlong,
scale=30,
proj=proj,
gs_bucket=gs_bucket)
print("GEE running on the following extent:")
print(str(extent_latlong))
# Call data_country.sh
if (data_country):
script = pkg_resources.resource_filename("forestatrisk",
"shell/data_country.sh")
args = ["sh ", script, continent, ctry_link_geofab, iso3,
"'" + proj + "'",
"'" + extent + "'", tiles_long, tiles_lat, monthyear]
cmd = " ".join(args)
os.system(cmd)
# Forest computations
if (fcc_source == "gfc"):
# Download Google EarthEngine results
print("Download Google Earth Engine results locally")
ee_hansen.download(gs_bucket, iso3,
path="data_raw")
# Call forest_country.sh
print("Forest computations")
script = pkg_resources.resource_filename("forestatrisk",
"shell/forest_country.sh")
args = ["sh ", script, "'" + proj + "'", "'" + extent + "'"]
cmd = " ".join(args)
os.system(cmd)
# Forest computations
if (fcc_source == "roadless"):
# Download Google EarthEngine results
print("Download Google Earth Engine results locally")
ee_roadless.download(gs_bucket, iso3,
path="data_raw")
# Call forest_country.sh
print("Forest computations")
script = pkg_resources.resource_filename("forestatrisk",
"shell/forest_country.sh")
args = ["sh ", script, "'" + proj + "'", "'" + extent + "'"]
cmd = " ".join(args)
os.system(cmd)
# Delete data_raw
if (keep_data_raw is False):
for root, dirs, files in os.walk("data_raw", topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
# End
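# Illustrative call sketch (not executed on import): the ISO code, date stamp
# and bucket name below are hypothetical; a real run downloads GADM/SRTM data
# and needs GDAL, Earth Engine and Google Cloud Storage access.
def _example_country_run(gs_bucket="my-gee-bucket"):
    country(iso3="MDG", monthyear="Aug2017", fcc_source="gfc", perc=50,
            gs_bucket=gs_bucket)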
| gpl-3.0 | -6,452,142,456,088,238,000 | 35.854701 | 90 | 0.577342 | false |
kumauta/cf-php-build-pack | scripts/detect.py | 5 | 1194 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from build_pack_utils import Builder
(Builder()
.configure() # noqa
.default_config()
.user_config()
.done()
.detect()
.ends_with(".php")
.recursive()
.if_found_output('PHP')
.when_not_found_continue()
.done()
.detect()
.by_name('{WEBDIR}')
.if_found_output('STATIC')
.done())
| apache-2.0 | -7,230,200,000,013,928,000 | 35.181818 | 74 | 0.675879 | false |
ktbyers/netmiko | examples/send_command_prompting_expect.py | 1 | 1176 | #!/usr/bin/env python
from netmiko import ConnectHandler
from getpass import getpass
cisco1 = {
"device_type": "cisco_ios",
"host": "cisco1.lasthop.io",
"username": "pyclass",
"password": getpass(),
}
command = "del flash:/test4.txt"
net_connect = ConnectHandler(**cisco1)
# CLI Interaction is as follows:
# cisco1#delete flash:/testb.txt
# Delete filename [testb.txt]?
# Delete flash:/testb.txt? [confirm]y
# Use 'send_command' and the 'expect_string' argument (note, expect_string uses
# RegEx patterns). Netmiko will move-on to the next command when the
# 'expect_string' is detected.
# strip_prompt=False and strip_command=False make the output
# easier to read in this context.
output = net_connect.send_command(
command_string=command,
expect_string=r"Delete filename",
strip_prompt=False,
strip_command=False,
)
output += net_connect.send_command(
command_string="\n",
expect_string=r"confirm",
strip_prompt=False,
strip_command=False,
)
output += net_connect.send_command(
command_string="y", expect_string=r"#", strip_prompt=False, strip_command=False
)
net_connect.disconnect()
print()
print(output)
print()
| mit | -5,958,477,509,598,286,000 | 25.133333 | 83 | 0.708333 | false |
hairychris/aiohttp-sse | examples/simple.py | 1 | 1538 | import asyncio
from aiohttp.web import Application, Response
from aiohttp_sse import EventSourceResponse
@asyncio.coroutine
def hello(request):
resp = EventSourceResponse()
resp.start(request)
for i in range(0, 100):
print('foo')
yield from asyncio.sleep(1, loop=loop)
resp.send('foo {}'.format(i))
resp.stop_streaming()
return resp
@asyncio.coroutine
def index(request):
d = b"""
<html>
<head>
<script type="text/javascript"
src="http://code.jquery.com/jquery.min.js"></script>
<script type="text/javascript">
var evtSource = new EventSource("/hello");
evtSource.onmessage = function(e) {
$('#response').html(e.data);
}
</script>
</head>
<body>
<h1>Response from server:</h1>
<div id="response"></div>
</body>
</html>
"""
resp = Response(body=d)
return resp
@asyncio.coroutine
def init(loop):
app = Application(loop=loop)
app.router.add_route('GET', '/hello', hello)
app.router.add_route('GET', '/index', index)
handler = app.make_handler()
srv = yield from loop.create_server(handler, '127.0.0.1', 8080)
print("Server started at http://127.0.0.1:8080")
return srv, handler
loop = asyncio.get_event_loop()
srv, handler = loop.run_until_complete(init(loop))
try:
loop.run_forever()
except KeyboardInterrupt:
loop.run_until_complete(handler.finish_connections())
| apache-2.0 | -3,035,414,821,445,613,000 | 24.213115 | 68 | 0.596229 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/vpn_client_parameters.py | 1 | 2529 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VpnClientParameters(Model):
"""Vpn Client Parameters for package generation.
:param processor_architecture: VPN client Processor Architecture. Possible
values are: 'AMD64' and 'X86'. Possible values include: 'Amd64', 'X86'
:type processor_architecture: str or
~azure.mgmt.network.v2017_08_01.models.ProcessorArchitecture
:param authentication_method: VPN client Authentication Method. Possible
values are: 'EAPTLS' and 'EAPMSCHAPv2'. Possible values include: 'EAPTLS',
'EAPMSCHAPv2'
:type authentication_method: str or
~azure.mgmt.network.v2017_08_01.models.AuthenticationMethod
:param radius_server_auth_certificate: The public certificate data for the
radius server authentication certificate as a Base-64 encoded string.
Required only if external radius authentication has been configured with
EAPTLS authentication.
:type radius_server_auth_certificate: str
:param client_root_certificates: A list of client root certificates public
certificate data encoded as Base-64 strings. Optional parameter for
external radius based authentication with EAPTLS.
:type client_root_certificates: list[str]
"""
_attribute_map = {
'processor_architecture': {'key': 'processorArchitecture', 'type': 'str'},
'authentication_method': {'key': 'authenticationMethod', 'type': 'str'},
'radius_server_auth_certificate': {'key': 'radiusServerAuthCertificate', 'type': 'str'},
'client_root_certificates': {'key': 'clientRootCertificates', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(VpnClientParameters, self).__init__(**kwargs)
self.processor_architecture = kwargs.get('processor_architecture', None)
self.authentication_method = kwargs.get('authentication_method', None)
self.radius_server_auth_certificate = kwargs.get('radius_server_auth_certificate', None)
self.client_root_certificates = kwargs.get('client_root_certificates', None)
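# Illustrative usage sketch (not part of the generated client): keyword names
# mirror the attribute map above; the certificate data below is a placeholder.
def _example_vpn_client_parameters():
    return VpnClientParameters(
        processor_architecture='Amd64',
        authentication_method='EAPTLS',
        client_root_certificates=['<base64-encoded certificate>'],
    )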
| mit | -7,360,390,994,237,454,000 | 49.58 | 96 | 0.680111 | false |
KyleKing/My-Programming-Sketchbook | Assorted_Snippets/python/cli_prototyping/proto_04.py | 1 | 3705 | """Experimenting with cmd2 modular command sets."""
import argparse
from functools import partial
from typing import List
from cmd2 import Cmd, Cmd2ArgumentParser, CommandSet, Statement, with_argparser, with_category, with_default_category
from cmd2.table_creator import BorderedTable, Column
@with_default_category('Fruits')
class LoadableFruits(CommandSet):
def __init__(self):
super().__init__()
def do_apple(self, _: Statement):
self._cmd.poutput('Apple')
def do_banana(self, _: Statement):
self._cmd.poutput('Banana')
@with_default_category('Vegetables')
class LoadableVegetables(CommandSet):
def __init__(self):
super().__init__()
def do_arugula(self, _: Statement):
self._cmd.poutput('Arugula')
def do_bokchoy(self, _: Statement):
self._cmd.poutput('Bok Choy')
class ExampleApp(Cmd):
"""CommandSets are loaded via the `load` and `unload` commands."""
def __init__(self, *args, **kwargs):
# gotta have this or neither the plugin or cmd2 will initialize
super().__init__(*args, auto_load_commands=False, **kwargs)
self._fruits = LoadableFruits()
self._vegetables = LoadableVegetables()
load_parser = Cmd2ArgumentParser()
load_parser.add_argument('cmds', choices=['fruits', 'vegetables'])
@with_argparser(load_parser)
@with_category('Command Loading')
def do_load(self, ns: argparse.Namespace):
if ns.cmds == 'fruits':
try:
self.register_command_set(self._fruits)
self.poutput('Fruits loaded')
except ValueError:
self.poutput('Fruits already loaded')
if ns.cmds == 'vegetables':
try:
self.register_command_set(self._vegetables)
self.poutput('Vegetables loaded')
except ValueError:
self.poutput('Vegetables already loaded')
@with_argparser(load_parser)
def do_unload(self, ns: argparse.Namespace):
if ns.cmds == 'fruits':
self.unregister_command_set(self._fruits)
self.poutput('Fruits unloaded')
if ns.cmds == 'vegetables':
self.unregister_command_set(self._vegetables)
self.poutput('Vegetables unloaded')
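# Illustrative scripted session (assumption: cmd2's onecmd_plus_hooks is used
# to feed single command lines instead of the interactive cmdloop).
def _example_scripted_session():
    app = ExampleApp()
    app.onecmd_plus_hooks('load fruits')
    app.onecmd_plus_hooks('apple')
    app.onecmd_plus_hooks('unload fruits')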
# ======================================================================================================
# PLANNED: This is just a cool looking table output that may be useful
def create_cmd_table(table_data: List[List[str]], width: int = 15) -> BorderedTable:
"""Create a bordered table for cmd2 output.
Args:
table_data: list of lists with the string data to display
width: integer width of the columns. Default is 15 which generally works for ~4 columns
Returns:
BorderedTable: generated table for printing
"""
columns = table_data[0]
auto_column = partial(Column, width=width)
bt = BorderedTable([*map(auto_column, columns)])
rows = table_data[1:]
return bt.generate_table(rows)
class ExtendedDisplay: # noqa: H601
"""Extended Display Command Set with New Commands for More Interactive Output."""
def do_display_table(self, statement: Statement) -> None:
"""Display a Table of (TBD) Data.
Args:
statement: results of parsing
"""
# Placeholder sample data
_table_data = [
['Studio', '# Movies', '# PPV', 'Revenue'],
['Netflix', '12', '14', f'{999999:,}'],
['Amazon Prime Video', '12', '14', f'{21450:,}'],
]
self._cmd.poutput(create_cmd_table(_table_data))
if __name__ == '__main__':
app = ExampleApp()
app.cmdloop()
| mit | -302,727,635,497,535,600 | 30.666667 | 117 | 0.59973 | false |
tubaman/django-macaddress | macaddress/fields.py | 2 | 3696 | import django
from django.core.exceptions import ValidationError
from django.db import models
from netaddr import EUI, AddrFormatError
from .formfields import MACAddressField as MACAddressFormField
from . import default_dialect, format_mac, mac_linux
import warnings
class MACAddressField(models.Field):
description = "A MAC address validated by netaddr.EUI"
empty_strings_allowed = False
dialect = None
def __init__(self, *args, **kwargs):
self.integer = kwargs.pop('integer', True)
if not self.integer: # If storing MAC address as string, set max_length to default (17) or use supplied kwarg value.
kwargs['max_length'] = kwargs.get('max_length', 17)
super(MACAddressField, self).__init__(*args, **kwargs)
def deconstruct(self):
''' Django 1.7 migrations require this method
https://docs.djangoproject.com/en/dev/howto/custom-model-fields/#field-deconstruction
'''
name, path, args, kwargs = super(MACAddressField, self).deconstruct()
kwargs['integer'] = self.integer
return name, path, args, kwargs
@classmethod
def set_dialect(cls, new_dialect_clazz):
''' Setting dialect for EUI (MAC addresses) globally to this Field
class.
Class new_dialect_clazz should (finally) extend
netaddr.strategy.eui48.mac_eui48.
'''
warnings.warn(
"The set_dialect method has been deprecated, in favor of the default_dialect utility function and "
" settings.MACADDRESS_DEFAULT_DIALECT. See macaddress.__init__.py source or the project README for "
"more information.",
DeprecationWarning,
)
cls.dialect = new_dialect_clazz
def get_prep_value(self, value):
if value is None:
return None
if not isinstance(value, EUI):
value = self.to_python(value)
if self.integer:
return int(value)
return str(value)
value.dialect = default_dialect(self)
if self.integer:
return int(value)
return str(value)
def get_internal_type(self):
if self.integer:
return 'BigIntegerField'
return 'CharField'
def from_db_value(self, value, expression, connection):
return self.to_python(value)
def to_python(self, value):
if value is None:
return value
if isinstance(value, EUI):
value.dialect = default_dialect(value)
return value
try:
return EUI(value, version=48, dialect=default_dialect())
except (TypeError, ValueError, AddrFormatError):
raise ValidationError("This value must be a valid MAC address.")
def formfield(self, **kwargs):
defaults = {'form_class': MACAddressFormField}
defaults.update(kwargs)
return super(MACAddressField, self).formfield(**defaults)
def get_prep_lookup(self, lookup_type, value):
        # Data is stored internally as an integer, so searching with a raw
        # string would yield no results (useful, for example, in the admin).
        if lookup_type in ('exact', 'iexact', 'contains', 'icontains'):
            try:
                return self.get_prep_value(value)
            except AddrFormatError:
                return None
        elif lookup_type == 'in':
try:
macs = []
for mac in value:
macs += [self.get_prep_value(mac)]
return macs
except AddrFormatError:
return None
else:
raise TypeError('Lookup type %r not supported.' % lookup_type)
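# Illustrative usage sketch (not part of this module): in a model one would
# declare e.g. `mac = MACAddressField(integer=False, blank=True)`; below is a
# standalone round-trip through the field using a hypothetical address.
def _example_roundtrip():
    field = MACAddressField(integer=True)
    eui = field.to_python('00:1B:44:11:3A:B7')
    return field.get_prep_value(eui)  # integer form, as stored in the DB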
| bsd-3-clause | -8,154,796,764,681,193,000 | 35.235294 | 124 | 0.612554 | false |
mrrrgn/AutobahnPython | examples/asyncio/wamp/basic/pubsub/unsubscribe/backend.py | 5 | 1251 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio.wamp import ApplicationSession
class Component(ApplicationSession):
"""
An application component that publishes an event every second.
"""
@asyncio.coroutine
def onJoin(self, details):
counter = 0
while True:
self.publish('com.myapp.topic1', counter)
counter += 1
yield from asyncio.sleep(1)
| apache-2.0 | -3,403,887,023,717,759,000 | 29.512195 | 79 | 0.610711 | false |
FRC-RS/FRS | api/views.py | 1 | 1518 | import json
from django.core.serializers import serialize
from django.apps import apps
import django.http as dhttp
from django.http import JsonResponse, HttpResponseBadRequest
from annoying.decorators import ajax_request
from TBAW.models import Team, Event
from util.viewutils import ajax_success, ajax_failure, require_http_methods_plus, make_querydict_from_request
PAGE_AMOUNT = 50
def error_response(request):
return HttpResponseBadRequest(request)
def object_to_json(obj):
return json.loads(serialize('json', [obj, ]))[0]
def object_list_to_json(obj_list):
return json.loads(serialize('json', obj_list))
@ajax_request
def team_json(request, team_number):
    obj = Team.objects.get(team_number=team_number)
    # return a dict so the ajax_request decorator can render it as JSON
    return object_to_json(obj)
def team_list_json(request, page):
page = int(page)
if page < 1:
return error_response(request)
start_index = PAGE_AMOUNT * (page - 1)
end_index = PAGE_AMOUNT * page
obj = Team.objects.all()[start_index:end_index]
return JsonResponse(object_list_to_json(obj), safe=False)
def event_json(request, event_key):
obj = Event.objects.get(key=event_key)
return JsonResponse(object_to_json(obj), safe=False)
def event_list_json(request, page):
page = int(page)
if page < 1:
return error_response(request)
start_index = PAGE_AMOUNT * (page - 1)
end_index = PAGE_AMOUNT * page
obj = Event.objects.all()[start_index:end_index]
return JsonResponse(object_list_to_json(obj), safe=False)
| mit | 3,201,671,841,173,588,500 | 25.172414 | 109 | 0.711462 | false |
tacaswell/scikit-xray | skbeam/core/fitting/tests/test_lineshapes.py | 5 | 13164 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# @author: Li Li ([email protected]) #
# created on 07/16/2014 #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.testing import (assert_array_almost_equal)
from skbeam.core.fitting import (gaussian, gausssian_step, gaussian_tail,
elastic, compton, lorentzian, lorentzian2,
voigt, pvoigt)
from skbeam.core.fitting import (ComptonModel, ElasticModel)
from skbeam.core.fitting import (gamma_dist, nbinom_dist, poisson_dist)
def test_gauss_peak():
"""
test of gauss function from xrf fit
"""
area = 1
cen = 0
std = 1
x = np.arange(-3, 3, 0.5)
out = gaussian(x, area, cen, std)
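    # Expected values are the standard normal PDF sampled at x = -3, -2.5, ..., 2.5 (peak 1/sqrt(2*pi) ~= 0.3989 at x = 0).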
y_true = [0.00443185, 0.0175283, 0.05399097, 0.1295176, 0.24197072,
0.35206533, 0.39894228, 0.35206533, 0.24197072, 0.1295176,
0.05399097, 0.0175283]
assert_array_almost_equal(y_true, out)
def test_gauss_step():
"""
test of gaussian step function from xrf fit
"""
y_true = [1.00000000e+00, 1.00000000e+00, 1.00000000e+00,
1.00000000e+00, 9.99999999e-01, 9.99999713e-01,
9.99968329e-01, 9.98650102e-01, 9.77249868e-01,
8.41344746e-01, 5.00000000e-01, 1.58655254e-01,
2.27501319e-02, 1.34989803e-03, 3.16712418e-05]
area = 1
cen = 0
std = 1
x = np.arange(-10, 5, 1)
peak_e = 1.0
out = gausssian_step(x, area, cen, std, peak_e)
assert_array_almost_equal(y_true, out)
def test_gauss_tail():
"""
test of gaussian tail function from xrf fit
"""
y_true = [7.48518299e-05, 2.03468369e-04, 5.53084370e-04, 1.50343919e-03,
4.08677027e-03, 1.11086447e-02, 3.01566200e-02, 8.02175541e-02,
1.87729388e-01, 3.03265330e-01, 2.61578292e-01, 3.75086265e-02,
2.22560560e-03, 5.22170501e-05, 4.72608544e-07]
area = 1
cen = 0
std = 1
x = np.arange(-10, 5, 1)
gamma = 1.0
out = gaussian_tail(x, area, cen, std, gamma)
assert_array_almost_equal(y_true, out)
def test_elastic_peak():
"""
test of elastic peak from xrf fit
"""
y_true = [0.00085311, 0.00164853, 0.00307974, 0.00556237, 0.00971259,
0.01639604, 0.02675911, 0.04222145, 0.06440556, 0.09498223,
0.13542228, 0.18666663, 0.24875512, 0.32048386, 0.39918028,
0.48068522, 0.55960456, 0.62984039, 0.68534389, 0.72096698,
0.73324816, 0.72096698, 0.68534389, 0.62984039, 0.55960456,
0.48068522, 0.39918028, 0.32048386, 0.24875512, 0.18666663,
0.13542228, 0.09498223, 0.06440556, 0.04222145, 0.02675911,
0.01639604, 0.00971259, 0.00556237, 0.00307974, 0.00164853]
area = 1
energy = 10
offset = 0.01
fanoprime = 0.01
e_offset = 0
e_linear = 1
e_quadratic = 0
ev = np.arange(8, 12, 0.1)
out = elastic(ev, area, energy,
offset, fanoprime,
e_offset, e_linear, e_quadratic)
assert_array_almost_equal(y_true, out)
def test_compton_peak():
"""
test of compton peak from xrf fit
"""
y_true = [0.01332237, 0.01536984, 0.01870113, 0.02401014, 0.03223281,
0.04455143, 0.0623487, 0.08709168, 0.12013435, 0.16244524,
0.2142911, 0.27493377, 0.34241693, 0.41352197, 0.48395163,
0.5487556, 0.6029529, 0.64224726, 0.66369326, 0.65792554,
0.63050209, 0.58478146, 0.52510892, 0.45674079, 0.38508357,
0.31500557, 0.25033778, 0.19362201, 0.14610264, 0.10790876,
0.07834781, 0.05623019, 0.04016135, 0.02876383, 0.02081757,
0.01532608, 0.01152704, 0.00886833, 0.00696818, 0.00557234]
energy = 10
offset = 0.01
fano = 0.01
angle = 90
fwhm_corr = 1
amp = 1
f_step = 0
f_tail = 0.1
gamma = 10
hi_f_tail = 0.1
hi_gamma = 1
e_offset = 0
e_linear = 1
e_quadratic = 0
ev = np.arange(8, 12, 0.1)
out = compton(ev, amp, energy, offset, fano,
e_offset, e_linear, e_quadratic, angle,
fwhm_corr, f_step, f_tail,
gamma, hi_f_tail, hi_gamma)
assert_array_almost_equal(y_true, out)
def test_lorentzian_peak():
y_true = [0.03151583, 0.03881828, 0.04897075, 0.06366198, 0.0860297,
0.12242688, 0.18724111, 0.31830989, 0.63661977, 1.59154943,
3.18309886, 1.59154943, 0.63661977, 0.31830989, 0.18724111,
0.12242688, 0.0860297, 0.06366198, 0.04897075, 0.03881828]
x = np.arange(-1, 1, 0.1)
a = 1
cen = 0
std = 0.1
out = lorentzian(x, a, cen, std)
assert_array_almost_equal(y_true, out)
def test_lorentzian_squared_peak():
y_true = [3.12037924e-04, 4.73393644e-04, 7.53396180e-04,
1.27323954e-03, 2.32512700e-03, 4.70872613e-03,
1.10141829e-02, 3.18309886e-02, 1.27323954e-01,
7.95774715e-01, 3.18309886e+00, 7.95774715e-01,
1.27323954e-01, 3.18309886e-02, 1.10141829e-02,
4.70872613e-03, 2.32512700e-03, 1.27323954e-03,
7.53396180e-04, 4.73393644e-04]
x = np.arange(-1, 1, 0.1)
a = 1
cen = 0
std = 0.1
out = lorentzian2(x, a, cen, std)
assert_array_almost_equal(y_true, out)
def test_voigt_peak():
y_true = [0.03248735, 0.04030525, 0.05136683, 0.06778597, 0.09377683,
0.13884921, 0.22813635, 0.43385822, 0.90715199, 1.65795663,
2.08709281, 1.65795663, 0.90715199, 0.43385822, 0.22813635,
0.13884921, 0.09377683, 0.06778597, 0.05136683, 0.04030525]
x = np.arange(-1, 1, 0.1)
a = 1
cen = 0
std = 0.1
out1 = voigt(x, a, cen, std, gamma=0.1)
out2 = voigt(x, a, cen, std)
assert_array_almost_equal(y_true, out1)
assert_array_almost_equal(y_true, out2)
def test_pvoigt_peak():
y_true = [0.01575792, 0.01940914, 0.02448538, 0.03183099, 0.04301488,
0.06122087, 0.09428971, 0.18131419, 0.58826472, 2.00562834,
3.58626083, 2.00562834, 0.58826472, 0.18131419, 0.09428971,
0.06122087, 0.04301488, 0.03183099, 0.02448538, 0.01940914]
x = np.arange(-1, 1, 0.1)
a = 1
cen = 0
std = 0.1
fraction = 0.5
out = pvoigt(x, a, cen, std, fraction)
assert_array_almost_equal(y_true, out)
def test_elastic_model():
area = 11
energy = 10
offset = 0.02
fanoprime = 0.03
e_offset = 0
e_linear = 0.01
e_quadratic = 0
true_param = [fanoprime, area, energy]
x = np.arange(800, 1200, 1)
out = elastic(x, area, energy, offset, fanoprime,
e_offset, e_linear, e_quadratic)
elastic_model = ElasticModel()
# fwhm_offset is not a sensitive parameter, used as a fixed value
elastic_model.set_param_hint(name='e_offset', value=0, vary=False)
elastic_model.set_param_hint(name='e_linear', value=0.01, vary=False)
elastic_model.set_param_hint(name='e_quadratic', value=0, vary=False)
elastic_model.set_param_hint(name='coherent_sct_energy', value=10,
vary=False)
elastic_model.set_param_hint(name='fwhm_offset', value=0.02, vary=False)
elastic_model.set_param_hint(name='fwhm_fanoprime', value=0.03, vary=False)
result = elastic_model.fit(out, x=x, coherent_sct_amplitude=10)
fitted_val = [result.values['fwhm_fanoprime'],
result.values['coherent_sct_amplitude'],
result.values['coherent_sct_energy']]
assert_array_almost_equal(true_param, fitted_val, decimal=2)
def test_compton_model():
energy = 10
offset = 0.001
fano = 0.01
angle = 90
fwhm_corr = 1
amp = 20
f_step = 0.05
f_tail = 0.1
gamma = 2
hi_f_tail = 0.01
hi_gamma = 1
e_offset = 0
e_linear = 0.01
e_quadratic = 0
x = np.arange(800, 1200, 1.0)
true_param = [energy, amp]
out = compton(x, amp, energy, offset, fano,
e_offset, e_linear, e_quadratic,
angle, fwhm_corr, f_step, f_tail,
gamma, hi_f_tail, hi_gamma)
cm = ComptonModel()
# parameters not sensitive
cm.set_param_hint(name='compton_hi_gamma', value=hi_gamma, vary=False)
cm.set_param_hint(name='fwhm_offset', value=offset, vary=False)
cm.set_param_hint(name='compton_angle', value=angle, vary=False)
cm.set_param_hint(name='e_offset', value=e_offset, vary=False)
cm.set_param_hint(name='e_linear', value=e_linear, vary=False)
cm.set_param_hint(name='e_quadratic', value=e_quadratic, vary=False)
cm.set_param_hint(name='fwhm_fanoprime', value=fano, vary=False)
cm.set_param_hint(name='compton_hi_f_tail', value=hi_f_tail, vary=False)
cm.set_param_hint(name='compton_f_step', value=f_step, vary=False)
cm.set_param_hint(name='compton_f_tail', value=f_tail, vary=False)
cm.set_param_hint(name='compton_gamma', value=gamma, vary=False)
cm.set_param_hint(name='compton_amplitude', value=20, vary=False)
cm.set_param_hint(name='compton_fwhm_corr', value=fwhm_corr, vary=False)
p = cm.make_params()
result = cm.fit(out, x=x, params=p, compton_amplitude=20,
coherent_sct_energy=10)
fit_val = [result.values['coherent_sct_energy'],
result.values['compton_amplitude']]
assert_array_almost_equal(true_param, fit_val, decimal=2)
def test_dist():
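    # Compare the negative-binomial, gamma and Poisson photon-count distributions against precomputed reference values.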
M = 1.9 # number of coherent modes
K = 3.15 # number of photons
bin_edges = np.array([0., 0.4, 0.8, 1.2, 1.6, 2.0])
pk_n = nbinom_dist(bin_edges, K, M)
pk_p = poisson_dist(bin_edges, K)
pk_g = gamma_dist(bin_edges, K, M)
assert_array_almost_equal(pk_n, np.array([0.15609113, 0.17669628,
0.18451672, 0.1837303,
0.17729389, 0.16731627]))
assert_array_almost_equal(pk_g, np.array([0., 0.13703903, 0.20090424,
0.22734693, 0.23139384,
0.22222281]))
assert_array_almost_equal(pk_p,
np.array([0.04285213, 0.07642648,
0.11521053, 0.15411372,
0.18795214, 0.21260011]))
| bsd-3-clause | -2,416,997,508,392,550,400 | 36.08169 | 79 | 0.557353 | false |
SSJohns/osf.io | api_tests/users/views/test_user_list.py | 3 | 11797 | # -*- coding: utf-8 -*-
import itsdangerous
import mock
from nose.tools import * # flake8: noqa
import unittest
import urlparse
from modularodm import Q
from tests.base import ApiTestCase
from tests.factories import AuthUserFactory
from api.base.settings.defaults import API_BASE
from framework.auth.cas import CasResponse
from framework.sessions.model import Session
from website.models import User
from website import settings
from website.oauth.models import ApiOAuth2PersonalToken
class TestUsers(ApiTestCase):
def setUp(self):
super(TestUsers, self).setUp()
self.user_one = AuthUserFactory()
self.user_two = AuthUserFactory()
def tearDown(self):
super(TestUsers, self).tearDown()
def test_returns_200(self):
res = self.app.get('/{}users/'.format(API_BASE))
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
def test_find_user_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert_in(self.user_two._id, ids)
def test_all_users_in_users(self):
url = "/{}users/".format(API_BASE)
res = self.app.get(url)
user_son = res.json['data']
ids = [each['id'] for each in user_son]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_multiple_in_users(self):
url = "/{}users/?filter[full_name]=fred".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_in(self.user_two._id, ids)
def test_find_single_user_in_users(self):
url = "/{}users/?filter[full_name]=my".format(API_BASE)
self.user_one.fullname = 'My Mom'
self.user_one.save()
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_find_no_user_in_users(self):
url = "/{}users/?filter[full_name]=NotMyMom".format(API_BASE)
res = self.app.get(url)
user_json = res.json['data']
ids = [each['id'] for each in user_json]
assert_not_in(self.user_one._id, ids)
assert_not_in(self.user_two._id, ids)
def test_users_list_takes_profile_image_size_param(self):
size = 42
url = "/{}users/?profile_image_size={}".format(API_BASE, size)
res = self.app.get(url)
user_json = res.json['data']
for user in user_json:
profile_image_url = user['links']['profile_image']
query_dict = urlparse.parse_qs(urlparse.urlparse(profile_image_url).query)
assert_equal(int(query_dict.get('s')[0]), size)
class TestUsersCreate(ApiTestCase):
def setUp(self):
super(TestUsersCreate, self).setUp()
self.user = AuthUserFactory()
self.unconfirmed_email = '[email protected]'
self.base_url = '/{}users/'.format(API_BASE)
self.data = {
'data': {
'type': 'users',
'attributes': {
'username': self.unconfirmed_email,
'full_name': 'Test Account'
}
}
}
def tearDown(self):
super(TestUsersCreate, self).tearDown()
self.app.reset() # clears cookies
User.remove()
@mock.patch('framework.auth.views.mails.send_mail')
def test_user_can_not_create_other_user_or_send_mail(self, mock_mail):
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
res = self.app.post_json_api(
'{}?send_email=true'.format(self.base_url),
self.data,
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 403)
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
assert_equal(mock_mail.call_count, 0)
@mock.patch('framework.auth.views.mails.send_mail')
def test_cookied_requests_do_not_create_or_email(self, mock_mail):
session = Session(data={'auth_user_id': self.user._id})
session.save()
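        # Sign the session id with the server secret so the request carries an ordinary cookie-based session rather than a token.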
cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(session._id)
self.app.set_cookie(settings.COOKIE_NAME, str(cookie))
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
res = self.app.post_json_api(
self.base_url,
self.data,
expect_errors=True
)
assert_equal(res.status_code, 403)
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
assert_equal(mock_mail.call_count, 0)
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
@unittest.skipIf(not settings.DEV_MODE, 'DEV_MODE disabled, osf.users.create unavailable') # TODO: Remove when available outside of DEV_MODE
def test_properly_scoped_token_can_create_and_send_email(self, mock_auth, mock_mail):
token = ApiOAuth2PersonalToken(
owner=self.user,
name='Authorized Token',
scopes='osf.users.create'
)
mock_cas_resp = CasResponse(
authenticated=True,
user=self.user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s for s in token.scopes.split(' ')]
}
)
mock_auth.return_value = self.user, mock_cas_resp
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
res = self.app.post_json_api(
'{}?send_email=true'.format(self.base_url),
self.data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['username'], self.unconfirmed_email)
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 1)
assert_equal(mock_mail.call_count, 1)
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
@unittest.skipIf(not settings.DEV_MODE, 'DEV_MODE disabled, osf.users.create unavailable') # TODO: Remove when available outside of DEV_MODE
def test_properly_scoped_token_does_not_send_email_without_kwarg(self, mock_auth, mock_mail):
token = ApiOAuth2PersonalToken(
owner=self.user,
name='Authorized Token',
scopes='osf.users.create'
)
mock_cas_resp = CasResponse(
authenticated=True,
user=self.user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s for s in token.scopes.split(' ')]
}
)
mock_auth.return_value = self.user, mock_cas_resp
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
res = self.app.post_json_api(
self.base_url,
self.data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['username'], self.unconfirmed_email)
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 1)
assert_equal(mock_mail.call_count, 0)
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
@unittest.skipIf(not settings.DEV_MODE, 'DEV_MODE disabled, osf.users.create unavailable') # TODO: Remove when available outside of DEV_MODE
def test_properly_scoped_token_can_create_without_username_but_not_send_email(self, mock_auth, mock_mail):
token = ApiOAuth2PersonalToken(
owner=self.user,
name='Authorized Token',
scopes='osf.users.create'
)
mock_cas_resp = CasResponse(
authenticated=True,
user=self.user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s for s in token.scopes.split(' ')]
}
)
mock_auth.return_value = self.user, mock_cas_resp
self.data['data']['attributes'] = {'full_name': 'No Email'}
assert_equal(User.find(Q('fullname', 'eq', 'No Email')).count(), 0)
res = self.app.post_json_api(
'{}?send_email=true'.format(self.base_url),
self.data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['username'], None)
assert_equal(User.find(Q('fullname', 'eq', 'No Email')).count(), 1)
assert_equal(mock_mail.call_count, 0)
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
def test_improperly_scoped_token_can_not_create_or_email(self, mock_auth, mock_mail):
token = ApiOAuth2PersonalToken(
owner=self.user,
name='Unauthorized Token',
scopes='osf.full_write'
)
mock_cas_resp = CasResponse(
authenticated=True,
user=self.user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s for s in token.scopes.split(' ')]
}
)
mock_auth.return_value = self.user, mock_cas_resp
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
res = self.app.post_json_api(
'{}?send_email=true'.format(self.base_url),
self.data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)},
expect_errors=True
)
assert_equal(res.status_code, 403)
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
assert_equal(mock_mail.call_count, 0)
@mock.patch('framework.auth.views.mails.send_mail')
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
@unittest.skipIf(not settings.DEV_MODE, 'DEV_MODE disabled, osf.admin unavailable') # TODO: Remove when available outside of DEV_MODE
def test_admin_scoped_token_can_create_and_send_email(self, mock_auth, mock_mail):
token = ApiOAuth2PersonalToken(
owner=self.user,
name='Admin Token',
scopes='osf.admin'
)
mock_cas_resp = CasResponse(
authenticated=True,
user=self.user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s for s in token.scopes.split(' ')]
}
)
mock_auth.return_value = self.user, mock_cas_resp
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 0)
res = self.app.post_json_api(
'{}?send_email=true'.format(self.base_url),
self.data,
headers={'Authorization': 'Bearer {}'.format(token.token_id)}
)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['username'], self.unconfirmed_email)
assert_equal(User.find(Q('username', 'eq', self.unconfirmed_email)).count(), 1)
assert_equal(mock_mail.call_count, 1)
| apache-2.0 | 6,450,712,556,851,597,000 | 37.552288 | 145 | 0.598372 | false |
ucsdlib/python-novice-inflammation | tools/test_check.py | 14 | 20651 | #! /usr/bin/env python
"""
Unit and functional tests for markdown lesson template validator.
Some of these tests require looking for example files, which exist only on
the gh-pages branch. Some tests may therefore fail on branch "core".
"""
import logging
import os
import unittest
import check
# Make log messages visible to help audit test failures
check.start_logging(level=logging.DEBUG)
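# The markdown files being validated live one directory above this tools/ directory.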
MARKDOWN_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir))
class BaseTemplateTest(unittest.TestCase):
"""Common methods for testing template validators"""
SAMPLE_FILE = "" # Path to a file that should pass all tests
VALIDATOR = check.MarkdownValidator
def _create_validator(self, markdown):
"""Create validator object from markdown string; useful for failures"""
return self.VALIDATOR(markdown=markdown)
class TestAstHelpers(BaseTemplateTest):
SAMPLE_FILE = os.path.join(MARKDOWN_DIR, 'index.md')
VALIDATOR = check.MarkdownValidator
def test_link_text_extracted(self):
"""Verify that link text and destination are extracted correctly"""
validator = self._create_validator("""[This is a link](discussion.html)""")
links = validator.ast.find_external_links(validator.ast.children[0])
dest, link_text = validator.ast.get_link_info(links[0])
self.assertEqual(dest, "discussion.html")
self.assertEqual(link_text, "This is a link")
class TestIndexPage(BaseTemplateTest):
"""Test the ability to correctly identify and validate specific sections
of a markdown file"""
SAMPLE_FILE = os.path.join(MARKDOWN_DIR, "index.md")
VALIDATOR = check.IndexPageValidator
def test_sample_file_passes_validation(self):
sample_validator = self.VALIDATOR(self.SAMPLE_FILE)
res = sample_validator.validate()
self.assertTrue(res)
def test_headers_missing_hrs(self):
validator = self._create_validator("""Blank row
layout: lesson
title: Lesson Title
Another section that isn't an HR
""")
self.assertFalse(validator._validate_doc_headers())
def test_headers_missing_a_line(self):
"""One of the required headers is missing"""
validator = self._create_validator("""---
layout: lesson
---""")
self.assertFalse(validator._validate_doc_headers())
# TESTS INVOLVING DOCUMENT HEADER SECTION
def test_headers_fail_with_other_content(self):
validator = self._create_validator("""---
layout: lesson
title: Lesson Title
otherline: Nothing
---""")
self.assertFalse(validator._validate_doc_headers())
def test_fail_when_headers_not_yaml_dict(self):
"""Fail when the headers can't be parsed to a dict of YAML data"""
validator = self._create_validator("""---
This will parse as a string, not a dictionary
---""")
self.assertFalse(validator._validate_doc_headers())
# TESTS INVOLVING SECTION TITLES/HEADINGS
def test_index_has_valid_section_headings(self):
"""The provided index page"""
validator = self._create_validator("""## Topics
1. [Topic Title One](01-one.html)
2. [Topic Title Two](02-two.html)
## Other Resources
* [Reference Guide](reference.html)
* [Next Steps](discussion.html)
* [Instructor's Guide](instructors.html)""")
res = validator._validate_section_heading_order()
self.assertTrue(res)
def test_index_fail_when_section_heading_absent(self):
validator = self._create_validator("""## Topics
1. [Topic Title One](01-one.html)
2. [Topic Title Two](02-two.html)
## Other Resources
* [Reference Guide](reference.html)
* [Next Steps](discussion.html)
* [Instructor's Guide](instructors.html)""")
res = validator.ast.has_section_heading("Fake heading")
self.assertFalse(res)
def test_fail_when_section_heading_is_wrong_level(self):
"""All headings must be exactly level 2"""
validator = self._create_validator("""---
layout: page
title: Lesson Title
---
Paragraph of introductory material.
> ## Prerequisites
>
> A short paragraph describing what learners need to know
> before tackling this lesson.
### Topics
1. [Topic Title 1](01-one.html)
2. [Topic Title 2](02-two.html)
## Other Resources
* [Reference Guide](reference.html)
* [Next Steps](discussion.html)
* [Instructor's Guide](instructors.html)""")
self.assertFalse(validator._validate_section_heading_order())
def test_fail_when_section_headings_in_wrong_order(self):
validator = self._create_validator("""---
layout: lesson
title: Lesson Title
---
Paragraph of introductory material.
> ## Prerequisites
>
> A short paragraph describing what learners need to know
> before tackling this lesson.
## Other Resources
* [Reference Guide](reference.html)
* [Instructor's Guide](instructors.html)
## Topics
* [Topic Title 1](01-one.html)
* [Topic Title 2](02-two.html)""")
self.assertFalse(validator._validate_section_heading_order())
def test_pass_when_prereq_section_has_correct_heading_level(self):
validator = self._create_validator("""---
layout: lesson
title: Lesson Title
---
Paragraph of introductory material.
> ## Prerequisites
>
> A short paragraph describing what learners need to know
> before tackling this lesson.
""")
self.assertTrue(validator._validate_intro_section())
def test_fail_when_prereq_section_has_incorrect_heading_level(self):
validator = self._create_validator("""
> # Prerequisites {.prereq}
>
> A short paragraph describing what learners need to know
> before tackling this lesson.
""")
self.assertFalse(validator._validate_callouts())
# TESTS INVOLVING LINKS TO OTHER CONTENT
def test_should_check_text_of_all_links_in_index(self):
"""Text of every local-html link in index.md should
match dest page title"""
validator = self._create_validator("""
## [This link is in a heading](reference.html)
[Topic Title One](01-one.html#anchor)""")
links = validator.ast.find_external_links()
check_text, dont_check_text = validator._partition_links()
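        # On the index page both local HTML links should be queued for link-text/title checking.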
self.assertEqual(len(dont_check_text), 0)
self.assertEqual(len(check_text), 2)
def test_file_links_validate(self):
"""Verify that all links in a sample file validate.
Involves checking for example files; may fail on "core" branch"""
sample_validator = self.VALIDATOR(self.SAMPLE_FILE)
res = sample_validator._validate_links()
self.assertTrue(res)
def test_html_link_to_extant_md_file_passes(self):
"""Verify that an HTML link with corresponding MD file will pass
Involves checking for example files; may fail on "core" branch"""
validator = self._create_validator("""[Topic Title One](01-one.html)""")
self.assertTrue(validator._validate_links())
def test_html_link_with_anchor_to_extant_md_passes(self):
"""Verify that link is identified correctly even if to page anchor
For now this just tests that the regex handles #anchors.
It doesn't validate that the named anchor exists in the md file
Involves checking for example files; may fail on "core" branch
"""
validator = self._create_validator("""[Topic Title One](01-one.html#anchor)""")
self.assertTrue(validator._validate_links())
def test_inpage_anchor_passes_validation(self):
"""Links that reference anchors within the page should be ignored"""
# TODO: Revisit once anchor rules are available
validator = self._create_validator("""Most databases also support Booleans and date/time values;
SQLite uses the integers 0 and 1 for the former, and represents the latter as discussed [earlier](#a:dates).""")
self.assertTrue(validator._validate_links())
def test_missing_markdown_file_fails_validation(self):
"""Fail validation when an html file is linked without corresponding
markdown file"""
validator = self._create_validator("""[Broken link](nonexistent.html)""")
self.assertFalse(validator._validate_links())
def test_website_link_ignored_by_validator(self):
"""Don't look for markdown if the file linked isn't local-
remote website links are ignored"""
validator = self._create_validator("""[Broken link](http://website.com/filename.html)""")
self.assertTrue(validator._validate_links())
def test_malformed_website_link_fails_validator(self):
"""If the link isn't prefixed by http(s):// or ftp://, fail.
This is because there are a lot of edge cases in distinguishing
between filenames and URLs: err on the side of certainty."""
validator = self._create_validator("""[Broken link](www.website.com/filename.html)""")
self.assertFalse(validator._validate_links())
def test_finds_image_asset(self):
"""Image asset is found in the expected file location
Involves checking for example files; may fail on "core" branch"""
validator = self._create_validator(
"""""")
self.assertTrue(validator._validate_links())
def test_image_asset_not_found(self):
"""Image asset can't be found if path is invalid"""
validator = self._create_validator(
"""""")
self.assertFalse(validator._validate_links())
def test_non_html_link_finds_csv(self):
"""Look for CSV file in appropriate folder
Involves checking for example files; may fail on "core" branch
"""
validator = self._create_validator(
"""Use [this CSV](data/data.csv) for the exercise.""")
self.assertTrue(validator._validate_links())
def test_non_html_links_are_path_sensitive(self):
"""Fails to find CSV file with wrong path."""
validator = self._create_validator(
"""Use [this CSV](data.csv) for the exercise.""")
self.assertFalse(validator._validate_links())
### Tests involving callout/blockquote sections
def test_one_prereq_callout_passes(self):
"""index.md should have one, and only one, prerequisites box"""
validator = self._create_validator("""> ## Prerequisites {.prereq}
>
> What learners need to know before tackling this lesson.
""")
self.assertTrue(validator._validate_callouts())
def test_two_prereq_callouts_fail(self):
"""More than one prereq callout box is not allowed"""
validator = self._create_validator("""> ## Prerequisites {.prereq}
>
> What learners need to know before tackling this lesson.
A spacer paragraph
> ## Prerequisites {.prereq}
>
> A second prerequisites box should cause an error
""")
self.assertFalse(validator._validate_callouts())
def test_callout_without_style_fails(self):
"""A callout box will fail if it is missing the required style"""
validator = self._create_validator("""> ## Prerequisites
>
> What learners need to know before tackling this lesson.
""")
self.assertFalse(validator._validate_callouts())
def test_callout_with_wrong_title_fails(self):
"""A callout box will fail if it has the wrong title"""
validator = self._create_validator("""> ## Wrong title {.prereq}
>
> What learners need to know before tackling this lesson.
""")
self.assertFalse(validator._validate_callouts())
def test_unknown_callout_style_fails(self):
"""A callout whose style is unrecognized by template is invalid"""
validator = self._create_validator("""> ## Any title {.callout}
>
> What learners need to know before tackling this lesson.
""")
callout_node = validator.ast.get_callouts()[0]
self.assertFalse(validator._validate_one_callout(callout_node))
def test_block_ignored_sans_heading(self):
"""
Blockquotes only count as callouts if they have a heading
"""
validator = self._create_validator("""> Prerequisites {.prereq}
>
> What learners need to know before tackling this lesson.
""")
callout_nodes = validator.ast.get_callouts()
self.assertEqual(len(callout_nodes), 0)
def test_callout_heading_must_be_l2(self):
"""Callouts will fail validation if the heading is not level 2"""
validator = self._create_validator("""> ### Prerequisites {.prereq}
>
> What learners need to know before tackling this lesson.
""")
self.assertFalse(validator._validate_callouts())
def test_fail_if_fixme_present_all_caps(self):
"""Validation should fail if a line contains the word FIXME (exact)"""
validator = self._create_validator("""Incomplete sentence (FIXME).""")
self.assertFalse(validator._validate_no_fixme())
def test_fail_if_fixme_present_mixed_case(self):
"""Validation should fail if a line contains the word FIXME
(in any capitalization)"""
validator = self._create_validator("""Incomplete sentence (FiXmE).""")
self.assertFalse(validator._validate_no_fixme())
class TestTopicPage(BaseTemplateTest):
"""Verifies that the topic page validator works as expected"""
SAMPLE_FILE = os.path.join(MARKDOWN_DIR, "01-one.md")
VALIDATOR = check.TopicPageValidator
def test_headers_fail_because_invalid_content(self):
"""The value provided as YAML does not match the expected datatype"""
validator = self._create_validator("""---
layout: lesson
title: Lesson Title
subtitle: A page
minutes: not a number
---""")
self.assertFalse(validator._validate_doc_headers())
def test_topic_page_should_have_no_headings(self):
"""Requirement according to spec; may be relaxed in future"""
validator = self._create_validator("""
## Heading that should not be present
Some text""")
self.assertFalse(validator._validate_has_no_headings())
def test_should_not_check_text_of_links_in_topic(self):
"""Never check that text of local-html links in topic
matches dest title """
validator = self._create_validator("""
## [This link is in a heading](reference.html)
[Topic Title One](01-one.html#anchor)""")
links = validator.ast.find_external_links()
check_text, dont_check_text = validator._partition_links()
self.assertEqual(len(dont_check_text), 2)
self.assertEqual(len(check_text), 0)
def test_pass_when_optional_callouts_absent(self):
"""Optional block titles should be optional"""
validator = self._create_validator("""> ## Learning Objectives {.objectives}
>
> * All topic pages must have this callout""")
self.assertTrue(validator._validate_callouts())
def test_callout_style_passes_regardless_of_title(self):
"""Verify that certain kinds of callout box can be recognized solely
by style, regardless of the heading title"""
validator = self._create_validator("""> ## Learning Objectives {.objectives}
>
> * All topic pages must have this callout
> ## Some random title {.callout}
>
> Some informative text""")
self.assertTrue(validator._validate_callouts())
def test_callout_style_allows_duplicates(self):
"""Multiple blockquoted sections with style 'callout' are allowed"""
validator = self._create_validator("""> ## Learning Objectives {.objectives}
>
> * All topic pages must have this callout
> ## Callout box one {.callout}
>
> Some informative text
Spacer paragraph
> ## Callout box two {.callout}
>
> Further exposition""")
self.assertTrue(validator._validate_callouts())
def test_sample_file_passes_validation(self):
sample_validator = self.VALIDATOR(self.SAMPLE_FILE)
res = sample_validator.validate()
self.assertTrue(res)
class TestReferencePage(BaseTemplateTest):
"""Verifies that the reference page validator works as expected"""
SAMPLE_FILE = os.path.join(MARKDOWN_DIR, "reference.md")
VALIDATOR = check.ReferencePageValidator
def test_missing_glossary_definition(self):
validator = self._create_validator("")
self.assertFalse(validator._validate_glossary_entry(
["Key word"]))
def test_missing_colon_at_glossary_definition(self):
validator = self._create_validator("")
self.assertFalse(validator._validate_glossary_entry(
["Key word", "Definition of term"]))
def test_wrong_indentation_at_glossary_definition(self):
validator = self._create_validator("")
self.assertFalse(validator._validate_glossary_entry(
["Key word", ": Definition of term"]))
def test_wrong_continuation_at_glossary_definition(self):
validator = self._create_validator("")
self.assertFalse(validator._validate_glossary_entry(
["Key word", ": Definition of term", "continuation"]))
def test_valid_glossary_definition(self):
validator = self._create_validator("")
self.assertTrue(validator._validate_glossary_entry(
["Key word", ": Definition of term", " continuation"]))
def test_only_definitions_can_appear_after_glossary_heading(self):
validator = self._create_validator("""## Glossary
Key Word 1
: Definition of first term
Paragraph
Key Word 2
: Definition of second term
""")
self.assertFalse(validator._validate_glossary())
def test_glossary(self):
validator = self._create_validator("""## Glossary
Key Word 1
: Definition of first term
Key Word 2
: Definition of second term
""")
self.assertTrue(validator._validate_glossary())
def test_callout_fails_when_none_specified(self):
"""The presence of a callout box should cause validation to fail
when the template doesn't define any recognized callouts
(No "unknown" blockquote sections are allowed)
"""
validator = self._create_validator("""> ## Learning Objectives {.objectives}
>
> * Learning objective 1
> * Learning objective 2""")
self.assertFalse(validator._validate_callouts())
def test_sample_file_passes_validation(self):
sample_validator = self.VALIDATOR(self.SAMPLE_FILE)
res = sample_validator.validate()
self.assertTrue(res)
class TestInstructorPage(BaseTemplateTest):
"""Verifies that the instructors page validator works as expected"""
SAMPLE_FILE = os.path.join(MARKDOWN_DIR, "instructors.md")
VALIDATOR = check.InstructorPageValidator
def test_should_selectively_check_text_of_links_in_topic(self):
"""Only verify that text of local-html links in topic
matches dest title if the link is in a heading"""
validator = self._create_validator("""
## [Reference](reference.html)
[Topic Title One](01-one.html#anchor)""")
check_text, dont_check_text = validator._partition_links()
self.assertEqual(len(dont_check_text), 1)
self.assertEqual(len(check_text), 1)
def test_link_dest_bad_while_text_ignored(self):
validator = self._create_validator("""
[ignored text](nonexistent.html)""")
self.assertFalse(validator._validate_links())
def test_link_dest_good_while_text_ignored(self):
validator = self._create_validator("""
[ignored text](01-one.html)""")
self.assertTrue(validator._validate_links())
def test_sample_file_passes_validation(self):
sample_validator = self.VALIDATOR(self.SAMPLE_FILE)
res = sample_validator.validate()
self.assertTrue(res)
class TestLicensePage(BaseTemplateTest):
SAMPLE_FILE = os.path.join(MARKDOWN_DIR, "LICENSE.md")
VALIDATOR = check.LicensePageValidator
def test_sample_file_passes_validation(self):
sample_validator = self.VALIDATOR(self.SAMPLE_FILE)
res = sample_validator.validate()
self.assertTrue(res)
def test_modified_file_fails_validation(self):
with open(self.SAMPLE_FILE, 'rU') as f:
orig_text = f.read()
mod_text = orig_text.replace("The", "the")
validator = self._create_validator(mod_text)
self.assertFalse(validator.validate())
class TestDiscussionPage(BaseTemplateTest):
SAMPLE_FILE = os.path.join(MARKDOWN_DIR, "discussion.md")
VALIDATOR = check.DiscussionPageValidator
def test_sample_file_passes_validation(self):
sample_validator = self.VALIDATOR(self.SAMPLE_FILE)
res = sample_validator.validate()
self.assertTrue(res)
if __name__ == "__main__":
unittest.main()
| cc0-1.0 | 7,094,286,710,470,991,000 | 34.790295 | 112 | 0.674786 | false |
andredalton/imecare | imecare/models.py | 1 | 3418 | #!/usr/bin/python
# -*- coding: UTF8 -*-
import re
from django.db import models
from django.contrib.auth.models import User
def verifica_cpf(cpf):
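    # Validate a Brazilian CPF: recompute the two trailing check digits from the first nine digits (weights 10..2 and 11..2, modulo 11).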
m = re.search("([0-9]{3})\.([0-9]{3})\.([0-9]{3})-([0-9]{2})", cpf)
corpo = map(int, list(m.group(1) + m.group(2) + m.group(3)))
digito = map(int, list(m.group(4)))
mult = zip(corpo, range(10, 1, -1))
soma = sum(map(lambda tup: tup[0]*tup[1], mult))
if ((10*soma) % 11) % 10 == digito[0]:
mult = zip(corpo + [digito[0]], range(11, 1, -1))
soma = sum(map(lambda tup: tup[0]*tup[1], mult))
if ((10*soma) % 11) % 10 == digito[1]:
return True
return False
SANGUE_CHOICES = (
('A+', 'A+'),
('A-', 'A-'),
('AB+', 'AB+'),
('AB-', 'AB-'),
('B+', 'B+'),
('B-', 'B-'),
('O+', 'O+'),
('O-', 'O-')
)
class Pessoa(User):
nome = models.CharField(max_length=150)
rg = models.CharField(max_length=15, verbose_name='RG', unique=True)
cpf = models.CharField(max_length=15, verbose_name='CPF', unique=True)
crm = models.CharField(max_length=15, verbose_name='CRM', unique=True, null=True, blank=True)
tipo_sanguineo = models.CharField(max_length=3, choices=SANGUE_CHOICES, verbose_name='Tipo sanguíneo')
data_nascimento = models.DateField(verbose_name='Data de nascimento')
def save(self):
        # Make the Django username identical to the CPF
self.username = self.cpf
if verifica_cpf(self.cpf):
return super(Pessoa, self).save()
class Atendimento(models.Model):
medico = models.ForeignKey(Pessoa, related_name='medico')
paciente = models.ForeignKey(Pessoa, related_name='paciente')
comentarios = models.TextField(verbose_name='comentários', blank=True)
data = models.DateField(auto_now=True)
horario = models.TimeField(auto_now=True)
class Meta:
unique_together = (("medico", "paciente", "data", "horario"),)
def save(self):
if self.medico.is_staff:
return super(Atendimento, self).save()
class Procedimento(models.Model):
nome = models.CharField(max_length=100, primary_key=True)
class Solicita(models.Model):
procedimento = models.ForeignKey(Procedimento)
atendimento = models.ForeignKey(Atendimento)
detalhes = models.TextField(blank=True)
class Realiza(models.Model):
solicitacao = models.ForeignKey(Solicita, null=True, default=None)
procedimento = models.ForeignKey(Procedimento)
paciente = models.ForeignKey(Pessoa)
data = models.DateField(auto_now=True)
horario = models.TimeField(auto_now=True)
class Doenca(models.Model):
nome = models.CharField(max_length=150, unique=True)
cid = models.CharField(max_length=15, primary_key=True)
generica = models.ForeignKey("self", null=True)
class Diagnosticada(models.Model):
atendimento = models.ForeignKey(Atendimento)
doenca = models.ForeignKey(Doenca)
cronica = models.BooleanField(default=False, verbose_name='crônica')
paciente = models.ForeignKey(Pessoa)
inicio = models.DateField(auto_now=True)
fim = models.DateField(null=True, default=None)
class Prontuario(models.Model):
categoria = models.CharField(max_length=20)
paciente = models.ForeignKey(Pessoa, related_name='+')
medico = models.ForeignKey(Pessoa, null=True, related_name='+')
texto = models.CharField(max_length=255)
data = models.DateField()
| mit | -3,920,703,166,612,884,000 | 32.470588 | 106 | 0.650264 | false |
ContinuumIO/flask-kerberos-login | flask_kerberos_login/manager.py | 1 | 4189 | '''
Provides a pluggable login manager that uses Kerberos for authentication
'''
from __future__ import absolute_import, print_function, unicode_literals
import logging
import socket
from flask import _request_ctx_stack as stack
from flask import abort
from flask import request
import kerberos
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def _gssapi_authenticate(token, service_name):
'''
Performs GSSAPI Negotiate Authentication
Parameters:
token (str): GSSAPI Authentication Token
service_name (str): GSSAPI service name
Returns:
tuple of
(str | None) username
(str | None) GSSAPI token
'''
state = None
try:
rc, state = kerberos.authGSSServerInit(service_name)
if rc != kerberos.AUTH_GSS_COMPLETE:
log.warn('Unable to initialize server context')
return None, None
rc = kerberos.authGSSServerStep(state, token)
if rc == kerberos.AUTH_GSS_COMPLETE:
log.debug('Completed GSSAPI negotiation')
return (
kerberos.authGSSServerUserName(state),
kerberos.authGSSServerResponse(state),
)
        elif rc == kerberos.AUTH_GSS_CONTINUE:
            log.debug('Continuing GSSAPI negotiation')
            # Keep the documented (username, token) return contract: no user
            # yet, but hand back the partial token so negotiation can continue.
            return None, kerberos.authGSSServerResponse(state)
else:
log.info('Unable to step server context')
return None, None
except kerberos.GSSError:
log.info('Unable to authenticate', exc_info=True)
return None, None
finally:
if state:
kerberos.authGSSServerClean(state)
def default_save_callback(user):
pass
class KerberosLoginManager(object):
def __init__(self, app=None):
self._save_user = default_save_callback
self._service_name = None
self.app = app
if app is not None:
self.init_app(app)
def save_user(self, callback):
'''
This sets the callback for saving a user that has been loaded from a
kerberos ticket.
'''
self._save_user = callback
return callback
def init_app(self, app):
'''
Initializes the extension with the application object
'''
self.app = app
app.kerberos_manager = self
app.before_request(self.extract_token)
app.after_request(self.append_header)
self.init_config(app.config)
def init_config(self, config):
service = config.setdefault('KRB5_SERVICE_NAME', b'HTTP')
hostname = config.setdefault('KRB5_HOSTNAME', socket.gethostname())
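        # GSSAPI service principal in "service@host" form, e.g. HTTP@hostname.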
self._service_name = b'{}@{}'.format(service, hostname)
try:
principal = kerberos.getServerPrincipalDetails(service, hostname)
except kerberos.KrbError:
log.warn("Error initializing Kerberos for %s", self._service_name, exc_info=True)
else:
log.info("Server principal is %s", principal)
def extract_token(self):
'''
Extracts a token from the current HTTP request if it is available.
Invokes the `save_user` callback if authentication is successful.
'''
header = request.headers.get(b'authorization')
if header and header.startswith(b'Negotiate '):
token = header[10:]
user, token = _gssapi_authenticate(token, self._service_name)
if token is not None:
stack.top.kerberos_token = token
if user is not None:
self._save_user(user)
else:
# Invalid Kerberos ticket, we could not complete authentication
abort(403)
def append_header(self, response):
'''
Adds WWW-Authenticate header with SPNEGO challenge or Kerberos token
'''
token = getattr(stack.top, 'kerberos_token', None)
if response.status_code == 401:
# Negotiate is an additional authenticate method.
response.headers.add('WWW-Authenticate', 'Negotiate')
elif token:
response.headers['WWW-Authenticate'] = 'Negotiate {}'.format(token)
return response
| bsd-2-clause | -2,819,058,152,270,959,000 | 29.355072 | 93 | 0.613512 | false |
hirofumi0810/tensorflow_end2end_speech_recognition | models/test/test_joint_ctc_attention.py | 1 | 9620 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import tensorflow as tf
# from tensorflow.python import debug as tf_debug
sys.path.append(os.path.abspath('../../'))
from models.attention.joint_ctc_attention import JointCTCAttention
from models.test.data import generate_data, idx2alpha
from utils.io.labels.phone import Idx2phone
from utils.io.labels.sparsetensor import list2sparsetensor
from utils.parameter import count_total_parameters
from utils.training.learning_rate_controller import Controller
from utils.measure_time_func import measure_time
class TestAttention(tf.test.TestCase):
def test(self):
print("Joint CTC-Attention Working check.")
# self.check(label_type='phone')
self.check(label_type='character')
@measure_time
def check(self, label_type='phone'):
print('==================================================')
print(' label_type: %s' % label_type)
print('==================================================')
tf.reset_default_graph()
with tf.Graph().as_default():
# Load batch data
batch_size = 4
inputs, labels, ctc_labels, inputs_seq_len, labels_seq_len = generate_data(
label_type=label_type,
model='joint_ctc_attention',
batch_size=batch_size)
# Define model graph
num_classes = 27 if label_type == 'character' else 61
model = JointCTCAttention(input_size=inputs[0].shape[1],
encoder_type='blstm',
encoder_num_units=256,
encoder_num_layers=2,
encoder_num_proj=None,
attention_type='dot_product',
attention_dim=128,
decoder_type='lstm',
decoder_num_units=256,
decoder_num_layers=1,
embedding_dim=64,
lambda_weight=0.5,
num_classes=num_classes,
sos_index=num_classes,
eos_index=num_classes + 1,
max_decode_length=100,
use_peephole=True,
splice=1,
parameter_init=0.1,
clip_grad_norm=5.0,
clip_activation_encoder=50,
clip_activation_decoder=50,
weight_decay=1e-8,
time_major=True,
sharpening_factor=1.0,
logits_temperature=1.0)
# Define placeholders
model.create_placeholders()
learning_rate_pl = tf.placeholder(tf.float32, name='learning_rate')
# Add to the graph each operation
loss_op, logits, ctc_logits, decoder_outputs_train, decoder_outputs_infer = model.compute_loss(
model.inputs_pl_list[0],
model.labels_pl_list[0],
model.ctc_labels_pl_list[0],
model.inputs_seq_len_pl_list[0],
model.labels_seq_len_pl_list[0],
model.keep_prob_encoder_pl_list[0],
model.keep_prob_decoder_pl_list[0],
model.keep_prob_embedding_pl_list[0])
train_op = model.train(loss_op,
optimizer='adam',
learning_rate=learning_rate_pl)
decode_op_train, decode_op_infer = model.decode(
decoder_outputs_train,
decoder_outputs_infer)
ler_op = model.compute_ler(model.labels_st_true_pl,
model.labels_st_pred_pl)
# Define learning rate controller
learning_rate = 1e-3
lr_controller = Controller(learning_rate_init=learning_rate,
decay_start_epoch=20,
decay_rate=0.9,
decay_patient_epoch=10,
lower_better=True)
# Add the variable initializer operation
init_op = tf.global_variables_initializer()
# Count total parameters
parameters_dict, total_parameters = count_total_parameters(
tf.trainable_variables())
for parameter_name in sorted(parameters_dict.keys()):
print("%s %d" %
(parameter_name, parameters_dict[parameter_name]))
print("Total %d variables, %s M parameters" %
(len(parameters_dict.keys()),
"{:,}".format(total_parameters / 1000000)))
# Make feed dict
feed_dict = {
model.inputs_pl_list[0]: inputs,
model.labels_pl_list[0]: labels,
model.ctc_labels_pl_list[0]: list2sparsetensor(ctc_labels, padded_value=-1),
model.inputs_seq_len_pl_list[0]: inputs_seq_len,
model.labels_seq_len_pl_list[0]: labels_seq_len,
model.keep_prob_encoder_pl_list[0]: 0.8,
model.keep_prob_decoder_pl_list[0]: 1.0,
model.keep_prob_embedding_pl_list[0]: 1.0,
learning_rate_pl: learning_rate
}
idx2phone = Idx2phone(map_file_path='./phone61.txt')
with tf.Session() as sess:
# Initialize parameters
sess.run(init_op)
# Wrapper for tfdbg
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# Train model
max_steps = 1000
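                # Overfit the tiny synthetic batch; training stops once the label error rate drops below 0.1.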
start_time_step = time.time()
for step in range(max_steps):
# Compute loss
_, loss_train = sess.run(
[train_op, loss_op], feed_dict=feed_dict)
# Gradient check
# grads = sess.run(model.clipped_grads,
# feed_dict=feed_dict)
# for grad in grads:
# print(np.max(grad))
if (step + 1) % 10 == 0:
# Change to evaluation mode
feed_dict[model.keep_prob_encoder_pl_list[0]] = 1.0
feed_dict[model.keep_prob_decoder_pl_list[0]] = 1.0
feed_dict[model.keep_prob_embedding_pl_list[0]] = 1.0
# Predict class ids
predicted_ids_train, predicted_ids_infer = sess.run(
[decode_op_train, decode_op_infer],
feed_dict=feed_dict)
# Compute accuracy
try:
feed_dict_ler = {
model.labels_st_true_pl: list2sparsetensor(
labels, padded_value=model.eos_index),
model.labels_st_pred_pl: list2sparsetensor(
predicted_ids_infer, padded_value=model.eos_index)
}
ler_train = sess.run(
ler_op, feed_dict=feed_dict_ler)
except IndexError:
ler_train = 1
duration_step = time.time() - start_time_step
print('Step %d: loss = %.3f / ler = %.4f (%.3f sec) / lr = %.5f' %
(step + 1, loss_train, ler_train, duration_step, learning_rate))
start_time_step = time.time()
# Visualize
if label_type == 'character':
print('True : %s' %
idx2alpha(labels[0]))
print('Pred (Training) : <%s' %
idx2alpha(predicted_ids_train[0]))
print('Pred (Inference): <%s' %
idx2alpha(predicted_ids_infer[0]))
else:
print('True : %s' %
idx2phone(labels[0]))
print('Pred (Training) : < %s' %
idx2phone(predicted_ids_train[0]))
print('Pred (Inference): < %s' %
idx2phone(predicted_ids_infer[0]))
if ler_train < 0.1:
print('Model is Converged.')
break
# Update learning rate
learning_rate = lr_controller.decay_lr(
learning_rate=learning_rate,
epoch=step,
value=ler_train)
feed_dict[learning_rate_pl] = learning_rate
if __name__ == "__main__":
tf.test.main()
| mit | -4,821,190,604,367,840,000 | 43.537037 | 107 | 0.440644 | false |
hammerlab/avm | normalizer.py | 1 | 1407 | class Normalizer(object):
"""
Subtract mean and divide features by standard deviation
before fitting/predicting
"""
def __init__(self, model, Xm=None, Xs=None):
self.model = model
self.Xm = Xm
self.Xs = Xs
# only provide `predict_proba` method to normalized model
# if it's available in the underlying model
if hasattr(model, 'predict_proba'):
self.predict_proba = self._predict_proba
def __str__(self):
return "Normalizer(%s)" % self.model
@property
def coef_(self):
return self.model.coef_
def fit(self, X, y, *args, **kwargs):
self.Xm = X.mean(axis=0)
X = X - self.Xm
self.Xs = X.std(axis=0)
self.Xs[self.Xs == 0] = 1
X = X / self.Xs
self.model.fit(X, y, *args, **kwargs)
def predict(self, X, *args, **kwargs):
X = X - self.Xm
X /= self.Xs
return self.model.predict(X, *args, **kwargs)
def _predict_proba(self, X, *args, **kwargs):
X = X - self.Xm
X /= self.Xs
return self.model.predict_proba(X, *args, **kwargs)
def decision_function(self, X, *args, **kwargs):
X = X - self.Xm
X /= self.Xs
return self.model.decision_function(X, *args, **kwargs)
def get_params(self, deep=False):
return {'Xm': self.Xm, 'Xs': self.Xs, 'model': self.model}
| apache-2.0 | 4,569,969,514,949,997,600 | 28.93617 | 66 | 0.552239 | false |
automl/ChaLearn_Automatic_Machine_Learning_Challenge_2015 | 004_evita.py | 1 | 16535 | import argparse
import os
from joblib import Parallel, delayed
import numpy as np
import autosklearn
import autosklearn.data
import autosklearn.data.competition_data_manager
from autosklearn.pipeline.classification import SimpleClassificationPipeline
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
input = args.input
dataset = 'evita'
output = args.output
path = os.path.join(input, dataset)
D = autosklearn.data.competition_data_manager.CompetitionDataManager(path)
X = D.data['X_train']
y = D.data['Y_train']
X_valid = D.data['X_valid']
X_test = D.data['X_test']
# Replace the following array by a new ensemble
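# Each entry pairs an ensemble weight with a fully configured auto-sklearn classification pipeline.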
choices = \
[(0.320000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'xgradient_boosting',
'classifier:xgradient_boosting:base_score': 0.5,
'classifier:xgradient_boosting:colsample_bylevel': 1,
'classifier:xgradient_boosting:colsample_bytree': 1,
'classifier:xgradient_boosting:gamma': 0,
'classifier:xgradient_boosting:learning_rate': 0.083957576764175909,
'classifier:xgradient_boosting:max_delta_step': 0,
'classifier:xgradient_boosting:max_depth': 9,
'classifier:xgradient_boosting:min_child_weight': 1,
'classifier:xgradient_boosting:n_estimators': 207,
'classifier:xgradient_boosting:reg_alpha': 0,
'classifier:xgradient_boosting:reg_lambda': 1,
'classifier:xgradient_boosting:scale_pos_weight': 1,
'classifier:xgradient_boosting:subsample': 0.79041547139233681,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.033271689466917775,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.140000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 1.0,
'classifier:extra_trees:min_samples_leaf': 1,
'classifier:extra_trees:min_samples_split': 2,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.10000000000000001,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.100000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 3.904721926856924,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 2,
'classifier:random_forest:min_samples_split': 7,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:minimum_fraction': 0.036176664478653142,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'select_percentile_classification',
'preprocessor:select_percentile_classification:percentile': 91.78175624881186,
'preprocessor:select_percentile_classification:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.0,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 1,
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.18915206967606921,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'standardize'})),
(0.080000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 0.59875097583441961,
'classifier:extra_trees:min_samples_leaf': 1,
'classifier:extra_trees:min_samples_split': 2,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.13663946292601112,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'standardize'})),
(0.060000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'True',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 1.0,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 1,
'classifier:random_forest:min_samples_split': 2,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.10000000000000001,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'False',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 2.4071018354857294,
'classifier:extra_trees:min_samples_leaf': 2,
'classifier:extra_trees:min_samples_split': 9,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.34844304591109215,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 2.3037777871550227,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 1,
'classifier:random_forest:min_samples_split': 6,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'standardize'})),
(0.040000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'entropy',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 3.9417933307381925,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 2,
'classifier:random_forest:min_samples_split': 3,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:minimum_fraction': 0.076515481895064422,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.39998541946519961,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'True',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 2.6560184696178109,
'classifier:extra_trees:min_samples_leaf': 1,
'classifier:extra_trees:min_samples_split': 9,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.49576705570976692,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'extra_trees',
'classifier:extra_trees:bootstrap': 'True',
'classifier:extra_trees:criterion': 'gini',
'classifier:extra_trees:max_depth': 'None',
'classifier:extra_trees:max_features': 2.8762254807814838,
'classifier:extra_trees:min_samples_leaf': 7,
'classifier:extra_trees:min_samples_split': 7,
'classifier:extra_trees:min_weight_fraction_leaf': 0.0,
'classifier:extra_trees:n_estimators': 100,
'imputation:strategy': 'mean',
'one_hot_encoding:minimum_fraction': 0.00037525617209727315,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.36323622954313295,
'preprocessor:select_rates:mode': 'fpr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'min/max'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'gini',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 4.7911724862642,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 1,
'classifier:random_forest:min_samples_split': 11,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'median',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.47510655107871991,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'standardize'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'none',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'entropy',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 4.9237570615905248,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 13,
'classifier:random_forest:min_samples_split': 15,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:minimum_fraction': 0.00028264986304734767,
'one_hot_encoding:use_minimum_fraction': 'True',
'preprocessor:__choice__': 'select_rates',
'preprocessor:select_rates:alpha': 0.27910583898194102,
'preprocessor:select_rates:mode': 'fdr',
'preprocessor:select_rates:score_func': 'chi2',
'rescaling:__choice__': 'none'})),
(0.020000, SimpleClassificationPipeline(configuration={
'balancing:strategy': 'weighting',
'classifier:__choice__': 'random_forest',
'classifier:random_forest:bootstrap': 'False',
'classifier:random_forest:criterion': 'entropy',
'classifier:random_forest:max_depth': 'None',
'classifier:random_forest:max_features': 3.0988613659452917,
'classifier:random_forest:max_leaf_nodes': 'None',
'classifier:random_forest:min_samples_leaf': 3,
'classifier:random_forest:min_samples_split': 3,
'classifier:random_forest:min_weight_fraction_leaf': 0.0,
'classifier:random_forest:n_estimators': 100,
'imputation:strategy': 'most_frequent',
'one_hot_encoding:use_minimum_fraction': 'False',
'preprocessor:__choice__': 'no_preprocessing',
'rescaling:__choice__': 'none'})),
]
targets = []
predictions = []
predictions_valid = []
predictions_test = []
def fit_and_predict(estimator, weight, X, y):
try:
estimator.fit(X.copy(), y.copy())
pv = estimator.predict_proba(X_valid.copy()) * weight
pt = estimator.predict_proba(X_test.copy()) * weight
except Exception as e:
print(e)
print(estimator.configuration)
pv = None
pt = None
return pv, pt
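# --- Editor's note: illustration only, not part of the generated script. ---
# The aggregation below is a weighted soft vote: each pipeline's predict_proba
# output is scaled by its ensemble weight and the scaled matrices are summed.
# The weights above (0.32 + 0.14 + 0.10 + 2*0.08 + 0.06 + 3*0.04 + 5*0.02) sum
# to 1.0, so the combined scores can still be read as class probabilities.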
# Make predictions and weight them
all_predictions = Parallel(n_jobs=-1)(
    delayed(fit_and_predict)(estimator, weight, X, y)
    for weight, estimator in choices)
for pv, pt in all_predictions:
    # Skip estimators that failed to fit (fit_and_predict returned None),
    # otherwise the summation below breaks on None entries.
    if pv is not None and pt is not None:
        predictions_valid.append(pv)
        predictions_test.append(pt)
# Output the predictions
for name, predictions in [('valid', predictions_valid),
('test', predictions_test)]:
predictions = np.array(predictions)
predictions = np.sum(predictions, axis=0).astype(np.float32)
predictions = predictions[:, 1].reshape((-1, 1))
filepath = os.path.join(output, '%s_%s_000.predict' % (dataset, name))
np.savetxt(filepath, predictions, delimiter=' ', fmt='%.4e')
| bsd-2-clause | -1,401,804,744,981,788,000 | 49.258359 | 87 | 0.640762 | false |
hmen89/odoo | addons/l10n_be_coda/__openerp__.py | 12 | 3885 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Import Bank CODA Statements',
'version': '2.1',
'author': 'Noviat',
'category': 'Accounting & Finance',
'description': '''
Module to import CODA bank statements.
======================================
Supported are CODA flat files in V2 format from Belgian bank accounts.
----------------------------------------------------------------------
* CODA v1 support.
* CODA v2.2 support.
* Foreign Currency support.
* Support for all data record types (0, 1, 2, 3, 4, 8, 9).
* Parsing & logging of all Transaction Codes and Structured Format
Communications.
* Automatic Financial Journal assignment via CODA configuration parameters.
* Support for multiple Journals per Bank Account Number.
* Support for multiple statements from different bank accounts in a single
CODA file.
* Support for 'parsing only' CODA Bank Accounts (defined as type='info' in
the CODA Bank Account configuration records).
* Multi-language CODA parsing, parsing configuration data provided for EN,
NL, FR.
The machine readable CODA Files are parsed and stored in human readable format in
CODA Bank Statements. Also Bank Statements are generated containing a subset of
the CODA information (only those transaction lines that are required for the
creation of the Financial Accounting records). The CODA Bank Statement is a
'read-only' object, hence remaining a reliable representation of the original
CODA file whereas the Bank Statement will get modified as required by accounting
business processes.
CODA Bank Accounts configured as type 'Info' will only generate CODA Bank Statements.
A removal of one object in the CODA processing results in the removal of the
associated objects. The removal of a CODA File containing multiple Bank
Statements will also remove those associated statements.
Instead of a manual adjustment of the generated Bank Statements, you can also
re-import the CODA after updating the OpenERP database with the information that
was missing to allow automatic reconciliation.
Remark on CODA V1 support:
~~~~~~~~~~~~~~~~~~~~~~~~~~
In some cases a transaction code, transaction category or structured
communication code has been given a new or clearer description in CODA V2. The
description provided by the CODA configuration tables is based upon the CODA
V2.2 specifications.
If required, you can manually adjust the descriptions via the CODA configuration menu.
''',
'images': ['images/coda_logs.jpeg', 'images/import_coda_logs.jpeg'],
'depends': ['account_voucher', 'base_iban', 'l10n_be_invoice_bba', 'account_bank_statement_import'],
'demo': ['l10n_be_coda_demo.xml'],
'data': [
'l10n_be_coda_view.xml',
],
'auto_install': False,
'installable': True,
'license': 'AGPL-3',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,042,074,365,572,776,400 | 44.705882 | 104 | 0.683398 | false |
yyuu/botornado | boto/__init__.py | 2 | 24502 | # Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.pyami.config import Config, BotoConfigLocations
from boto.storage_uri import BucketStorageUri, FileStorageUri
import boto.plugin
import os, re, sys
import logging
import logging.config
import urlparse
from boto.exception import InvalidUriError
__version__ = '2.2.2'
Version = __version__ # for backward compatibility
UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
config = Config()
def init_logging():
for file in BotoConfigLocations:
try:
logging.config.fileConfig(os.path.expanduser(file))
except:
pass
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('boto')
log.addHandler(NullHandler())
init_logging()
# convenience function to set logging to a particular file
def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.FileHandler(filepath)
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
def set_stream_logger(name, level=logging.DEBUG, format_string=None):
global log
if not format_string:
format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
fh = logging.StreamHandler()
fh.setLevel(level)
formatter = logging.Formatter(format_string)
fh.setFormatter(formatter)
logger.addHandler(fh)
log = logger
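# --- Editor's note: illustrative sketch, not part of the original boto code. ---
# Typical use of the logging helpers above; the file path is hypothetical.
#
#   import boto, logging
#   boto.set_stream_logger('boto', level=logging.DEBUG)
#   conn = boto.connect_s3()                      # requests are now logged
#   boto.set_file_logger('boto', '/tmp/boto.log')  # or log to a file instead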
def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sqs.connection.SQSConnection`
:return: A connection to Amazon's SQS
"""
from boto.sqs.connection import SQSConnection
return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Amazon's S3
"""
from boto.s3.connection import S3Connection
return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
"""
@type gs_access_key_id: string
@param gs_access_key_id: Your Google Cloud Storage Access Key ID
@type gs_secret_access_key: string
@param gs_secret_access_key: Your Google Cloud Storage Secret Access Key
@rtype: L{GSConnection<boto.gs.connection.GSConnection>}
@return: A connection to Google's Storage service
"""
from boto.gs.connection import GSConnection
return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Amazon's EC2
"""
from boto.ec2.connection import EC2Connection
return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.elb.ELBConnection`
:return: A connection to Amazon's Load Balancing Service
"""
from boto.ec2.elb import ELBConnection
return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
:return: A connection to Amazon's Auto Scaling Service
"""
from boto.ec2.autoscale import AutoScaleConnection
return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
:return: A connection to Amazon's EC2 Monitoring service
"""
from boto.ec2.cloudwatch import CloudWatchConnection
return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sdb.connection.SDBConnection`
:return: A connection to Amazon's SDB
"""
from boto.sdb.connection import SDBConnection
return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.fps.connection.FPSConnection`
:return: A connection to FPS
"""
from boto.fps.connection import FPSConnection
return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.mturk.connection.MTurkConnection`
:return: A connection to MTurk
"""
from boto.mturk.connection import MTurkConnection
return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
    :rtype: :class:`boto.cloudfront.CloudFrontConnection`
    :return: A connection to Amazon's CloudFront service
"""
from boto.cloudfront import CloudFrontConnection
return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.vpc.VPCConnection`
:return: A connection to VPC
"""
from boto.vpc import VPCConnection
return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.rds.RDSConnection`
:return: A connection to RDS
"""
from boto.rds import RDSConnection
return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.emr.EmrConnection`
:return: A connection to Elastic mapreduce
"""
from boto.emr import EmrConnection
return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sns.SNSConnection`
:return: A connection to Amazon's SNS
"""
from boto.sns import SNSConnection
return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.iam.IAMConnection`
:return: A connection to Amazon's IAM
"""
from boto.iam import IAMConnection
return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.dns.Route53Connection`
:return: A connection to Amazon's Route53 DNS Service
"""
from boto.route53 import Route53Connection
return Route53Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_cloudformation(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.cloudformation.CloudFormationConnection`
:return: A connection to Amazon's CloudFormation Service
"""
from boto.cloudformation import CloudFormationConnection
return CloudFormationConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Eucalyptus', is_secure=False,
**kwargs):
"""
Connect to a Eucalyptus service.
:type host: string
:param host: the host name or ip address of the Eucalyptus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
:return: A connection to Eucalyptus server
"""
from boto.ec2 import EC2Connection
from boto.ec2.regioninfo import RegionInfo
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'eucalyptus_host', None)
reg = RegionInfo(name='eucalyptus', endpoint=host)
return EC2Connection(aws_access_key_id, aws_secret_access_key,
region=reg, port=port, path=path,
is_secure=is_secure, **kwargs)
def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
"""
Connect to an EC2 Api endpoint. Additional arguments are passed
through to connect_ec2.
:type url: string
:param url: A url for the ec2 api endpoint to connect to
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ec2.connection.EC2Connection`
    :return: A connection to the given EC2 API endpoint
"""
from boto.ec2.regioninfo import RegionInfo
purl = urlparse.urlparse(url)
kwargs['port'] = purl.port
kwargs['host'] = purl.hostname
kwargs['path'] = purl.path
if not 'is_secure' in kwargs:
kwargs['is_secure'] = (purl.scheme == "https")
kwargs['region'] = RegionInfo(name = purl.hostname,
endpoint = purl.hostname)
kwargs['aws_access_key_id']=aws_access_key_id
kwargs['aws_secret_access_key']=aws_secret_access_key
return(connect_ec2(**kwargs))
def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None,
port=8773, path='/services/Walrus', is_secure=False,
**kwargs):
"""
Connect to a Walrus service.
:type host: string
:param host: the host name or ip address of the Walrus server
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to Walrus
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
# Check for values in boto config, if not supplied as args
if not aws_access_key_id:
aws_access_key_id = config.get('Credentials',
'euca_access_key_id',
None)
if not aws_secret_access_key:
aws_secret_access_key = config.get('Credentials',
'euca_secret_access_key',
None)
if not host:
host = config.get('Boto', 'walrus_host', None)
return S3Connection(aws_access_key_id, aws_secret_access_key,
host=host, port=port, path=path,
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.ses.SESConnection`
:return: A connection to Amazon's SES
"""
from boto.ses import SESConnection
return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_sts(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.sts.STSConnection`
:return: A connection to Amazon's STS
"""
from boto.sts import STSConnection
return STSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
is_secure=False, **kwargs):
"""
Connect to the Internet Archive via their S3-like API.
:type ia_access_key_id: string
:param ia_access_key_id: Your IA Access Key ID. This will also look in your
boto config file for an entry in the Credentials
section called "ia_access_key_id"
:type ia_secret_access_key: string
:param ia_secret_access_key: Your IA Secret Access Key. This will also
look in your boto config file for an entry
in the Credentials section called
"ia_secret_access_key"
:rtype: :class:`boto.s3.connection.S3Connection`
:return: A connection to the Internet Archive
"""
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
access_key = config.get('Credentials', 'ia_access_key_id',
ia_access_key_id)
secret_key = config.get('Credentials', 'ia_secret_access_key',
ia_secret_access_key)
return S3Connection(access_key, secret_key,
host='s3.us.archive.org',
calling_format=OrdinaryCallingFormat(),
is_secure=is_secure, **kwargs)
def connect_dynamodb(aws_access_key_id=None,
aws_secret_access_key=None,
**kwargs):
"""
:type aws_access_key_id: string
:param aws_access_key_id: Your AWS Access Key ID
:type aws_secret_access_key: string
:param aws_secret_access_key: Your AWS Secret Access Key
:rtype: :class:`boto.dynamodb.layer2.Layer2`
:return: A connection to the Layer2 interface for DynamoDB.
"""
from boto.dynamodb.layer2 import Layer2
return Layer2(aws_access_key_id, aws_secret_access_key, **kwargs)
def check_extensions(module_name, module_path):
"""
This function checks for extensions to boto modules. It should be called in the
__init__.py file of all boto modules. See:
http://code.google.com/p/boto/wiki/ExtendModules
for details.
"""
option_name = '%s_extend' % module_name
version = config.get('Boto', option_name, None)
if version:
dirname = module_path[0]
path = os.path.join(dirname, version)
if os.path.isdir(path):
log.info('extending module %s with: %s' % (module_name, path))
module_path.insert(0, path)
_aws_cache = {}
def _get_aws_conn(service):
global _aws_cache
conn = _aws_cache.get(service)
if not conn:
meth = getattr(sys.modules[__name__], 'connect_' + service)
conn = meth()
_aws_cache[service] = conn
return conn
def lookup(service, name):
global _aws_cache
conn = _get_aws_conn(service)
obj = _aws_cache.get('.'.join((service, name)), None)
if not obj:
obj = conn.lookup(name)
_aws_cache['.'.join((service, name))] = obj
return obj
def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
bucket_storage_uri_class=BucketStorageUri,
suppress_consec_slashes=True):
"""
Instantiate a StorageUri from a URI string.
:type uri_str: string
:param uri_str: URI naming bucket + optional object.
:type default_scheme: string
:param default_scheme: default scheme for scheme-less URIs.
:type debug: int
:param debug: debug level to pass in to boto connection (range 0..2).
:type validate: bool
:param validate: whether to check for bucket name validity.
:type bucket_storage_uri_class: BucketStorageUri interface.
:param bucket_storage_uri_class: Allows mocking for unit tests.
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
We allow validate to be disabled to allow caller
to implement bucket-level wildcarding (outside the boto library;
see gsutil).
:rtype: :class:`boto.StorageUri` subclass
:return: StorageUri subclass for given URI.
``uri_str`` must be one of the following formats:
* gs://bucket/name
* s3://bucket/name
* gs://bucket
* s3://bucket
* filename
The last example uses the default scheme ('file', unless overridden)
"""
# Manually parse URI components instead of using urlparse.urlparse because
# what we're calling URIs don't really fit the standard syntax for URIs
# (the latter includes an optional host/net location part).
end_scheme_idx = uri_str.find('://')
if end_scheme_idx == -1:
# Check for common error: user specifies gs:bucket instead
# of gs://bucket. Some URI parsers allow this, but it can cause
# confusion for callers, so we don't.
if uri_str.find(':') != -1:
raise InvalidUriError('"%s" contains ":" instead of "://"' % uri_str)
scheme = default_scheme.lower()
path = uri_str
else:
scheme = uri_str[0:end_scheme_idx].lower()
path = uri_str[end_scheme_idx + 3:]
if scheme not in ['file', 's3', 'gs']:
raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
if scheme == 'file':
# For file URIs we have no bucket name, and use the complete path
# (minus 'file://') as the object name.
is_stream = False
if path == '-':
is_stream = True
return FileStorageUri(path, debug, is_stream)
else:
path_parts = path.split('/', 1)
bucket_name = path_parts[0]
if (validate and bucket_name and
# Disallow buckets violating charset or not [3..255] chars total.
(not re.match('^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$', bucket_name)
# Disallow buckets with individual DNS labels longer than 63.
or re.search('[-_a-z0-9]{64}', bucket_name))):
raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
# If enabled, ensure the bucket name is valid, to avoid possibly
# confusing other parts of the code. (For example if we didn't
# catch bucket names containing ':', when a user tried to connect to
# the server with that name they might get a confusing error about
# non-integer port numbers.)
object_name = ''
if len(path_parts) > 1:
object_name = path_parts[1]
return bucket_storage_uri_class(
scheme, bucket_name, object_name, debug,
suppress_consec_slashes=suppress_consec_slashes)
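# --- Editor's note: illustrative sketch, not part of the original boto code. ---
# Example uses of storage_uri(); bucket/key names are hypothetical and assume
# credentials are available in the boto config.
#
#   uri = storage_uri('s3://my-example-bucket/path/to/key.txt')
#   key = uri.get_key()                      # resolves bucket + object lazily
#   local = storage_uri('/tmp/example.txt')  # scheme-less -> 'file' URI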
def storage_uri_for_key(key):
"""Returns a StorageUri for the given key.
:type key: :class:`boto.s3.key.Key` or subclass
:param key: URI naming bucket + optional object.
"""
if not isinstance(key, boto.s3.key.Key):
raise InvalidUriError('Requested key (%s) is not a subclass of '
'boto.s3.key.Key' % str(type(key)))
prov_name = key.bucket.connection.provider.get_provider_name()
uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
return storage_uri(uri_str)
boto.plugin.load_plugins(config)
| mit | 617,761,813,169,589,000 | 36.464832 | 89 | 0.662354 | false |
steveb/heat | heat/tests/openstack/cinder/test_volume_type_encryption.py | 1 | 4307 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.engine.clients.os import cinder as c_plugin
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
cinder_volume_type_encryption = {
'heat_template_version': '2015-04-30',
'resources': {
'my_encrypted_vol_type': {
'type': 'OS::Cinder::EncryptedVolumeType',
'properties': {
'provider': 'nova.volume.encryptors.luks.LuksEncryptor',
'control_location': 'front-end',
'cipher': 'aes-xts-plain64',
'key_size': '512',
'volume_type': '01bd581d-33fe-4d6d-bd7b-70ae076d39fb'
}
}
}
}
class CinderEncryptedVolumeTypeTest(common.HeatTestCase):
def setUp(self):
super(CinderEncryptedVolumeTypeTest, self).setUp()
self.ctx = utils.dummy_context()
self.patchobject(c_plugin.CinderClientPlugin, 'has_extension',
return_value=True)
self.stack = stack.Stack(
self.ctx, 'cinder_vol_type_encryption_test_stack',
template.Template(cinder_volume_type_encryption)
)
self.my_encrypted_vol_type = self.stack['my_encrypted_vol_type']
cinder = mock.MagicMock()
self.cinderclient = mock.MagicMock()
self.my_encrypted_vol_type.client = cinder
cinder.return_value = self.cinderclient
self.volume_encryption_types = (
self.cinderclient.volume_encryption_types)
def test_handle_create(self):
value = mock.MagicMock()
volume_type_id = '01bd581d-33fe-4d6d-bd7b-70ae076d39fb'
value.volume_type_id = volume_type_id
self.volume_encryption_types.create.return_value = value
with mock.patch.object(self.my_encrypted_vol_type.client_plugin(),
'get_volume_type') as mock_get_volume_type:
mock_get_volume_type.return_value = volume_type_id
self.my_encrypted_vol_type.handle_create()
mock_get_volume_type.assert_called_once_with(volume_type_id)
specs = {
'control_location': 'front-end',
'cipher': 'aes-xts-plain64',
'key_size': 512,
'provider': 'nova.volume.encryptors.luks.LuksEncryptor'
}
self.volume_encryption_types.create.assert_called_once_with(
volume_type=volume_type_id, specs=specs)
self.assertEqual(volume_type_id,
self.my_encrypted_vol_type.resource_id)
def test_handle_update(self):
update_args = {
'control_location': 'back-end',
'key_size': 256,
'cipher': 'aes-cbc-essiv',
'provider':
'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
}
volume_type_id = '01bd581d-33fe-4d6d-bd7b-70ae076d39fb'
self.my_encrypted_vol_type.resource_id = volume_type_id
self.my_encrypted_vol_type.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=update_args)
self.volume_encryption_types.update.assert_called_once_with(
volume_type=volume_type_id, specs=update_args)
def test_volume_type_show_resource(self):
volume_type_id = '01bd581d-33fe-4d6d-bd7b-70ae076d39fb'
self.my_encrypted_vol_type.resource_id = volume_type_id
volume_type = mock.Mock()
volume_type.to_dict = lambda: {'vtype': 'info'}
self.volume_encryption_types.get.return_value = volume_type
self.assertEqual({'vtype': 'info'},
self.my_encrypted_vol_type.FnGetAtt('show'))
| apache-2.0 | 3,891,123,585,374,962,000 | 39.632075 | 78 | 0.612956 | false |
veridiam/Madcow-Waaltz | build/lib/madcow/include/twisted/python/compat.py | 10 | 5694 | # -*- test-case-name: twisted.test.test_compat -*-
#
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Compatibility module to provide backwards compatibility for useful Python
features.
This is mainly for use of internal Twisted code. We encourage you to use
the latest version of Python directly from your code, if possible.
"""
import sys, string, socket, struct
def inet_pton(af, addr):
if af == socket.AF_INET:
return socket.inet_aton(addr)
elif af == getattr(socket, 'AF_INET6', 'AF_INET6'):
        bad = [x for x in addr if x not in string.hexdigits + ':.']
        if bad:
            raise ValueError("Illegal characters: %r" % (''.join(bad),))
parts = addr.split(':')
elided = parts.count('')
ipv4Component = '.' in parts[-1]
if len(parts) > (8 - ipv4Component) or elided > 3:
raise ValueError("Syntactically invalid address")
if elided == 3:
return '\x00' * 16
if elided:
zeros = ['0'] * (8 - len(parts) - ipv4Component + elided)
if addr.startswith('::'):
parts[:2] = zeros
elif addr.endswith('::'):
parts[-2:] = zeros
else:
idx = parts.index('')
parts[idx:idx+1] = zeros
if len(parts) != 8 - ipv4Component:
raise ValueError("Syntactically invalid address")
else:
if len(parts) != (8 - ipv4Component):
raise ValueError("Syntactically invalid address")
if ipv4Component:
if parts[-1].count('.') != 3:
raise ValueError("Syntactically invalid address")
rawipv4 = socket.inet_aton(parts[-1])
unpackedipv4 = struct.unpack('!HH', rawipv4)
parts[-1:] = [hex(x)[2:] for x in unpackedipv4]
parts = [int(x, 16) for x in parts]
return struct.pack('!8H', *parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
def inet_ntop(af, addr):
if af == socket.AF_INET:
return socket.inet_ntoa(addr)
elif af == socket.AF_INET6:
if len(addr) != 16:
raise ValueError("address length incorrect")
parts = struct.unpack('!8H', addr)
curBase = bestBase = None
for i in range(8):
if not parts[i]:
if curBase is None:
curBase = i
curLen = 0
curLen += 1
else:
if curBase is not None:
if bestBase is None or curLen > bestLen:
bestBase = curBase
bestLen = curLen
curBase = None
if curBase is not None and (bestBase is None or curLen > bestLen):
bestBase = curBase
bestLen = curLen
parts = [hex(x)[2:] for x in parts]
if bestBase is not None:
parts[bestBase:bestBase + bestLen] = ['']
if parts[0] == '':
parts.insert(0, '')
if parts[-1] == '':
parts.insert(len(parts) - 1, '')
return ':'.join(parts)
else:
raise socket.error(97, 'Address family not supported by protocol')
try:
socket.inet_pton(socket.AF_INET6, "::")
except (AttributeError, NameError, socket.error):
socket.inet_pton = inet_pton
socket.inet_ntop = inet_ntop
socket.AF_INET6 = 'AF_INET6'
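# --- Editor's note: illustration only, not part of the original Twisted code. ---
# Quick round-trip check of the fallback implementations installed above; the
# address is an arbitrary example.
#
#   packed = socket.inet_pton(socket.AF_INET6, 'fe80::1')
#   assert socket.inet_ntop(socket.AF_INET6, packed) == 'fe80::1'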
adict = dict
# OpenSSL/__init__.py imports OpenSSL.tsafe. OpenSSL/tsafe.py imports
# threading. threading imports thread. All to make this stupid threadsafe
# version of its Connection class. We don't even care about threadsafe
# Connections. In the interest of not screwing over some crazy person
# calling into OpenSSL from another thread and trying to use Twisted's SSL
# support, we don't totally destroy OpenSSL.tsafe, but we will replace it
# with our own version which imports threading as late as possible.
class tsafe(object):
class Connection:
"""
OpenSSL.tsafe.Connection, defined in such a way as to not blow.
"""
__module__ = 'OpenSSL.tsafe'
def __init__(self, *args):
from OpenSSL import SSL as _ssl
self._ssl_conn = apply(_ssl.Connection, args)
from threading import _RLock
self._lock = _RLock()
for f in ('get_context', 'pending', 'send', 'write', 'recv',
'read', 'renegotiate', 'bind', 'listen', 'connect',
'accept', 'setblocking', 'fileno', 'shutdown',
'close', 'get_cipher_list', 'getpeername',
'getsockname', 'getsockopt', 'setsockopt',
'makefile', 'get_app_data', 'set_app_data',
'state_string', 'sock_shutdown',
'get_peer_certificate', 'want_read', 'want_write',
'set_connect_state', 'set_accept_state',
'connect_ex', 'sendall'):
exec """def %s(self, *args):
self._lock.acquire()
try:
return apply(self._ssl_conn.%s, args)
finally:
self._lock.release()\n""" % (f, f)
sys.modules['OpenSSL.tsafe'] = tsafe
import operator
try:
operator.attrgetter
except AttributeError:
class attrgetter(object):
def __init__(self, name):
self.name = name
def __call__(self, obj):
return getattr(obj, self.name)
operator.attrgetter = attrgetter
try:
set = set
except NameError:
from sets import Set as set
try:
frozenset = frozenset
except NameError:
from sets import ImmutableSet as frozenset
| gpl-3.0 | 6,915,360,839,763,245,000 | 32.892857 | 75 | 0.556551 | false |
naototty/pyflag | src/plugins/DiskForensics/FileHandlers/Partitions.py | 5 | 4057 | """ This module handles automatic loading of partition tables.
"""
import pyflag.Scanner as Scanner
import pyflag.DB as DB
import pyflag.FileSystem as FileSystem
import pyflag.pyflaglog as pyflaglog
import pyflag.CacheManager as CacheManager
import pdb
import sk
import pyflag.Magic as Magic
import pyflag.FlagFramework as FlagFramework
SECTOR_SIZE = 512
class PartitionScanner(Scanner.GenScanFactory):
""" Detects partitions in the image and creates VFS nodes for them.
"""
default = True
group = "Disk Forensics"
def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
if 'x86 boot sector' in type:
try:
parts = sk.mmls(fd)
except IOError,e:
print e
return
for part in parts:
## Make a unique and sensible name for this partition
name = "%s @ 0x%X" % (part[2], part[0])
## Add new maps for each partition
map = CacheManager.AFF4_MANAGER.create_cache_map(
fd.case,
"%s/%s" % (fd.urn.parser.query, name))
map.write_from(fd.urn, SECTOR_SIZE * part[0],
SECTOR_SIZE * part[1])
map.close()
## Now we recursively scan each object
fsfd = FileSystem.DBFS(fd.case)
new_fd = fsfd.open(inode_id = map.inode_id)
try:
fs = sk.skfs(new_fd)
fs.close()
## Lets add a hint
Magic.set_magic(fd.case,
inode_id = map.inode_id,
mime = "application/filesystem",
magic = "Filesystem")
except: pass
Scanner.scan_inode_distributed(fd.case, map.inode_id,
scanners, cookie)
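# --- Editor's note: illustration only, not part of the original pyflag code. ---
# As used in PartitionScanner.scan above, each sk.mmls() entry carries the
# start sector (part[0]), length in sectors (part[1]) and a description
# (part[2]), so a map covers bytes [part[0]*512, (part[0]+part[1])*512) of the
# parent image; e.g. a partition at sector 63 spanning 1000 sectors maps bytes
# 32256..544256.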
class FilesystemLoader(Scanner.GenScanFactory):
""" A Scanner to automatically load filesystem """
def create_map(self, fd, fs, skfs_inode, path):
block_size = fs.block_size
if str(skfs_inode) == "0-0-0":
return 1
if skfs_inode.alloc:
status = 'alloc'
else:
status = 'deleted'
## Add the map under the path
skfd = fs.open(inode=skfs_inode)
skfd.seek(0,2)
size = skfd.tell()
map = CacheManager.AFF4_MANAGER.create_cache_map(
fd.case,
"%s/__inodes__/%s" % (fd.urn.parser.query, skfs_inode),
size = size, target = fd.urn,
status=status)
for block in skfd.blocks():
map.write_from(fd.urn, block * block_size, block_size)
## update the size of the map
map.size.set(size)
CacheManager.AFF4_MANAGER.create_link(
fd.case,
map.urn, FlagFramework.sane_join(fd.urn.parser.query, path))
map.close()
def scan(self, fd, scanners, type, mime, cookie, scores=None, **args):
if 'Filesystem' in type:
print "Will load %s" % fd.urn.value
fs = sk.skfs(fd)
for root, dirs, files in fs.walk('/', unalloc=True, inodes=True):
for d, dirname in dirs:
self.create_map(fd, fs, d, FlagFramework.sane_join(root[1], dirname))
for f, filename in files:
self.create_map(fd, fs, f, FlagFramework.sane_join(root[1], filename))
## UnitTests:
import unittest
import pyflag.pyflagsh as pyflagsh
import pyflag.tests
class PartitionTest(pyflag.tests.ScannerTest):
""" Test Partition scanner and Filesystem loader """
test_case = "PyFlagTestCase"
test_file = "pyflag_stdimage_0.5.e01"
def test01PartitionScan(self):
""" Check the Partition scanner works """
env = pyflagsh.environment(case=self.test_case)
pyflagsh.shell_execv(env=env, command="scan",
argv=["*",'*'])
| gpl-2.0 | 6,980,197,623,057,290,000 | 31.456 | 90 | 0.54104 | false |
invesalius/invesalius3 | invesalius/data/volume.py | 4 | 32869 | #--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: [email protected]
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import plistlib
import os
import weakref
from distutils.version import LooseVersion
import numpy
import vtk
import wx
from pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.project as prj
import invesalius.data.slice_ as slice_
import invesalius.data.converters as converters
import invesalius.data.vtk_utils as vtk_utils
from vtk.util import numpy_support
import invesalius.session as ses
from invesalius import inv_paths
Kernels = {
"Basic Smooth 5x5" : [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 4.0, 4.0, 4.0, 1.0,
1.0, 4.0, 12.0, 4.0, 1.0,
1.0, 4.0, 4.0, 4.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
}
SHADING = {
"Default": {
"ambient" :0.15,
"diffuse" :0.9,
"specular" :0.3,
"specularPower" :15,
},
"Glossy Vascular":{
"ambient" :0.15,
"diffuse" :0.28,
"specular" :1.42,
"specularPower" :50,
},
"Glossy Bone": {
"ambient" :0.15,
"diffuse" :0.24,
"specular" :1.17,
"specularPower" :6.98,
},
"Endoscopy": {
"ambient" :0.12,
"diffuse" :0.64,
"specular" :0.73,
"specularPower" :50,
}
}
class Volume():
def __init__(self):
self.config = None
self.exist = None
self.color_transfer = None
self.opacity_transfer_func = None
self.ww = None
self.wl = None
self.curve = 0
self.plane = None
self.plane_on = False
self.volume = None
self.image = None
self.loaded_image = 0
self.to_reload = False
self.__bind_events()
def __bind_events(self):
Publisher.subscribe(self.OnHideVolume,
'Hide raycasting volume')
Publisher.subscribe(self.OnUpdatePreset,
'Update raycasting preset')
Publisher.subscribe(self.OnSetCurve,
'Set raycasting curve')
Publisher.subscribe(self.OnSetWindowLevel,
'Set raycasting wwwl')
Publisher.subscribe(self.Refresh,
'Set raycasting refresh')
Publisher.subscribe(self.OnSetRelativeWindowLevel,
'Set raycasting relative window and level')
Publisher.subscribe(self.OnEnableTool,
'Enable raycasting tool')
Publisher.subscribe(self.OnCloseProject, 'Close project data')
Publisher.subscribe(self.ChangeBackgroundColour,
'Change volume viewer background colour')
Publisher.subscribe(self.ResetRayCasting, 'Reset Raycasting')
Publisher.subscribe(self.OnFlipVolume, 'Flip volume')
def ResetRayCasting(self):
if self.exist:
self.exist = None
self.LoadVolume()
def OnCloseProject(self):
self.CloseProject()
def CloseProject(self):
#if self.plane:
# self.plane = None
# Publisher.sendMessage('Remove surface actor from viewer', self.plane_actor)
if self.plane:
self.plane.DestroyObjs()
del self.plane
self.plane = 0
if self.exist:
self.exist = None
Publisher.sendMessage('Remove surface actor from viewer', actor=self.volume)
Publisher.sendMessage('Disable volume cut menu')
Publisher.sendMessage('Unload volume', volume=self.volume)
del self.image
del self.imagedata
del self.final_imagedata
del self.volume
del self.color_transfer
del self.opacity_transfer_func
del self.volume_properties
del self.volume_mapper
self.volume = None
self.exist = False
self.loaded_image = False
self.image = None
self.final_imagedata = None
self.opacity_transfer_func = None
self.color_transfer = None
Publisher.sendMessage('Render volume viewer')
def OnLoadVolume(self, label):
label = label
#self.LoadConfig(label)
self.LoadVolume()
def OnHideVolume(self):
print('Hide Volume')
self.volume.SetVisibility(0)
if (self.plane and self.plane_on):
self.plane.Disable()
Publisher.sendMessage('Render volume viewer')
def OnShowVolume(self):
print('Show volume')
if self.exist:
print('Volume exists')
self.volume.SetVisibility(1)
if (self.plane and self.plane_on):
self.plane.Enable()
Publisher.sendMessage('Render volume viewer')
else:
            print('Volume does not exist')
Publisher.sendMessage('Load raycasting preset',
preset_name=const.RAYCASTING_LABEL)
self.LoadConfig()
self.LoadVolume()
self.exist = 1
def OnUpdatePreset(self):
self.__load_preset_config()
if self.config:
if self.to_reload:
self.exist = False
Publisher.sendMessage('Unload volume', volume=self.volume)
if self.exist:
self.__load_preset()
self.volume.SetVisibility(1)
#Publisher.sendMessage('Render volume viewer')
else:
self.LoadVolume()
self.CalculateHistogram()
self.exist = 1
colour = self.GetBackgroundColour()
Publisher.sendMessage('Change volume viewer background colour', colour=colour)
Publisher.sendMessage('Change volume viewer gui colour', colour=colour)
else:
Publisher.sendMessage('Unload volume', volume=self.volume)
del self.image
del self.imagedata
del self.final_imagedata
del self.volume
del self.color_transfer
del self.opacity_transfer_func
del self.volume_properties
del self.volume_mapper
self.volume = None
self.exist = False
self.loaded_image = False
self.image = None
self.final_imagedata = None
self.opacity_transfer_func = None
self.color_transfer = None
Publisher.sendMessage('Render volume viewer')
def OnFlipVolume(self, axis):
print("Flipping Volume")
self.loaded_image = False
del self.image
self.image = None
self.to_reload = True
def __load_preset_config(self):
self.config = prj.Project().raycasting_preset
def __update_colour_table(self):
if self.config['advancedCLUT']:
self.Create16bColorTable(self.scale)
self.CreateOpacityTable(self.scale)
else:
self.Create8bColorTable(self.scale)
self.Create8bOpacityTable(self.scale)
def __load_preset(self):
# Update colour table
self.__update_colour_table()
# Update convolution filter
original_imagedata = self.imagedata.GetOutput()
imagedata = self.ApplyConvolution(original_imagedata)
self.volume_mapper.SetInputData(imagedata)
# Update other information
self.SetShading()
self.SetTypeRaycasting()
def OnSetCurve(self, curve):
self.curve = curve
self.CalculateWWWL()
ww = self.ww
wl = self.wl
Publisher.sendMessage('Set volume window and level text', ww=ww, wl=wl)
def OnSetRelativeWindowLevel(self, diff_wl, diff_ww):
ww = self.ww + diff_ww
wl = self.wl + diff_wl
Publisher.sendMessage('Set volume window and level text', ww=ww, wl=wl)
self.SetWWWL(ww, wl)
self.ww = ww
self.wl = wl
def OnSetWindowLevel(self, ww, wl, curve):
self.curve = curve
self.SetWWWL(ww, wl)
def SetWWWL(self, ww, wl):
if self.config['advancedCLUT']:
try:
curve = self.config['16bitClutCurves'][self.curve]
except IndexError:
self.curve = 0
curve = self.config['16bitClutCurves'][self.curve]
p1 = curve[0]
p2 = curve[-1]
half = (p2['x'] - p1['x']) / 2.0
middle = p1['x'] + half
shiftWL = wl - middle
shiftWW = p1['x'] + shiftWL - (wl - 0.5 * ww)
factor = 1.0
for n,i in enumerate(curve):
factor = abs(i['x'] - middle) / half
if factor < 0:
factor = 0
i['x'] += shiftWL
if n < len(curve)/2.0:
i['x'] -= shiftWW * factor
else:
i['x'] += shiftWW * factor
else:
self.config['wl'] = wl
self.config['ww'] = ww
self.__update_colour_table()
def CalculateWWWL(self):
"""
Get the window width & level from the selected curve
"""
try:
curve = self.config['16bitClutCurves'][self.curve]
except IndexError:
self.curve -= 1
curve = self.config['16bitClutCurves'][self.curve]
first_point = curve[0]['x']
last_point = curve[-1]['x']
self.ww = last_point - first_point
self.wl = first_point + self.ww / 2.0
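    # --- Editor's note: worked example, not part of the original code. ---
    # If the selected curve runs from x=100 to x=900, the method above yields
    # ww = 900 - 100 = 800 and wl = 100 + 800/2.0 = 500.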
def Refresh(self):
self.__update_colour_table()
def Create16bColorTable(self, scale):
if self.color_transfer:
color_transfer = self.color_transfer
else:
color_transfer = vtk.vtkColorTransferFunction()
color_transfer.RemoveAllPoints()
curve_table = self.config['16bitClutCurves']
color_table = self.config['16bitClutColors']
colors = []
for i, l in enumerate(curve_table):
for j, lopacity in enumerate(l):
gray_level = lopacity['x']
r = color_table[i][j]['red']
g = color_table[i][j]['green']
b = color_table[i][j]['blue']
colors.append((gray_level, r, g, b))
color_transfer.AddRGBPoint(
self.TranslateScale(scale, gray_level),
r, g, b)
self.color_transfer = color_transfer
def Create8bColorTable(self, scale):
if self.color_transfer:
color_transfer = self.color_transfer
else:
color_transfer = vtk.vtkColorTransferFunction()
color_transfer.RemoveAllPoints()
color_preset = self.config['CLUT']
if color_preset != "No CLUT":
path = os.path.join(inv_paths.RAYCASTING_PRESETS_DIRECTORY,
'color_list', color_preset + '.plist')
with open(path, 'rb') as f:
p = plistlib.load(f, fmt=plistlib.FMT_XML)
r = p['Red']
g = p['Green']
b = p['Blue']
colors = list(zip(r,g,b))
else:
# Grayscale from black to white
colors = [(i, i, i) for i in range(256)]
ww = self.config['ww']
wl = self.TranslateScale(scale, self.config['wl'])
init = wl - ww/2.0
inc = ww / (len(colors) - 1.0)
for n,rgb in enumerate(colors):
color_transfer.AddRGBPoint(init + n * inc, *[i/255.0 for i in rgb])
self.color_transfer = color_transfer
def CreateOpacityTable(self, scale):
if self.opacity_transfer_func:
opacity_transfer_func = self.opacity_transfer_func
else:
opacity_transfer_func = vtk.vtkPiecewiseFunction()
opacity_transfer_func.RemoveAllPoints()
curve_table = self.config['16bitClutCurves']
opacities = []
ww = self.config['ww']
wl = self.config['wl']
self.ww = ww
self.wl = wl
l1 = wl - ww/2.0
l2 = wl + ww/2.0
k1 = 0.0
k2 = 1.0
opacity_transfer_func.AddSegment(0, 0, 2**16-1, 0)
for i, l in enumerate(curve_table):
for j, lopacity in enumerate(l):
gray_level = lopacity['x']
#if gray_level <= l1:
# opacity = k1
#elif gray_level > l2:
# opacity = k2
#else:
opacity = lopacity['y']
opacities.append((gray_level, opacity))
opacity_transfer_func.AddPoint(
self.TranslateScale(scale, gray_level), opacity)
self.opacity_transfer_func = opacity_transfer_func
def Create8bOpacityTable(self, scale):
if self.opacity_transfer_func:
opacity_transfer_func = self.opacity_transfer_func
else:
opacity_transfer_func = vtk.vtkPiecewiseFunction()
opacity_transfer_func.RemoveAllPoints()
opacities = []
ww = self.config['ww']
wl = self.TranslateScale(scale, self.config['wl'])
l1 = wl - ww/2.0
l2 = wl + ww/2.0
self.ww = ww
self.wl = self.config['wl']
opacity_transfer_func.RemoveAllPoints()
opacity_transfer_func.AddSegment(0, 0, 2**16-1, 0)
k1 = 0.0
k2 = 1.0
opacity_transfer_func.AddPoint(l1, 0)
opacity_transfer_func.AddPoint(l2, 1)
self.opacity_transfer_func = opacity_transfer_func
return opacity_transfer_func
def GetBackgroundColour(self):
colour = (self.config['backgroundColorRedComponent'],
self.config['backgroundColorGreenComponent'],
self.config['backgroundColorBlueComponent'])
return colour
def ChangeBackgroundColour(self, colour):
if (self.config):
self.config['backgroundColorRedComponent'] = colour[0] * 255
self.config['backgroundColorGreenComponent'] = colour[1] * 255
self.config['backgroundColorBlueComponent'] = colour[2] * 255
    def BuildTable(self, p, color_table):
        # NOTE: `p` (a raycasting preset dict) and `color_table` were undefined here;
        # they are taken as parameters so the references below resolve. This helper appears unused.
curve_table = p['16bitClutCurves']
color_background = (p['backgroundColorRedComponent'],
p['backgroundColorGreenComponent'],
p['backgroundColorBlueComponent'])
color_background = [i for i in color_background]
opacities = []
colors = []
for i, l in enumerate(curve_table):
for j, lopacity in enumerate(l):
gray_level = lopacity['x']
opacity = lopacity['y']
opacities.append((gray_level, opacity))
r = color_table[i][j]['red']
g = color_table[i][j]['green']
b = color_table[i][j]['blue']
colors.append((gray_level, r, g, b))
return colors, opacities, color_background, p['useShading']
def SetShading(self):
if self.config['useShading']:
self.volume_properties.ShadeOn()
else:
self.volume_properties.ShadeOff()
shading = SHADING[self.config['shading']]
self.volume_properties.SetAmbient(shading['ambient'])
self.volume_properties.SetDiffuse(shading['diffuse'])
self.volume_properties.SetSpecular(shading['specular'])
self.volume_properties.SetSpecularPower(shading['specularPower'])
def SetTypeRaycasting(self):
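        # Select maximum-intensity projection (MIP) or composite blending, using the
        # mapper's blend mode when supported and a ray-cast function otherwise.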
if self.volume_mapper.IsA("vtkFixedPointVolumeRayCastMapper") or self.volume_mapper.IsA("vtkGPUVolumeRayCastMapper"):
if self.config.get('MIP', False):
self.volume_mapper.SetBlendModeToMaximumIntensity()
else:
self.volume_mapper.SetBlendModeToComposite()
else:
if self.config.get('MIP', False):
raycasting_function = vtk.vtkVolumeRayCastMIPFunction()
else:
raycasting_function = vtk.vtkVolumeRayCastCompositeFunction()
raycasting_function.SetCompositeMethodToInterpolateFirst()
if ses.Session().rendering == '0':
self.volume_mapper.SetVolumeRayCastFunction(raycasting_function)
def ApplyConvolution(self, imagedata, update_progress = None):
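        # Apply each configured 5x5 convolution kernel in sequence, reporting
        # progress through the supplied callback (or a default progress dialog).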
number_filters = len(self.config['convolutionFilters'])
if number_filters:
if not(update_progress):
update_progress = vtk_utils.ShowProgress(number_filters)
for filter in self.config['convolutionFilters']:
convolve = vtk.vtkImageConvolve()
convolve.SetInputData(imagedata)
convolve.SetKernel5x5([i/60.0 for i in Kernels[filter]])
# convolve.ReleaseDataFlagOn()
convolve_ref = weakref.ref(convolve)
convolve_ref().AddObserver("ProgressEvent", lambda obj,evt:
update_progress(convolve_ref(), "Rendering..."))
convolve.Update()
del imagedata
imagedata = convolve.GetOutput()
del convolve
#convolve.GetOutput().ReleaseDataFlagOn()
return imagedata
def LoadImage(self):
slice_data = slice_.Slice()
n_array = slice_data.matrix
spacing = slice_data.spacing
slice_number = 0
orientation = 'AXIAL'
image = converters.to_vtk(n_array, spacing, slice_number, orientation)
self.image = image
def LoadVolume(self):
proj = prj.Project()
#image = imagedata_utils.to_vtk(n_array, spacing, slice_number, orientation)
if not self.loaded_image:
self.LoadImage()
self.loaded_image = 1
image = self.image
number_filters = len(self.config['convolutionFilters'])
if (prj.Project().original_orientation == const.AXIAL):
flip_image = True
else:
flip_image = False
#if (flip_image):
update_progress= vtk_utils.ShowProgress(2 + number_filters)
# Flip original vtkImageData
flip = vtk.vtkImageFlip()
flip.SetInputData(image)
flip.SetFilteredAxis(1)
flip.FlipAboutOriginOn()
# flip.ReleaseDataFlagOn()
flip_ref = weakref.ref(flip)
flip_ref().AddObserver("ProgressEvent", lambda obj,evt:
update_progress(flip_ref(), "Rendering..."))
flip.Update()
image = flip.GetOutput()
scale = image.GetScalarRange()
self.scale = scale
cast = vtk.vtkImageShiftScale()
cast.SetInputData(image)
cast.SetShift(abs(scale[0]))
cast.SetOutputScalarTypeToUnsignedShort()
# cast.ReleaseDataFlagOn()
cast_ref = weakref.ref(cast)
cast_ref().AddObserver("ProgressEvent", lambda obj,evt:
update_progress(cast_ref(), "Rendering..."))
cast.Update()
image2 = cast
self.imagedata = image2
if self.config['advancedCLUT']:
self.Create16bColorTable(scale)
self.CreateOpacityTable(scale)
else:
self.Create8bColorTable(scale)
self.Create8bOpacityTable(scale)
image2 = self.ApplyConvolution(image2.GetOutput(), update_progress)
self.final_imagedata = image2
# Changed the vtkVolumeRayCast to vtkFixedPointVolumeRayCastMapper
# because it's faster and the image is better
# TODO: To test if it's true.
if const.TYPE_RAYCASTING_MAPPER:
volume_mapper = vtk.vtkVolumeRayCastMapper()
#volume_mapper.AutoAdjustSampleDistancesOff()
#volume_mapper.SetInput(image2)
#volume_mapper.SetVolumeRayCastFunction(composite_function)
#volume_mapper.SetGradientEstimator(gradientEstimator)
volume_mapper.IntermixIntersectingGeometryOn()
self.volume_mapper = volume_mapper
else:
if int(ses.Session().rendering) == 0:
volume_mapper = vtk.vtkFixedPointVolumeRayCastMapper()
#volume_mapper.AutoAdjustSampleDistancesOff()
self.volume_mapper = volume_mapper
volume_mapper.IntermixIntersectingGeometryOn()
else:
volume_mapper = vtk.vtkGPUVolumeRayCastMapper()
volume_mapper.UseJitteringOn()
self.volume_mapper = volume_mapper
self.SetTypeRaycasting()
volume_mapper.SetInputData(image2)
# TODO: Look to this
#volume_mapper_hw = vtk.vtkVolumeTextureMapper3D()
#volume_mapper_hw.SetInput(image2)
#Cut Plane
#CutPlane(image2, volume_mapper)
#self.color_transfer = color_transfer
volume_properties = vtk.vtkVolumeProperty()
#volume_properties.IndependentComponentsOn()
volume_properties.SetInterpolationTypeToLinear()
volume_properties.SetColor(self.color_transfer)
try:
volume_properties.SetScalarOpacity(self.opacity_transfer_func)
except NameError:
pass
if not self.volume_mapper.IsA("vtkGPUVolumeRayCastMapper"):
            # Using these lines to improve the raycasting quality. These values
            # seem to be related to the ray sampling distance used by the raycasting.
# TODO: Need to see values that improve the quality and don't decrease
# the performance. 2.0 seems to be a good value to pix_diag
pix_diag = 2.0
volume_mapper.SetImageSampleDistance(0.25)
volume_mapper.SetSampleDistance(pix_diag / 5.0)
volume_properties.SetScalarOpacityUnitDistance(pix_diag)
self.volume_properties = volume_properties
self.SetShading()
volume = vtk.vtkVolume()
volume.SetMapper(volume_mapper)
volume.SetProperty(volume_properties)
self.volume = volume
colour = self.GetBackgroundColour()
self.exist = 1
if self.plane:
self.plane.SetVolumeMapper(volume_mapper)
Publisher.sendMessage('Load volume into viewer',
volume=volume, colour=colour,
ww=self.ww, wl=self.wl)
del flip
del cast
def OnEnableTool(self, tool_name, flag):
if tool_name == _("Cut plane"):
if self.plane:
if flag:
self.plane_on = True
self.plane.Enable()
else:
self.plane_on = False
self.plane.Disable()
else:
# self.final_imagedata.Update()
self.plane_on = True
self.plane = CutPlane(self.final_imagedata,
self.volume_mapper)
def CalculateHistogram(self):
image = self.image
r = int(image.GetScalarRange()[1] - image.GetScalarRange()[0])
accumulate = vtk.vtkImageAccumulate()
accumulate.SetInputData(image)
accumulate.SetComponentExtent(0, r -1, 0, 0, 0, 0)
accumulate.SetComponentOrigin(image.GetScalarRange()[0], 0, 0)
# accumulate.ReleaseDataFlagOn()
accumulate.Update()
n_image = numpy_support.vtk_to_numpy(accumulate.GetOutput().GetPointData().GetScalars())
del accumulate
init, end = image.GetScalarRange()
Publisher.sendMessage('Load histogram', histogram=n_image, init=init, end=end)
def TranslateScale(self, scale, value):
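        # Shift a value so the image scalar range starts at zero, matching the
        # vtkImageShiftScale offset applied in LoadVolume.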
#if value < 0:
# valor = 2**16 - abs(value)
#else:
# valor = value
return value - scale[0]
class VolumeMask:
def __init__(self, mask):
self.mask = mask
self.colour = mask.colour
self._volume_mapper = None
self._flip = None
self._color_transfer = None
self._piecewise_function = None
self._actor = None
def create_volume(self):
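        # Lazily build the mask volume: pick a CPU or GPU ray-cast mapper based on the
        # session's rendering setting, then wire up the colour/opacity transfer functions.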
if self._actor is None:
if int(ses.Session().rendering) == 0:
self._volume_mapper = vtk.vtkFixedPointVolumeRayCastMapper()
#volume_mapper.AutoAdjustSampleDistancesOff()
self._volume_mapper.IntermixIntersectingGeometryOn()
pix_diag = 2.0
self._volume_mapper.SetImageSampleDistance(0.25)
self._volume_mapper.SetSampleDistance(pix_diag / 5.0)
else:
self._volume_mapper = vtk.vtkGPUVolumeRayCastMapper()
self._volume_mapper.UseJitteringOn()
if LooseVersion(vtk.vtkVersion().GetVTKVersion()) > LooseVersion('8.0'):
self._volume_mapper.SetBlendModeToIsoSurface()
# else:
# isosurfaceFunc = vtk.vtkVolumeRayCastIsosurfaceFunction()
# isosurfaceFunc.SetIsoValue(127)
# self._volume_mapper = vtk.vtkVolumeRayCastMapper()
# self._volume_mapper.SetVolumeRayCastFunction(isosurfaceFunc)
self._flip = vtk.vtkImageFlip()
self._flip.SetInputData(self.mask.imagedata)
self._flip.SetFilteredAxis(1)
self._flip.FlipAboutOriginOn()
self._volume_mapper.SetInputConnection(self._flip.GetOutputPort())
self._volume_mapper.Update()
r, g, b = self.colour
self._color_transfer = vtk.vtkColorTransferFunction()
self._color_transfer.RemoveAllPoints()
self._color_transfer.AddRGBPoint(0.0, 0, 0, 0)
self._color_transfer.AddRGBPoint(254.0, r, g, b)
self._color_transfer.AddRGBPoint(255.0, r, g, b)
self._piecewise_function = vtk.vtkPiecewiseFunction()
self._piecewise_function.RemoveAllPoints()
self._piecewise_function.AddPoint(0.0, 0.0)
self._piecewise_function.AddPoint(127, 1.0)
self._volume_property = vtk.vtkVolumeProperty()
self._volume_property.SetColor(self._color_transfer)
self._volume_property.SetScalarOpacity(self._piecewise_function)
self._volume_property.ShadeOn()
self._volume_property.SetInterpolationTypeToLinear()
self._volume_property.SetSpecular(0.75)
self._volume_property.SetSpecularPower(2)
if not self._volume_mapper.IsA("vtkGPUVolumeRayCastMapper"):
self._volume_property.SetScalarOpacityUnitDistance(pix_diag)
else:
if LooseVersion(vtk.vtkVersion().GetVTKVersion()) > LooseVersion('8.0'):
self._volume_property.GetIsoSurfaceValues().SetValue(0, 127)
self._actor = vtk.vtkVolume()
self._actor.SetMapper(self._volume_mapper)
self._actor.SetProperty(self._volume_property)
self._actor.Update()
def change_imagedata(self):
self._flip.SetInputData(self.mask.imagedata)
def set_colour(self, colour):
self.colour = colour
r, g, b = self.colour
self._color_transfer.RemoveAllPoints()
self._color_transfer.AddRGBPoint(0.0, 0, 0, 0)
self._color_transfer.AddRGBPoint(254.0, r, g, b)
self._color_transfer.AddRGBPoint(255.0, r, g, b)
class CutPlane:
def __init__(self, img, volume_mapper):
self.img = img
self.volume_mapper = volume_mapper
self.Create()
self.__bind_events()
def __bind_events(self):
Publisher.subscribe(self.Reset,
'Reset Cut Plane')
Publisher.subscribe(self.Enable,
'Enable Cut Plane')
Publisher.subscribe(self.Disable,
'Disable Cut Plane')
def Create(self):
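        # Build a draggable vtkImagePlaneWidget, mirror its geometry in a
        # vtkPlaneSource/vtkPlane pair, and register that plane as a clipping
        # plane of the volume mapper.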
self.plane_widget = plane_widget = vtk.vtkImagePlaneWidget()
plane_widget.SetInputData(self.img)
plane_widget.SetPlaneOrientationToXAxes()
#plane_widget.SetResliceInterpolateToLinear()
plane_widget.TextureVisibilityOff()
#Set left mouse button to move and rotate plane
plane_widget.SetLeftButtonAction(1)
#SetColor margin to green
margin_property = plane_widget.GetMarginProperty()
margin_property.SetColor(0,0.8,0)
#Disable cross
cursor_property = plane_widget.GetCursorProperty()
cursor_property.SetOpacity(0)
self.plane_source = plane_source = vtk.vtkPlaneSource()
plane_source.SetOrigin(plane_widget.GetOrigin())
plane_source.SetPoint1(plane_widget.GetPoint1())
plane_source.SetPoint2(plane_widget.GetPoint2())
plane_source.SetNormal(plane_widget.GetNormal())
plane_mapper = self.plane_mapper = vtk.vtkPolyDataMapper()
plane_mapper.SetInputData(plane_source.GetOutput())
self.plane_actor = plane_actor = vtk.vtkActor()
plane_actor.SetMapper(plane_mapper)
plane_actor.GetProperty().BackfaceCullingOn()
plane_actor.GetProperty().SetOpacity(0)
plane_widget.AddObserver("InteractionEvent", self.Update)
Publisher.sendMessage('AppendActor', actor=self.plane_actor)
Publisher.sendMessage('Set Widget Interactor', widget=self.plane_widget)
plane_actor.SetVisibility(1)
plane_widget.On()
self.plane = plane = vtk.vtkPlane()
plane.SetNormal(self.plane_source.GetNormal())
plane.SetOrigin(self.plane_source.GetOrigin())
self.volume_mapper.AddClippingPlane(plane)
#Storage First Position
self.origin = plane_widget.GetOrigin()
self.p1 = plane_widget.GetPoint1()
self.p2 = plane_widget.GetPoint2()
self.normal = plane_widget.GetNormal()
def SetVolumeMapper(self, volume_mapper):
self.volume_mapper = volume_mapper
self.volume_mapper.AddClippingPlane(self.plane)
def Update(self, a, b):
plane_source = self.plane_source
plane_widget = self.plane_widget
plane_source.SetOrigin(plane_widget.GetOrigin())
plane_source.SetPoint1(plane_widget.GetPoint1())
plane_source.SetPoint2(plane_widget.GetPoint2())
plane_source.SetNormal(plane_widget.GetNormal())
self.plane_actor.VisibilityOn()
self.plane.SetNormal(plane_source.GetNormal())
self.plane.SetOrigin(plane_source.GetOrigin())
Publisher.sendMessage('Render volume viewer')
def Enable(self):
self.plane_widget.On()
self.plane_actor.VisibilityOn()
self.volume_mapper.AddClippingPlane(self.plane)
Publisher.sendMessage('Render volume viewer')
def Disable(self):
self.plane_widget.Off()
self.plane_actor.VisibilityOff()
self.volume_mapper.RemoveClippingPlane(self.plane)
Publisher.sendMessage('Render volume viewer')
def Reset(self):
plane_source = self.plane_source
plane_widget = self.plane_widget
plane_source.SetOrigin(self.origin)
plane_source.SetPoint1(self.p1)
plane_source.SetPoint2(self.p2)
plane_source.SetNormal(self.normal)
self.plane_actor.VisibilityOn()
self.plane.SetNormal(self.normal)
self.plane.SetOrigin(self.origin)
Publisher.sendMessage('Render volume viewer')
def DestroyObjs(self):
Publisher.sendMessage('Remove surface actor from viewer', actor=self.plane_actor)
self.Disable()
del self.plane_widget
del self.plane_source
del self.plane_actor
del self.normal
del self.plane
| gpl-2.0 | -6,795,009,479,111,238,000 | 35.480577 | 125 | 0.574645 | false |
ntt-pf-lab/horizon | django-openstack/django_openstack/tests/broken/instance_tests.py | 21 | 2479 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for instance views.
"""
import boto.ec2.instance
import mox
from django.core.urlresolvers import reverse
from django_openstack.nova.tests.base import (BaseProjectViewTests,
TEST_PROJECT)
TEST_INSTANCE_ID = 'i-abcdefgh'
class InstanceViewTests(BaseProjectViewTests):
def test_index(self):
self.mox.StubOutWithMock(self.project, 'get_instances')
self.project.get_instances().AndReturn([])
self.mox.ReplayAll()
res = self.client.get(reverse('nova_instances', args=[TEST_PROJECT]))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res,
'django_openstack/nova/instances/index.html')
self.assertEqual(len(res.context['instances']), 0)
self.mox.VerifyAll()
def test_detail(self):
instance = boto.ec2.instance.Instance()
instance.id = TEST_INSTANCE_ID
instance.displayName = instance.id
instance.displayDescription = instance.id
self.mox.StubOutWithMock(self.project, 'get_instance')
self.project.get_instance(instance.id).AndReturn(instance)
self.mox.StubOutWithMock(self.project, 'get_instances')
self.project.get_instances().AndReturn([instance])
self.mox.ReplayAll()
res = self.client.get(reverse('nova_instances_detail',
args=[TEST_PROJECT, TEST_INSTANCE_ID]))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res,
'django_openstack/nova/instances/index.html')
self.assertEqual(res.context['selected_instance'].id, instance.id)
self.mox.VerifyAll()
| apache-2.0 | 2,569,180,666,064,712,000 | 34.927536 | 78 | 0.676886 | false |
jtraver/dev | python/prime/range2.py | 1 | 1564 | #!/usr/bin/python
limit = 100000
primes = []
dprimes = {}
def is_prime(pc):
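    # Trial division against the primes found so far, stopping once div * div > pc.
    # Newly found primes are appended to the global primes list and dprimes dict as a side effect.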
if pc in dprimes:
return True
if pc < 0:
return False
prime = True
if pc < 1:
return prime
for idiv in xrange(1, len(primes)):
div = primes[idiv]
if div * div > pc:
break
if pc % div == 0:
prime = False
if prime and not pc in dprimes:
primes.append(pc)
dprimes[pc] = 1
return prime
def main():
maxpair = 0
maxlower = -2
intlower = 0
for pc in xrange(1, limit, 2):
if is_prime(pc):
print "%s is prime" % str(pc)
else:
continue
lower = pc - 2
upper = pc + 2
paircount = 0
lastlower = -2
while lower >= 0:
if is_prime(lower) and is_prime(upper):
print " %s + %s" % (str(lower), str(upper))
paircount += 1
lastlower = lower
lower -= 2
upper += 2
print " %s pairs for %s" % (str(paircount), str(pc))
if paircount < intlower:
intlower = paircount
print " new interim lower %s for %s (last max %s)" % (intlower, str(pc), str(maxpair))
if paircount > maxpair:
maxpair = paircount
print " new max pairs %s for %s" % (maxpair, str(pc))
intlower = maxpair
if lastlower > maxlower:
maxlower = lastlower
print " new max lower %s for %s" % (maxlower, str(pc))
main()
| mit | 8,616,596,997,687,647,000 | 25.965517 | 103 | 0.480179 | false |
Johnzero/erp | openerp/addons/project_issue/project_issue.py | 3 | 23891 | #-*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from crm import crm
from datetime import datetime
from osv import fields,osv
from tools.translate import _
import binascii
import time
import tools
from crm import wizard
wizard.mail_compose_message.SUPPORTED_MODELS.append('project.issue')
class project_issue_version(osv.osv):
_name = "project.issue.version"
_order = "name desc"
_columns = {
'name': fields.char('Version Number', size=32, required=True),
'active': fields.boolean('Active', required=False),
}
_defaults = {
'active': 1,
}
project_issue_version()
class project_issue(crm.crm_case, osv.osv):
_name = "project.issue"
_description = "Project Issue"
_order = "priority, create_date desc"
_inherit = ['mail.thread']
def write(self, cr, uid, ids, vals, context=None):
        # Update the last action date every time the user changes the stage, the state or sends a new email
logged_fields = ['type_id', 'state', 'message_ids']
if any([field in vals for field in logged_fields]):
vals['date_action_last'] = time.strftime('%Y-%m-%d %H:%M:%S')
return super(project_issue, self).write(cr, uid, ids, vals, context)
def case_open(self, cr, uid, ids, *args):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of case's Ids
@param *args: Give Tuple Value
"""
res = super(project_issue, self).case_open(cr, uid, ids, *args)
self.write(cr, uid, ids, {'date_open': time.strftime('%Y-%m-%d %H:%M:%S'), 'user_id' : uid})
for (id, name) in self.name_get(cr, uid, ids):
message = _("Issue '%s' has been opened.") % name
self.log(cr, uid, id, message)
return res
def case_close(self, cr, uid, ids, *args):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of case's Ids
@param *args: Give Tuple Value
"""
res = super(project_issue, self).case_close(cr, uid, ids, *args)
for (id, name) in self.name_get(cr, uid, ids):
message = _("Issue '%s' has been closed.") % name
self.log(cr, uid, id, message)
return res
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Openday’s IDs
@return: difference between current date and log date
@param context: A standard dictionary for contextual values
"""
cal_obj = self.pool.get('resource.calendar')
res_obj = self.pool.get('resource.resource')
res = {}
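        # For each issue compute, per requested field, either elapsed days or working
        # hours between creation and open/close, using the project's resource calendar
        # when one is configured.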
for issue in self.browse(cr, uid, ids, context=context):
res[issue.id] = {}
for field in fields:
duration = 0
ans = False
hours = 0
date_create = datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
if field in ['working_hours_open','day_open']:
if issue.date_open:
date_open = datetime.strptime(issue.date_open, "%Y-%m-%d %H:%M:%S")
ans = date_open - date_create
date_until = issue.date_open
#Calculating no. of working hours to open the issue
hours = cal_obj.interval_hours_get(cr, uid, issue.project_id.resource_calendar_id.id,
date_create,
date_open)
elif field in ['working_hours_close','day_close']:
if issue.date_closed:
date_close = datetime.strptime(issue.date_closed, "%Y-%m-%d %H:%M:%S")
date_until = issue.date_closed
ans = date_close - date_create
#Calculating no. of working hours to close the issue
hours = cal_obj.interval_hours_get(cr, uid, issue.project_id.resource_calendar_id.id,
date_create,
date_close)
elif field in ['days_since_creation']:
if issue.create_date:
days_since_creation = datetime.today() - datetime.strptime(issue.create_date, "%Y-%m-%d %H:%M:%S")
res[issue.id][field] = days_since_creation.days
continue
elif field in ['inactivity_days']:
res[issue.id][field] = 0
if issue.date_action_last:
inactive_days = datetime.today() - datetime.strptime(issue.date_action_last, '%Y-%m-%d %H:%M:%S')
res[issue.id][field] = inactive_days.days
continue
if ans:
resource_id = False
if issue.user_id:
resource_ids = res_obj.search(cr, uid, [('user_id','=',issue.user_id.id)])
if resource_ids and len(resource_ids):
resource_id = resource_ids[0]
duration = float(ans.days)
if issue.project_id and issue.project_id.resource_calendar_id:
duration = float(ans.days) * 24
new_dates = cal_obj.interval_min_get(cr, uid,
issue.project_id.resource_calendar_id.id,
date_create,
duration, resource=resource_id)
no_days = []
date_until = datetime.strptime(date_until, '%Y-%m-%d %H:%M:%S')
for in_time, out_time in new_dates:
if in_time.date not in no_days:
no_days.append(in_time.date)
if out_time > date_until:
break
duration = len(no_days)
if field in ['working_hours_open','working_hours_close']:
res[issue.id][field] = hours
else:
res[issue.id][field] = abs(float(duration))
return res
def _get_issue_task(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for task in self.pool.get('project.task').browse(cr, uid, ids, context=context):
issues += issue_pool.search(cr, uid, [('task_id','=',task.id)])
return issues
def _get_issue_work(self, cr, uid, ids, context=None):
issues = []
issue_pool = self.pool.get('project.issue')
for work in self.pool.get('project.task.work').browse(cr, uid, ids, context=context):
if work.task_id:
issues += issue_pool.search(cr, uid, [('task_id','=',work.task_id.id)])
return issues
def _hours_get(self, cr, uid, ids, field_names, args, context=None):
task_pool = self.pool.get('project.task')
res = {}
for issue in self.browse(cr, uid, ids, context=context):
progress = 0.0
if issue.task_id:
progress = task_pool._hours_get(cr, uid, [issue.task_id.id], field_names, args, context=context)[issue.task_id.id]['progress']
res[issue.id] = {'progress' : progress}
return res
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Issue', size=128, required=True),
'active': fields.boolean('Active', required=False),
'create_date': fields.datetime('Creation Date', readonly=True,select=True),
'write_date': fields.datetime('Update Date', readonly=True),
'days_since_creation': fields.function(_compute_day, string='Days since creation date', \
multi='compute_day', type="integer", help="Difference in days between creation date and current date"),
'date_deadline': fields.date('Deadline'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Sales team to which Case belongs to.\
Define Responsible user and Email account for mail gateway.'),
'partner_id': fields.many2one('res.partner', 'Partner', select=1),
'partner_address_id': fields.many2one('res.partner.address', 'Partner Contact', \
domain="[('partner_id','=',partner_id)]"),
'company_id': fields.many2one('res.company', 'Company'),
'description': fields.text('Description'),
'state': fields.selection([('draft', 'New'), ('open', 'In Progress'), ('cancel', 'Cancelled'), ('done', 'Done'),('pending', 'Pending'), ], 'State', size=16, readonly=True,
help='The state is set to \'Draft\', when a case is created.\
\nIf the case is in progress the state is set to \'Open\'.\
\nWhen the case is over, the state is set to \'Done\'.\
\nIf the case needs to be reviewed then the state is set to \'Pending\'.'),
'email_from': fields.char('Email', size=128, help="These people will receive email.", select=1),
'email_cc': fields.char('Watchers Emails', size=256, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'date_open': fields.datetime('Opened', readonly=True,select=True),
# Project Issue fields
'date_closed': fields.datetime('Closed', readonly=True,select=True),
'date': fields.datetime('Date'),
'channel_id': fields.many2one('crm.case.channel', 'Channel', help="Communication channel."),
'categ_id': fields.many2one('crm.case.categ', 'Category', domain="[('object_id.model', '=', 'crm.project.bug')]"),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority', select=True),
'version_id': fields.many2one('project.issue.version', 'Version'),
'type_id': fields.many2one ('project.task.type', 'Stages', domain="[('project_ids', '=', project_id)]"),
'project_id':fields.many2one('project.project', 'Project'),
'duration': fields.float('Duration'),
'task_id': fields.many2one('project.task', 'Task', domain="[('project_id','=',project_id)]"),
'day_open': fields.function(_compute_day, string='Days to Open', \
multi='compute_day', type="float", store=True),
'day_close': fields.function(_compute_day, string='Days to Close', \
multi='compute_day', type="float", store=True),
'user_id': fields.many2one('res.users', 'Assigned to', required=False, select=1),
'working_hours_open': fields.function(_compute_day, string='Working Hours to Open the Issue', \
multi='compute_day', type="float", store=True),
'working_hours_close': fields.function(_compute_day, string='Working Hours to Close the Issue', \
multi='compute_day', type="float", store=True),
'inactivity_days': fields.function(_compute_day, string='Days since last action', \
multi='compute_day', type="integer", help="Difference in days between last action and current date"),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'user_email', type='char', string='User Email', readonly=True),
'message_ids': fields.one2many('mail.message', 'res_id', 'Messages', domain=[('model','=',_name)]),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'progress': fields.function(_hours_get, string='Progress (%)', multi='hours', group_operator="avg", help="Computed as: Time Spent / Total Time.",
store = {
'project.issue': (lambda self, cr, uid, ids, c={}: ids, ['task_id'], 10),
'project.task': (_get_issue_task, ['progress'], 10),
'project.task.work': (_get_issue_work, ['hours'], 10),
}),
}
def _get_project(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.context_project_id:
return user.context_project_id.id
return False
def on_change_project(self, cr, uid, ids, project_id, context=None):
return {}
_defaults = {
'active': 1,
'partner_id': crm.crm_case._get_default_partner,
'partner_address_id': crm.crm_case._get_default_partner_address,
'email_from': crm.crm_case._get_default_email,
'state': 'draft',
'section_id': crm.crm_case._get_section,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': crm.AVAILABLE_PRIORITIES[2][0],
'project_id':_get_project,
'categ_id' : lambda *a: False,
}
def set_priority(self, cr, uid, ids, priority):
"""Set lead priority
"""
return self.write(cr, uid, ids, {'priority' : priority})
def set_high_priority(self, cr, uid, ids, *args):
"""Set lead priority to high
"""
return self.set_priority(cr, uid, ids, '1')
def set_normal_priority(self, cr, uid, ids, *args):
"""Set lead priority to normal
"""
return self.set_priority(cr, uid, ids, '3')
def convert_issue_task(self, cr, uid, ids, context=None):
case_obj = self.pool.get('project.issue')
data_obj = self.pool.get('ir.model.data')
task_obj = self.pool.get('project.task')
if context is None:
context = {}
result = data_obj._get_id(cr, uid, 'project', 'view_task_search_form')
res = data_obj.read(cr, uid, result, ['res_id'])
id2 = data_obj._get_id(cr, uid, 'project', 'view_task_form2')
id3 = data_obj._get_id(cr, uid, 'project', 'view_task_tree2')
if id2:
id2 = data_obj.browse(cr, uid, id2, context=context).res_id
if id3:
id3 = data_obj.browse(cr, uid, id3, context=context).res_id
for bug in case_obj.browse(cr, uid, ids, context=context):
new_task_id = task_obj.create(cr, uid, {
'name': bug.name,
'partner_id': bug.partner_id.id,
'description':bug.description,
'date_deadline': bug.date,
'project_id': bug.project_id.id,
# priority must be in ['0','1','2','3','4'], while bug.priority is in ['1','2','3','4','5']
'priority': str(int(bug.priority) - 1),
'user_id': bug.user_id.id,
'planned_hours': 0.0,
})
vals = {
'task_id': new_task_id,
'state':'pending'
}
case_obj.write(cr, uid, [bug.id], vals)
return {
'name': _('Tasks'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.task',
'res_id': int(new_task_id),
'view_id': False,
'views': [(id2,'form'),(id3,'tree'),(False,'calendar'),(False,'graph')],
'type': 'ir.actions.act_window',
'search_view_id': res['res_id'],
'nodestroy': True
}
def _convert(self, cr, uid, ids, xml_id, context=None):
data_obj = self.pool.get('ir.model.data')
id2 = data_obj._get_id(cr, uid, 'project_issue', xml_id)
categ_id = False
if id2:
categ_id = data_obj.browse(cr, uid, id2, context=context).res_id
if categ_id:
self.write(cr, uid, ids, {'categ_id': categ_id})
return True
def convert_to_feature(self, cr, uid, ids, context=None):
return self._convert(cr, uid, ids, 'feature_request_categ', context=context)
def convert_to_bug(self, cr, uid, ids, context=None):
return self._convert(cr, uid, ids, 'bug_categ', context=context)
def next_type(self, cr, uid, ids, *args):
for task in self.browse(cr, uid, ids):
typeid = task.type_id.id
types = map(lambda x:x.id, task.project_id.type_ids or [])
if types:
if not typeid:
self.write(cr, uid, task.id, {'type_id': types[0]})
elif typeid and typeid in types and types.index(typeid) != len(types)-1 :
index = types.index(typeid)
self.write(cr, uid, task.id, {'type_id': types[index+1]})
return True
def prev_type(self, cr, uid, ids, *args):
for task in self.browse(cr, uid, ids):
typeid = task.type_id.id
types = map(lambda x:x.id, task.project_id and task.project_id.type_ids or [])
if types:
if typeid and typeid in types:
index = types.index(typeid)
self.write(cr, uid, task.id, {'type_id': index and types[index-1] or False})
return True
def onchange_task_id(self, cr, uid, ids, task_id, context=None):
result = {}
if not task_id:
return {'value':{}}
task = self.pool.get('project.task').browse(cr, uid, task_id, context=context)
return {'value':{'user_id': task.user_id.id,}}
def case_escalate(self, cr, uid, ids, *args):
"""Escalates case to top level
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of case Ids
@param *args: Tuple Value for additional Params
"""
cases = self.browse(cr, uid, ids)
for case in cases:
data = {'state' : 'draft'}
if case.project_id.project_escalation_id:
data['project_id'] = case.project_id.project_escalation_id.id
if case.project_id.project_escalation_id.user_id:
data['user_id'] = case.project_id.project_escalation_id.user_id.id
if case.task_id:
self.pool.get('project.task').write(cr, uid, [case.task_id.id], {'project_id': data['project_id'], 'user_id': False})
else:
raise osv.except_osv(_('Warning !'), _('You cannot escalate this issue.\nThe relevant Project has not configured the Escalation Project!'))
self.write(cr, uid, [case.id], data)
self.message_append(cr, uid, cases, _('Escalate'))
return True
def message_new(self, cr, uid, msg, custom_values=None, context=None):
"""Automatically called when new email message arrives"""
if context is None:
context = {}
subject = msg.get('subject') or _('No Title')
body = msg.get('body_text')
msg_from = msg.get('from')
priority = msg.get('priority')
vals = {
'name': subject,
'email_from': msg_from,
'email_cc': msg.get('cc'),
'description': body,
'user_id': False,
}
if priority:
vals['priority'] = priority
vals.update(self.message_partner_by_email(cr, uid, msg_from))
context.update({'state_to' : 'draft'})
if custom_values and isinstance(custom_values, dict):
vals.update(custom_values)
res_id = self.create(cr, uid, vals, context)
self.message_append_dict(cr, uid, [res_id], msg, context=context)
self.convert_to_bug(cr, uid, [res_id], context=context)
return res_id
def message_update(self, cr, uid, ids, msg, vals=None, default_act='pending', context=None):
if vals is None:
vals = {}
if isinstance(ids, (str, int, long)):
ids = [ids]
vals.update({
'description': msg['body_text']
})
if msg.get('priority', False):
vals['priority'] = msg.get('priority')
maps = {
'cost': 'planned_cost',
'revenue': 'planned_revenue',
'probability': 'probability'
}
# Reassign the 'open' state to the case if this one is in pending or done
for record in self.browse(cr, uid, ids, context=context):
if record.state in ('pending', 'done'):
record.write({'state' : 'open'})
vls = { }
for line in msg['body_text'].split('\n'):
line = line.strip()
res = tools.misc.command_re.match(line)
if res and maps.get(res.group(1).lower(), False):
key = maps.get(res.group(1).lower())
vls[key] = res.group(2).lower()
vals.update(vls)
res = self.write(cr, uid, ids, vals)
self.message_append_dict(cr, uid, ids, msg, context=context)
return res
def copy(self, cr, uid, id, default=None, context=None):
issue = self.read(cr, uid, id, ['name'], context=context)
if not default:
default = {}
default = default.copy()
default['name'] = issue['name'] + _(' (copy)')
return super(project_issue, self).copy(cr, uid, id, default=default,
context=context)
project_issue()
class project(osv.osv):
_inherit = "project.project"
_columns = {
'project_escalation_id' : fields.many2one('project.project','Project Escalation', help='If any issue is escalated from the current Project, it will be listed under the project selected here.', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'reply_to' : fields.char('Reply-To Email Address', size=256)
}
def _check_escalation(self, cr, uid, ids, context=None):
project_obj = self.browse(cr, uid, ids[0], context=context)
if project_obj.project_escalation_id:
if project_obj.project_escalation_id.id == project_obj.id:
return False
return True
_constraints = [
(_check_escalation, 'Error! You cannot assign escalation to the same project!', ['project_escalation_id'])
]
project()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 9,199,328,082,149,382,000 | 46.102564 | 272 | 0.546753 | false |
dddsjz/autograding-for-csmoodle | Main.py | 1 | 8380 | # -*- coding: UTF-8 -*-
# !/usr/bin/env python
_author_ = 'dddsjz'
import ssl
import urllib
import urllib2
import cookielib
import json
import re
from Tkinter import *
import os
# Reference: http://cuiqingcai.com/1052.html
# 2017.11.1: the logic should work like this: use re to get sesskey, student_id, and student_name (with a white space), and store the id
# and name in a local file. The user enters a student_name and the program looks up the student_id and constructs the request.
# 2017.11.2: create a regular expression to find the 5 digit student id which is used to identify the student on the server.
# The logic for the regular expression: the 5 digit number follows "id=", so 'id=(\d{5})'. The part following it can be
# any characters but never a url containing "://" (a simple way to narrow the search), so '.[^://*]'
# 2017.11.27: finished all basic functions; next, add a GUI and a file download function
# disable HTTPS certificate verification
ssl._create_default_https_context = ssl._create_unverified_context
# install proxy
enable_proxy = False
# https
proxy_handler = urllib2.ProxyHandler({"https": '127.0.0.1:8080'})
null_proxy_handler = urllib2.ProxyHandler({})
if enable_proxy:
opener = urllib2.build_opener(proxy_handler)
else:
opener = urllib2.build_opener(null_proxy_handler)
urllib2.install_opener(opener)
# debug proxy
#response = urllib2.urlopen('https://www.baidu.com')
#print response.read()
# Tkinter initialization
# top = Tk()
# top.title = ("Grade")
# top.geometry('200X200')
# top.resizable(width=True,height=True)
# Three frames, top with login, left with courses and subjects, right with student and grade
# frm_tp = Frame(top)
user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0"
# store a login cookie for current session
filename = 'cookie.txt'
cookie = cookielib.MozillaCookieJar(filename)
def login(login_name, password, course_in):
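    # Post the login form using a persistent cookie jar, then scrape the sesskey,
    # the numeric user id and the course id of the requested course from the response.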
global opener
# login header
content_type = "application/x-www-form-urlencoded"
referer = "https://csmoodle.ucd.ie/moodle/login/index.php/"
headers = {"User-Agent":user_agent, "Content-Type":content_type, "Referer":referer}
# value for post login form
value = {"username":login_name, "password":password, "rememberusername":1}
data = urllib.urlencode(value)
url = "https://csmoodle.ucd.ie/moodle/login/index.php/"
req = urllib2.Request(url, data, headers)
if enable_proxy:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie), proxy_handler)
else:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie), null_proxy_handler)
response = opener.open(req)
rr = response.read()
# print rr
sesskey = re.findall(r'sesskey=(\w*?)\"', rr)[0]
user_id = re.findall(r'userid=\"(\d{5,})\"', rr)[0]
print user_id
# print sesskey
# save & rewrite old file
cookie.save(ignore_discard=True, ignore_expires=True)
# ask / to find courses by courses ID
course_id = re.findall(r'id=(\d{3,}).*' + course_in, rr)
print course_id
return sesskey, user_id, course_id
# Login input
login_name = raw_input("Please Enter login name\n")
password = raw_input("Please Enter Password\n")
course_in = raw_input("Please Enter Course ID\n")
temp = login(login_name, password, course_in)
sesskey = temp[0]
user_id = temp[1]
course_id = temp[2]
# ask view.php to get inside to courses and find subject id
url = "https://csmoodle.ucd.ie/moodle/course/view.php?id="+course_id[0]+"/"
referer = "https://csmoodle.ucd.ie/moodle/"
headers = {"User-Agent":user_agent, "Referer":referer}
req = urllib2.Request(url, headers=headers)
response = opener.open(req)
rr = response.read()
print rr
# find instance name to choose the subject to grade
subjects = re.findall(r'id=(\d{5,}).*?instancename">(.*?)<', rr)
print subjects
# find subject id
subject_name = raw_input("Please Choose A Subject\n")
subject_index = -1
for i in subjects:
for j in i:
if subject_name == j:
subject_index = i[0]
print subject_index
# find assignmentid from user-info data region.
url = "https://csmoodle.ucd.ie/moodle/mod/assign/view.php?id="+subject_index+"&rownum=0&action=grader/"
referer = "https://csmoodle.ucd.ie/moodle/mod/assign/view.php?id="+subject_index+"/"
headers = {"User-Agent": user_agent, "Referer": referer}
req = urllib2.Request(url, headers=headers)
response = opener.open(req)
assignment_id = re.findall(r'data-assignmentid=\"(\d{4,})\"',response.read())[0]
print assignment_id
def grade_student(student_name, grade):
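    # For each entered student, scrape the 5-digit student id from the grading page,
    # download the submission if one exists, and build the JSON grading payload
    # (actually sending the grading request is currently commented out below).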
    while student_name != "Exit":
url = "https://csmoodle.ucd.ie/moodle/mod/assign/view.php?id="+subject_index+"&action=grading/"
referer = "https://csmoodle.ucd.ie/moodle/mod/assign/view.php?id="+subject_index
headers = {"User-Agent":user_agent, "Referer":referer}
req = urllib2.Request(url, headers=headers)
response = opener.open(req)
rr = response.read()
# print response.read()
student_id = re.findall(r'id=(\d{5})(.[^://]*)'+student_name, rr)[0][0]
temp = re.findall(r'user'+student_id+'.*?([\s\S]*?)pluginfile.php(.*?)c9',rr)
#print temp
#print "temp 0 0 : "+temp[0][0] + "\n"
#print "temp 0 1 : "+temp[0][1] + "\n"
# print temp[1]
combine = temp[0][0] + 'pluginfile.php' + temp[0][1]
check = re.findall('cell c8[\s\S]*?</tr>', combine)
# print check
# print combine
print student_id
# print response.read()
        if len(check) == 0:
download_url = re.findall(r'[a-zA-z]+://[^\s]*', re.findall(r'<a target.*?="(.*?)">', combine)[0])
# print download_url
#local.replace('\\','\/')
#local = r'C:\temp'+'\\'+student_name+r'\1.zip'
#print local
# local = re.sub(r'\\', '/', local)
# print local
            local = r'D:\temp\1'  # raw string so the backslashes are not treated as escape sequences
urllib.urlretrieve(download_url[0], local)
else:
print("No any file to download")
# json header
# gradeUrl = "https://csmoodle.ucd.ie/moodle/lib/ajax/service.php?sesskey="+sesskey+"&info=mod_assign_submit_grading_form/"
content_type = "application/json"
accept_encoding = "gzip,deflate,br"
host = "csmoodle.ucd.ie"
# referer = "https://csmoodle.ucd.ie/moodle/mod/assign/view.php?id="+subject_index+"&rownum=0&action=grader&userid="+user_id
x_requested_with = "XMLHttpRequest"
headers = {"Host":host, "User-Agent":user_agent, "Accept-Encoding":accept_encoding, "Content-Type":content_type, "Referer":referer, "X-Requested-With": x_requested_with}
# json value
value = [{
"index":0,
"methodname":"mod_assign_submit_grading_form",
"args":{
"assignmentid":assignment_id,
"userid":student_id,
"jsonformdata":"\"id="+subject_index+""
"&rownum=0"
"&useridlistid="
"&attemptnumber=-1"
"&ajax=0"
"&userid="+student_id+""
"&sendstudentnotifications=false"
"&action=submitgrade"
"&sesskey="+sesskey+""
"&_qf__mod_assign_grade_form_"+student_id+"=1"
"&grade="+grade+""
"&assignfeedbackcomments_editor%5Btext%5D="
"&assignfeedbackcomments_editor%5Bformat%5D=1"
"&addattempt=0\""}}]
# data = json.dumps(value)
# print data
# gradereq = urllib2.Request(gradeUrl, data, headers)
# result = opener.open(gradereq)
# print result.read()
student_name = raw_input("Please Enter Student Name:\n")
grade = raw_input("Please Enter Grade:\n")
return 0
# ask grading web page and find student id
student_name = raw_input("Please Enter Student Name:\n")
grade = raw_input("Please Enter Grade:\n")
grade_student(student_name, grade)
| mit | -617,679,762,815,400,600 | 36.796296 | 177 | 0.604415 | false |
SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/_pydevd_bundle/pydevd_io.py | 1 | 3488 | from _pydevd_bundle import pydevd_constants
IS_PY3K = pydevd_constants.IS_PY3K
class IORedirector:
'''
This class works to wrap a stream (stdout/stderr) with an additional redirect.
'''
def __init__(self, original, new_redirect, wrap_buffer=False):
'''
:param stream original:
The stream to be wrapped (usually stdout/stderr).
:param stream new_redirect:
Usually IOBuf (below).
:param bool wrap_buffer:
            Whether to create a buffer attribute (needed to mimic Python 3
            stdout/stderr, which have a buffer for writing binary data).
'''
self._redirect_to = (original, new_redirect)
if wrap_buffer and hasattr(original, 'buffer'):
self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)
def write(self, s):
# Note that writing to the original stream may fail for some reasons
# (such as trying to write something that's not a string or having it closed).
for r in self._redirect_to:
r.write(s)
def isatty(self):
return self._redirect_to[0].isatty()
def flush(self):
for r in self._redirect_to:
r.flush()
def __getattr__(self, name):
for r in self._redirect_to:
if hasattr(r, name):
return getattr(r, name)
raise AttributeError(name)
class IOBuf:
    '''This class works as a replacement for stdout and stderr.
    It is a buffer: when its contents are requested, it erases what it has
    accumulated so far, so the next call does not return the same contents again.
'''
def __init__(self):
self.buflist = []
import os
self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')
def getvalue(self):
b = self.buflist
self.buflist = [] # clear it
return ''.join(b) # bytes on py2, str on py3.
def write(self, s):
if not IS_PY3K:
if isinstance(s, unicode):
# can't use 'errors' as kwargs in py 2.6
s = s.encode(self.encoding, 'replace')
else:
if isinstance(s, bytes):
s = s.decode(self.encoding, errors='replace')
self.buflist.append(s)
def isatty(self):
return False
def flush(self):
pass
def empty(self):
return len(self.buflist) == 0
class _RedirectionsHolder:
_stack_stdout = []
_stack_stderr = []
def start_redirect(keep_original_redirection=False, std='stdout'):
'''
@param std: 'stdout', 'stderr', or 'both'
'''
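    # Typical usage (sketch): buf = start_redirect(std='stdout'); ...; end_redirect('stdout');
    # buf.getvalue() then returns (and clears) whatever was written in between.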
import sys
buf = IOBuf()
if std == 'both':
config_stds = ['stdout', 'stderr']
else:
config_stds = [std]
for std in config_stds:
original = getattr(sys, std)
stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
stack.append(original)
if keep_original_redirection:
setattr(sys, std, IORedirector(getattr(sys, std), buf))
else:
setattr(sys, std, buf)
return buf
def end_redirect(std='stdout'):
import sys
if std == 'both':
config_stds = ['stdout', 'stderr']
else:
config_stds = [std]
for std in config_stds:
stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
setattr(sys, std, stack.pop())
| bsd-3-clause | 2,334,412,119,029,055,000 | 27.811966 | 86 | 0.561067 | false |
teamosceola/bitbake | lib/bb/ui/crumbs/hoblistmodel.py | 1 | 27825 | #
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2011 Intel Corporation
#
# Authored by Joshua Lock <[email protected]>
# Authored by Dongxiao Xu <[email protected]>
# Authored by Shane Wang <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gtk
import gobject
#
# PackageListModel
#
class PackageListModel(gtk.TreeStore):
"""
    This class defines a gtk.TreeStore subclass which will convert the output
of the bb.event.TargetsTreeGenerated event into a gtk.TreeStore whilst also
providing convenience functions to access gtk.TreeModel subclasses which
provide filtered views of the data.
"""
(COL_NAME, COL_VER, COL_REV, COL_RNM, COL_SEC, COL_SUM, COL_RDEP, COL_RPROV, COL_SIZE, COL_BINB, COL_INC) = range(11)
__gsignals__ = {
"packagelist-populated" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
"package-selection-changed" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
}
def __init__(self):
self.contents = None
self.images = None
self.pkgs_size = 0
self.pn_path = {}
self.pkg_path = {}
self.rprov_pkg = {}
gtk.TreeStore.__init__ (self,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN)
"""
Find the model path for the item_name
Returns the path in the model or None
"""
def find_path_for_item(self, item_name):
pkg = item_name
if item_name not in self.pkg_path.keys():
if item_name not in self.rprov_pkg.keys():
return None
pkg = self.rprov_pkg[item_name]
if pkg not in self.pkg_path.keys():
return None
return self.pkg_path[pkg]
def find_item_for_path(self, item_path):
return self[item_path][self.COL_NAME]
"""
    Helper function to determine whether an item matches the attributes specified by filter
"""
def tree_model_filter(self, model, it, filter):
for key in filter.keys():
if model.get_value(it, key) not in filter[key]:
return False
return True
"""
Create, if required, and return a filtered gtk.TreeModelSort
containing only the items specified by filter
"""
def tree_model(self, filter):
model = self.filter_new()
model.set_visible_func(self.tree_model_filter, filter)
sort = gtk.TreeModelSort(model)
sort.set_sort_column_id(PackageListModel.COL_NAME, gtk.SORT_ASCENDING)
sort.set_default_sort_func(None)
return sort
def convert_vpath_to_path(self, view_model, view_path):
# view_model is the model sorted
# get the path of the model filtered
filtered_model_path = view_model.convert_path_to_child_path(view_path)
# get the model filtered
filtered_model = view_model.get_model()
# get the path of the original model
path = filtered_model.convert_path_to_child_path(filtered_model_path)
return path
def convert_path_to_vpath(self, view_model, path):
name = self.find_item_for_path(path)
it = view_model.get_iter_first()
while it:
child_it = view_model.iter_children(it)
while child_it:
view_name = view_model.get_value(child_it, self.COL_NAME)
if view_name == name:
view_path = view_model.get_path(child_it)
return view_path
child_it = view_model.iter_next(child_it)
it = view_model.iter_next(it)
return None
"""
The populate() function takes as input the data from a
bb.event.PackageInfo event and populates the package list.
Once the population is done it emits gsignal packagelist-populated
to notify any listeners that the model is ready
"""
def populate(self, pkginfolist):
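        # One top-level row is created per recipe (PN-PV-PR) and one child row per
        # non-empty binary package, recording its rename, section, summary, runtime
        # dependencies/recommends, provides and size.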
self.clear()
self.pkgs_size = 0
self.pn_path = {}
self.pkg_path = {}
self.rprov_pkg = {}
for pkginfo in pkginfolist:
pn = pkginfo['PN']
pv = pkginfo['PV']
pr = pkginfo['PR']
if pn in self.pn_path.keys():
pniter = self.get_iter(self.pn_path[pn])
else:
pniter = self.append(None)
self.set(pniter, self.COL_NAME, pn + '-' + pv + '-' + pr,
self.COL_INC, False)
self.pn_path[pn] = self.get_path(pniter)
pkg = pkginfo['PKG']
pkgv = pkginfo['PKGV']
pkgr = pkginfo['PKGR']
pkgsize = pkginfo['PKGSIZE_%s' % pkg] if 'PKGSIZE_%s' % pkg in pkginfo.keys() else "0"
pkg_rename = pkginfo['PKG_%s' % pkg] if 'PKG_%s' % pkg in pkginfo.keys() else ""
section = pkginfo['SECTION_%s' % pkg] if 'SECTION_%s' % pkg in pkginfo.keys() else ""
summary = pkginfo['SUMMARY_%s' % pkg] if 'SUMMARY_%s' % pkg in pkginfo.keys() else ""
rdep = pkginfo['RDEPENDS_%s' % pkg] if 'RDEPENDS_%s' % pkg in pkginfo.keys() else ""
rrec = pkginfo['RRECOMMENDS_%s' % pkg] if 'RRECOMMENDS_%s' % pkg in pkginfo.keys() else ""
rprov = pkginfo['RPROVIDES_%s' % pkg] if 'RPROVIDES_%s' % pkg in pkginfo.keys() else ""
for i in rprov.split():
self.rprov_pkg[i] = pkg
if 'ALLOW_EMPTY_%s' % pkg in pkginfo.keys():
allow_empty = pkginfo['ALLOW_EMPTY_%s' % pkg]
elif 'ALLOW_EMPTY' in pkginfo.keys():
allow_empty = pkginfo['ALLOW_EMPTY']
else:
allow_empty = ""
if pkgsize == "0" and not allow_empty:
continue
if len(pkgsize) > 3:
size = '%.1f' % (int(pkgsize)*1.0/1024) + ' MB'
else:
size = pkgsize + ' KB'
it = self.append(pniter)
self.pkg_path[pkg] = self.get_path(it)
self.set(it, self.COL_NAME, pkg, self.COL_VER, pkgv,
self.COL_REV, pkgr, self.COL_RNM, pkg_rename,
self.COL_SEC, section, self.COL_SUM, summary,
self.COL_RDEP, rdep + ' ' + rrec,
self.COL_RPROV, rprov, self.COL_SIZE, size,
self.COL_BINB, "", self.COL_INC, False)
self.emit("packagelist-populated")
"""
Check whether the item at item_path is included or not
"""
def path_included(self, item_path):
return self[item_path][self.COL_INC]
"""
Update the model, send out the notification.
"""
def selection_change_notification(self):
self.emit("package-selection-changed")
"""
Mark a certain package as selected.
All its dependencies are marked as selected.
    The recipe that provides the package is marked as selected.
    If the user explicitly selects a recipe, all the packages it provides are selected.
"""
def include_item(self, item_path, binb=""):
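        # Selection propagates: including a package pulls in its runtime dependencies
        # and records this package in their "brought in by" (COL_BINB) column.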
if self.path_included(item_path):
return
item_name = self[item_path][self.COL_NAME]
item_rdep = self[item_path][self.COL_RDEP]
self[item_path][self.COL_INC] = True
self.selection_change_notification()
it = self.get_iter(item_path)
        # If the user explicitly selects a recipe, all the packages it provides are selected.
if not self[item_path][self.COL_VER] and binb == "User Selected":
child_it = self.iter_children(it)
while child_it:
child_path = self.get_path(child_it)
child_included = self.path_included(child_path)
if not child_included:
self.include_item(child_path, binb="User Selected")
child_it = self.iter_next(child_it)
return
        # The recipe that provides the package is also marked as selected
parent_it = self.iter_parent(it)
if parent_it:
parent_path = self.get_path(parent_it)
self[parent_path][self.COL_INC] = True
item_bin = self[item_path][self.COL_BINB].split(', ')
if binb and not binb in item_bin:
item_bin.append(binb)
self[item_path][self.COL_BINB] = ', '.join(item_bin).lstrip(', ')
if item_rdep:
# Ensure all of the items deps are included and, where appropriate,
# add this item to their COL_BINB
for dep in item_rdep.split(" "):
if dep.startswith('('):
continue
# If the contents model doesn't already contain dep, add it
dep_path = self.find_path_for_item(dep)
if not dep_path:
continue
dep_included = self.path_included(dep_path)
if dep_included and not dep in item_bin:
# don't set the COL_BINB to this item if the target is an
# item in our own COL_BINB
dep_bin = self[dep_path][self.COL_BINB].split(', ')
if not item_name in dep_bin:
dep_bin.append(item_name)
self[dep_path][self.COL_BINB] = ', '.join(dep_bin).lstrip(', ')
elif not dep_included:
self.include_item(dep_path, binb=item_name)
"""
Mark a certain package as de-selected.
    All other packages that depend on this package are marked as de-selected.
    If none of the packages provided by the recipe are selected, the recipe is marked as de-selected.
    If the user explicitly de-selects a recipe, all the packages it provides are de-selected.
"""
def exclude_item(self, item_path):
if not self.path_included(item_path):
return
self[item_path][self.COL_INC] = False
self.selection_change_notification()
item_name = self[item_path][self.COL_NAME]
item_rdep = self[item_path][self.COL_RDEP]
it = self.get_iter(item_path)
        # If the user explicitly de-selects a recipe, all the packages it provides are de-selected.
if not self[item_path][self.COL_VER]:
child_it = self.iter_children(it)
while child_it:
child_path = self.get_path(child_it)
child_included = self[child_path][self.COL_INC]
if child_included:
self.exclude_item(child_path)
child_it = self.iter_next(child_it)
return
        # If none of the packages provided by the recipe are selected, the recipe is marked as de-selected.
parent_it = self.iter_parent(it)
peer_iter = self.iter_children(parent_it)
enabled = 0
while peer_iter:
peer_path = self.get_path(peer_iter)
if self[peer_path][self.COL_INC]:
enabled = 1
break
peer_iter = self.iter_next(peer_iter)
if not enabled:
parent_path = self.get_path(parent_it)
self[parent_path][self.COL_INC] = False
        # All packages that depend on this package are de-selected.
if item_rdep:
for dep in item_rdep.split(" "):
if dep.startswith('('):
continue
dep_path = self.find_path_for_item(dep)
if not dep_path:
continue
dep_bin = self[dep_path][self.COL_BINB].split(', ')
if item_name in dep_bin:
dep_bin.remove(item_name)
self[dep_path][self.COL_BINB] = ', '.join(dep_bin).lstrip(', ')
item_bin = self[item_path][self.COL_BINB].split(', ')
if item_bin:
for binb in item_bin:
binb_path = self.find_path_for_item(binb)
if not binb_path:
continue
self.exclude_item(binb_path)
"""
    The package model may be incomplete, therefore when calling
    set_selected_packages() some packages will not be set as included.
    Return the list of packages that could not be set.
"""
def set_selected_packages(self, packagelist):
left = []
for pn in packagelist:
if pn in self.pkg_path.keys():
path = self.pkg_path[pn]
self.include_item(item_path=path,
binb="User Selected")
else:
left.append(pn)
return left
def get_selected_packages(self):
packagelist = []
it = self.get_iter_first()
while it:
child_it = self.iter_children(it)
while child_it:
if self.get_value(child_it, self.COL_INC):
name = self.get_value(child_it, self.COL_NAME)
packagelist.append(name)
child_it = self.iter_next(child_it)
it = self.iter_next(it)
return packagelist
"""
    Return the total size of the selected packages, in KB.
"""
def get_packages_size(self):
packages_size = 0
it = self.get_iter_first()
while it:
child_it = self.iter_children(it)
while child_it:
if self.get_value(child_it, self.COL_INC):
str_size = self.get_value(child_it, self.COL_SIZE)
                    if not str_size:
                        # advance the iterator before skipping, otherwise this loops forever
                        child_it = self.iter_next(child_it)
                        continue
unit = str_size.split()
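                    # the size column is expected to hold strings like "1.2 MB" or
                    # "340 KB"; MB values are converted to KB for the running total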
if unit[1] == 'MB':
size = float(unit[0])*1024
else:
size = float(unit[0])
packages_size += size
child_it = self.iter_next(child_it)
it = self.iter_next(it)
return "%f" % packages_size
"""
    Clear the selection by setting the include flag of each entry to False
"""
def reset(self):
self.pkgs_size = 0
it = self.get_iter_first()
while it:
self.set(it, self.COL_INC, False)
child_it = self.iter_children(it)
while child_it:
self.set(child_it,
self.COL_INC, False,
self.COL_BINB, "")
child_it = self.iter_next(child_it)
it = self.iter_next(it)
self.selection_change_notification()
#
# RecipeListModel
#
class RecipeListModel(gtk.ListStore):
"""
    This class defines a gtk.ListStore subclass which will convert the output
of the bb.event.TargetsTreeGenerated event into a gtk.ListStore whilst also
providing convenience functions to access gtk.TreeModel subclasses which
provide filtered views of the data.
"""
(COL_NAME, COL_DESC, COL_LIC, COL_GROUP, COL_DEPS, COL_BINB, COL_TYPE, COL_INC, COL_IMG, COL_INSTALL, COL_PN) = range(11)
__dummy_image__ = "--select a base image--"
__gsignals__ = {
"recipelist-populated" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
"recipe-selection-changed" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
}
"""
"""
def __init__(self):
gtk.ListStore.__init__ (self,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING)
"""
Find the model path for the item_name
Returns the path in the model or None
"""
def find_path_for_item(self, item_name):
if self.non_target_name(item_name) or item_name not in self.pn_path.keys():
return None
else:
return self.pn_path[item_name]
def find_item_for_path(self, item_path):
return self[item_path][self.COL_NAME]
"""
Helper method to determine whether name is a target pn
"""
def non_target_name(self, name):
if name and ('-native' in name):
return True
return False
"""
    Helper function to determine whether an item matches the given filter
"""
def tree_model_filter(self, model, it, filter):
name = model.get_value(it, self.COL_NAME)
if self.non_target_name(name):
return False
for key in filter.keys():
if model.get_value(it, key) not in filter[key]:
return False
return True
def sort_func(self, model, iter1, iter2):
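        # default sort function for the filtered views: order rows by recipe name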
val1 = model.get_value(iter1, RecipeListModel.COL_NAME)
val2 = model.get_value(iter2, RecipeListModel.COL_NAME)
return val1 > val2
"""
Create, if required, and return a filtered gtk.TreeModelSort
    containing only the items specified by filter
"""
def tree_model(self, filter):
model = self.filter_new()
model.set_visible_func(self.tree_model_filter, filter)
sort = gtk.TreeModelSort(model)
sort.set_default_sort_func(self.sort_func)
return sort
def convert_vpath_to_path(self, view_model, view_path):
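        # the view model is a gtk.TreeModelSort wrapping a TreeModelFilter, so the
        # view path has to be unwrapped twice to reach this base model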
filtered_model_path = view_model.convert_path_to_child_path(view_path)
filtered_model = view_model.get_model()
# get the path of the original model
path = filtered_model.convert_path_to_child_path(filtered_model_path)
return path
def convert_path_to_vpath(self, view_model, path):
it = view_model.get_iter_first()
while it:
name = self.find_item_for_path(path)
view_name = view_model.get_value(it, RecipeListModel.COL_NAME)
if view_name == name:
view_path = view_model.get_path(it)
return view_path
it = view_model.iter_next(it)
return None
"""
The populate() function takes as input the data from a
bb.event.TargetsTreeGenerated event and populates the RecipeList.
Once the population is done it emits gsignal recipelist-populated
to notify any listeners that the model is ready
"""
def populate(self, event_model):
# First clear the model, in case repopulating
self.clear()
# dummy image for prompt
self.set(self.append(), self.COL_NAME, self.__dummy_image__,
self.COL_DESC, "",
self.COL_LIC, "", self.COL_GROUP, "",
self.COL_DEPS, "", self.COL_BINB, "",
self.COL_TYPE, "image", self.COL_INC, False,
self.COL_IMG, False, self.COL_INSTALL, "", self.COL_PN, self.__dummy_image__)
for item in event_model["pn"]:
name = item
desc = event_model["pn"][item]["description"]
lic = event_model["pn"][item]["license"]
group = event_model["pn"][item]["section"]
install = []
depends = event_model["depends"].get(item, [])
rdepends = event_model["rdepends-pn"].get(item, [])
depends = depends + rdepends
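            # classify each entry so the GUI can filter by type: packagegroups
            # ("task"), images, toolchains, plain recipes and their multilib variants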
if ('task-' in name):
if ('lib32-' in name or 'lib64-' in name):
atype = 'mltask'
else:
atype = 'task'
elif ('-image-' in name):
atype = 'image'
install = rdepends
elif ('meta-' in name):
atype = 'toolchain'
elif (name == 'dummy-image' or name == 'dummy-toolchain'):
atype = 'dummy'
else:
if ('lib32-' in name or 'lib64-' in name):
atype = 'mlrecipe'
else:
atype = 'recipe'
self.set(self.append(), self.COL_NAME, item, self.COL_DESC, desc,
self.COL_LIC, lic, self.COL_GROUP, group,
self.COL_DEPS, " ".join(depends), self.COL_BINB, "",
self.COL_TYPE, atype, self.COL_INC, False,
self.COL_IMG, False, self.COL_INSTALL, " ".join(install), self.COL_PN, item)
self.pn_path = {}
it = self.get_iter_first()
while it:
pn = self.get_value(it, self.COL_NAME)
path = self.get_path(it)
self.pn_path[pn] = path
it = self.iter_next(it)
self.emit("recipelist-populated")
"""
Update the model, send out the notification.
"""
def selection_change_notification(self):
self.emit("recipe-selection-changed")
def path_included(self, item_path):
return self[item_path][self.COL_INC]
"""
Append a certain image into the combobox
"""
def image_list_append(self, name, deps, install):
# check whether a certain image is there
if not name or self.find_path_for_item(name):
return
it = self.append()
self.set(it, self.COL_NAME, name, self.COL_DESC, "",
self.COL_LIC, "", self.COL_GROUP, "",
self.COL_DEPS, deps, self.COL_BINB, "",
self.COL_TYPE, "image", self.COL_INC, False,
self.COL_IMG, False, self.COL_INSTALL, install,
self.COL_PN, name)
self.pn_path[name] = self.get_path(it)
"""
Add this item, and any of its dependencies, to the image contents
"""
def include_item(self, item_path, binb="", image_contents=False):
if self.path_included(item_path):
return
item_name = self[item_path][self.COL_NAME]
item_deps = self[item_path][self.COL_DEPS]
self[item_path][self.COL_INC] = True
self.selection_change_notification()
item_bin = self[item_path][self.COL_BINB].split(', ')
if binb and not binb in item_bin:
item_bin.append(binb)
self[item_path][self.COL_BINB] = ', '.join(item_bin).lstrip(', ')
# We want to do some magic with things which are brought in by the
        # base image so tag them as such
if image_contents:
self[item_path][self.COL_IMG] = True
if item_deps:
            # Ensure all of the item's deps are included and, where appropriate,
# add this item to their COL_BINB
for dep in item_deps.split(" "):
# If the contents model doesn't already contain dep, add it
dep_path = self.find_path_for_item(dep)
if not dep_path:
continue
dep_included = self.path_included(dep_path)
if dep_included and not dep in item_bin:
# don't set the COL_BINB to this item if the target is an
# item in our own COL_BINB
dep_bin = self[dep_path][self.COL_BINB].split(', ')
if not item_name in dep_bin:
dep_bin.append(item_name)
self[dep_path][self.COL_BINB] = ', '.join(dep_bin).lstrip(', ')
elif not dep_included:
self.include_item(dep_path, binb=item_name, image_contents=image_contents)
def exclude_item(self, item_path):
if not self.path_included(item_path):
return
self[item_path][self.COL_INC] = False
self.selection_change_notification()
item_name = self[item_path][self.COL_NAME]
item_deps = self[item_path][self.COL_DEPS]
if item_deps:
for dep in item_deps.split(" "):
dep_path = self.find_path_for_item(dep)
if not dep_path:
continue
dep_bin = self[dep_path][self.COL_BINB].split(', ')
if item_name in dep_bin:
dep_bin.remove(item_name)
self[dep_path][self.COL_BINB] = ', '.join(dep_bin).lstrip(', ')
item_bin = self[item_path][self.COL_BINB].split(', ')
if item_bin:
for binb in item_bin:
binb_path = self.find_path_for_item(binb)
if not binb_path:
continue
self.exclude_item(binb_path)
def reset(self):
it = self.get_iter_first()
while it:
self.set(it,
self.COL_INC, False,
self.COL_BINB, "",
self.COL_IMG, False)
it = self.iter_next(it)
self.selection_change_notification()
"""
Returns two lists. One of user selected recipes and the other containing
all selected recipes
"""
def get_selected_recipes(self):
allrecipes = []
userrecipes = []
it = self.get_iter_first()
while it:
if self.get_value(it, self.COL_INC):
name = self.get_value(it, self.COL_PN)
type = self.get_value(it, self.COL_TYPE)
if type != "image":
allrecipes.append(name)
sel = "User Selected" in self.get_value(it, self.COL_BINB)
if sel:
userrecipes.append(name)
it = self.iter_next(it)
return list(set(userrecipes)), list(set(allrecipes))
def set_selected_recipes(self, recipelist):
for pn in recipelist:
if pn in self.pn_path.keys():
path = self.pn_path[pn]
self.include_item(item_path=path,
binb="User Selected")
def get_selected_image(self):
it = self.get_iter_first()
while it:
if self.get_value(it, self.COL_INC):
name = self.get_value(it, self.COL_PN)
type = self.get_value(it, self.COL_TYPE)
if type == "image":
sel = "User Selected" in self.get_value(it, self.COL_BINB)
if sel:
return name
it = self.iter_next(it)
return None
def set_selected_image(self, img):
if img == None:
return
path = self.find_path_for_item(img)
self.include_item(item_path=path,
binb="User Selected",
image_contents=True)
| gpl-2.0 | -4,024,867,894,714,148,400 | 36.5 | 125 | 0.534304 | false |
nearlyfreeapps/python-googleadwords | tests/adspygoogle/adwords/v201206/remarketing_unittest.py | 1 | 1751 | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover Remarketing examples."""
__author__ = '[email protected] (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import unittest
from examples.adspygoogle.adwords.v201206.remarketing import add_audience
from examples.adspygoogle.adwords.v201206.remarketing import add_conversion_tracker
from tests.adspygoogle.adwords import client
from tests.adspygoogle.adwords import SERVER_V201206
from tests.adspygoogle.adwords import TEST_VERSION_V201206
from tests.adspygoogle.adwords import VERSION_V201206
class Remarketing(unittest.TestCase):
"""Unittest suite for Remarketing code examples."""
SERVER = SERVER_V201206
VERSION = VERSION_V201206
client.debug = False
def setUp(self):
"""Prepare unittest."""
client.use_mcc = False
def testAddAudience(self):
"""Tests whether we can add an audience."""
add_audience.main(client)
def testAddConversionTracker(self):
"""Test whether we can add a conversion tracker."""
add_conversion_tracker.main(client)
if __name__ == '__main__':
if TEST_VERSION_V201206:
unittest.main()
| apache-2.0 | 6,638,489,876,345,648,000 | 29.719298 | 83 | 0.741291 | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.2/Lib/distutils/command/bdist.py | 1 | 4916 | """distutils.command.bdist
Implements the Distutils 'bdist' command (create a built [binary]
distribution)."""
# created 2000/03/29, Greg Ward
__revision__ = "$Id: bdist.py,v 1.21 2001/12/06 20:57:12 fdrake Exp $"
import os, string
from types import *
from distutils.core import Command
from distutils.errors import *
from distutils.util import get_platform
def show_formats ():
"""Print list of available formats (arguments to "--format" option).
"""
from distutils.fancy_getopt import FancyGetopt
formats=[]
for format in bdist.format_commands:
formats.append(("formats=" + format, None,
bdist.format_command[format][1]))
pretty_printer = FancyGetopt(formats)
pretty_printer.print_help("List of available distribution formats:")
class bdist (Command):
description = "create a built (binary) distribution"
user_options = [('bdist-base=', 'b',
"temporary directory for creating built distributions"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('formats=', None,
"formats for distribution (comma-separated list)"),
('dist-dir=', 'd',
"directory to put final built distributions in "
"[default: dist]"),
]
help_options = [
('help-formats', None,
"lists available distribution formats", show_formats),
]
# The following commands do not take a format option from bdist
no_format_option = ('bdist_rpm',)
# This won't do in reality: will need to distinguish RPM-ish Linux,
# Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
default_format = { 'posix': 'gztar',
'nt': 'zip', }
# Establish the preferred order (for the --help-formats option).
format_commands = ['rpm', 'gztar', 'bztar', 'ztar', 'tar',
'wininst', 'zip']
# And the real information.
format_command = { 'rpm': ('bdist_rpm', "RPM distribution"),
'gztar': ('bdist_dumb', "gzip'ed tar file"),
'bztar': ('bdist_dumb', "bzip2'ed tar file"),
'ztar': ('bdist_dumb', "compressed tar file"),
'tar': ('bdist_dumb', "tar file"),
'wininst': ('bdist_wininst',
"Windows executable installer"),
'zip': ('bdist_dumb', "ZIP file"),
}
def initialize_options (self):
self.bdist_base = None
self.plat_name = None
self.formats = None
self.dist_dir = None
# initialize_options()
def finalize_options (self):
# have to finalize 'plat_name' before 'bdist_base'
if self.plat_name is None:
self.plat_name = get_platform()
# 'bdist_base' -- parent of per-built-distribution-format
# temporary directories (eg. we'll probably have
# "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
if self.bdist_base is None:
build_base = self.get_finalized_command('build').build_base
self.bdist_base = os.path.join(build_base,
'bdist.' + self.plat_name)
self.ensure_string_list('formats')
if self.formats is None:
try:
self.formats = [self.default_format[os.name]]
except KeyError:
raise DistutilsPlatformError, \
"don't know how to create built distributions " + \
"on platform %s" % os.name
if self.dist_dir is None:
self.dist_dir = "dist"
# finalize_options()
def run (self):
# Figure out which sub-commands we need to run.
commands = []
for format in self.formats:
try:
commands.append(self.format_command[format][0])
except KeyError:
raise DistutilsOptionError, "invalid format '%s'" % format
# Reinitialize and run each command.
for i in range(len(self.formats)):
cmd_name = commands[i]
sub_cmd = self.reinitialize_command(cmd_name)
if cmd_name not in self.no_format_option:
sub_cmd.format = self.formats[i]
print ("bdist.run: format=%s, command=%s, rest=%s" %
(self.formats[i], cmd_name, commands[i+1:]))
# If we're going to need to run this command again, tell it to
# keep its temporary files around so subsequent runs go faster.
if cmd_name in commands[i+1:]:
sub_cmd.keep_temp = 1
self.run_command(cmd_name)
# run()
# class bdist
| mit | 6,128,645,822,881,754,000 | 34.366906 | 77 | 0.543938 | false |
Yannig/ansible-modules-core | cloud/openstack/keystone_user.py | 9 | 12566 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Based on Jimmy Tang's implementation
DOCUMENTATION = '''
---
module: keystone_user
version_added: "1.2"
short_description: Manage OpenStack Identity (keystone) users, tenants and roles
description:
- Manage users,tenants, roles from OpenStack.
options:
login_user:
description:
- login username to authenticate to keystone
required: false
default: admin
login_password:
description:
- Password of login user
required: false
default: 'yes'
login_tenant_name:
description:
- The tenant login_user belongs to
required: false
default: None
version_added: "1.3"
token:
description:
- The token to be uses in case the password is not specified
required: false
default: None
endpoint:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
user:
description:
- The name of the user that has to added/removed from OpenStack
required: false
default: None
password:
description:
- The password to be assigned to the user
required: false
default: None
tenant:
description:
- The tenant name that has be added/removed
required: false
default: None
tenant_description:
description:
- A description for the tenant
required: false
default: None
email:
description:
- An email address for the user
required: false
default: None
role:
description:
- The name of the role to be assigned or created
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
requirements:
- "python >= 2.6"
- python-keystoneclient
author: Lorin Hochstein
'''
EXAMPLES = '''
# Create a tenant
- keystone_user: tenant=demo tenant_description="Default Tenant"
# Create a user
- keystone_user: user=john tenant=demo password=secrete
# Apply the admin role to the john user in the demo tenant
- keystone_user: role=admin user=john tenant=demo
'''
try:
from keystoneclient.v2_0 import client
except ImportError:
keystoneclient_found = False
else:
keystoneclient_found = True
def authenticate(endpoint, token, login_user, login_password, login_tenant_name):
"""Return a keystone client object"""
if token:
return client.Client(endpoint=endpoint, token=token)
else:
return client.Client(auth_url=endpoint, username=login_user,
password=login_password, tenant_name=login_tenant_name)
def tenant_exists(keystone, tenant):
""" Return True if tenant already exists"""
return tenant in [x.name for x in keystone.tenants.list()]
def user_exists(keystone, user):
"""" Return True if user already exists"""
return user in [x.name for x in keystone.users.list()]
def get_tenant(keystone, name):
""" Retrieve a tenant by name"""
tenants = [x for x in keystone.tenants.list() if x.name == name]
count = len(tenants)
if count == 0:
raise KeyError("No keystone tenants with name %s" % name)
elif count > 1:
raise ValueError("%d tenants with name %s" % (count, name))
else:
return tenants[0]
def get_user(keystone, name):
""" Retrieve a user by name"""
users = [x for x in keystone.users.list() if x.name == name]
count = len(users)
if count == 0:
raise KeyError("No keystone users with name %s" % name)
elif count > 1:
raise ValueError("%d users with name %s" % (count, name))
else:
return users[0]
def get_role(keystone, name):
""" Retrieve a role by name"""
roles = [x for x in keystone.roles.list() if x.name == name]
count = len(roles)
if count == 0:
raise KeyError("No keystone roles with name %s" % name)
elif count > 1:
raise ValueError("%d roles with name %s" % (count, name))
else:
return roles[0]
def get_tenant_id(keystone, name):
return get_tenant(keystone, name).id
def get_user_id(keystone, name):
return get_user(keystone, name).id
def ensure_tenant_exists(keystone, tenant_name, tenant_description,
check_mode):
""" Ensure that a tenant exists.
Return (True, id) if a new tenant was created, (False, None) if it
already existed.
"""
# Check if tenant already exists
try:
tenant = get_tenant(keystone, tenant_name)
except KeyError:
# Tenant doesn't exist yet
pass
else:
if tenant.description == tenant_description:
return (False, tenant.id)
else:
# We need to update the tenant description
if check_mode:
return (True, tenant.id)
else:
tenant.update(description=tenant_description)
return (True, tenant.id)
# We now know we will have to create a new tenant
if check_mode:
return (True, None)
ks_tenant = keystone.tenants.create(tenant_name=tenant_name,
description=tenant_description,
enabled=True)
return (True, ks_tenant.id)
def ensure_tenant_absent(keystone, tenant, check_mode):
""" Ensure that a tenant does not exist
Return True if the tenant was removed, False if it didn't exist
in the first place
"""
if not tenant_exists(keystone, tenant):
return False
# We now know we will have to delete the tenant
if check_mode:
return True
    # Not in check mode: actually remove the tenant. keystoneclient's
    # TenantManager.delete() accepts the tenant object (or its id).
    keystone.tenants.delete(get_tenant(keystone, tenant))
    return True
def ensure_user_exists(keystone, user_name, password, email, tenant_name,
check_mode):
""" Check if user exists
    Return (True, id) if a new user was created, (False, id) if the user
    already exists
"""
    # Check if the user already exists
try:
user = get_user(keystone, user_name)
except KeyError:
        # User doesn't exist yet
pass
else:
# User does exist, we're done
return (False, user.id)
# We now know we will have to create a new user
if check_mode:
return (True, None)
tenant = get_tenant(keystone, tenant_name)
user = keystone.users.create(name=user_name, password=password,
email=email, tenant_id=tenant.id)
return (True, user.id)
def ensure_role_exists(keystone, user_name, tenant_name, role_name,
check_mode):
""" Check if role exists
Return (True, id) if a new role was created or if the role was newly
assigned to the user for the tenant. (False, id) if the role already
    exists and was already assigned to the user for the tenant.
"""
# Check if the user has the role in the tenant
user = get_user(keystone, user_name)
tenant = get_tenant(keystone, tenant_name)
roles = [x for x in keystone.roles.roles_for_user(user, tenant)
if x.name == role_name]
count = len(roles)
if count == 1:
# If the role is in there, we are done
role = roles[0]
return (False, role.id)
elif count > 1:
# Too many roles with the same name, throw an error
raise ValueError("%d roles with name %s" % (count, role_name))
# At this point, we know we will need to make changes
if check_mode:
return (True, None)
# Get the role if it exists
try:
role = get_role(keystone, role_name)
except KeyError:
# Role doesn't exist yet
role = keystone.roles.create(role_name)
    # Associate the role with the user for the given tenant
keystone.roles.add_user_role(user, role, tenant)
return (True, role.id)
def ensure_user_absent(keystone, user, check_mode):
raise NotImplementedError("Not yet implemented")
def ensure_role_absent(keystone, user, tenant, role, check_mode):
raise NotImplementedError("Not yet implemented")
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
tenant_description=dict(required=False),
email=dict(required=False),
user=dict(required=False),
tenant=dict(required=False),
password=dict(required=False),
role=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(required=False,
default="http://127.0.0.1:35357/v2.0"),
token=dict(required=False),
login_user=dict(required=False),
login_password=dict(required=False),
login_tenant_name=dict(required=False)
))
# keystone operations themselves take an endpoint, not a keystone auth_url
del(argument_spec['auth_url'])
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['token', 'login_user'],
['token', 'login_password'],
['token', 'login_tenant_name']]
)
if not keystoneclient_found:
module.fail_json(msg="the python-keystoneclient module is required")
user = module.params['user']
password = module.params['password']
tenant = module.params['tenant']
tenant_description = module.params['tenant_description']
email = module.params['email']
role = module.params['role']
state = module.params['state']
endpoint = module.params['endpoint']
token = module.params['token']
login_user = module.params['login_user']
login_password = module.params['login_password']
login_tenant_name = module.params['login_tenant_name']
keystone = authenticate(endpoint, token, login_user, login_password, login_tenant_name)
check_mode = module.check_mode
try:
d = dispatch(keystone, user, password, tenant, tenant_description,
email, role, state, endpoint, token, login_user,
login_password, check_mode)
except Exception, e:
if check_mode:
# If we have a failure in check mode
module.exit_json(changed=True,
msg="exception: %s" % e)
else:
module.fail_json(msg="exception: %s" % e)
else:
module.exit_json(**d)
def dispatch(keystone, user=None, password=None, tenant=None,
tenant_description=None, email=None, role=None,
state="present", endpoint=None, token=None, login_user=None,
login_password=None, check_mode=False):
""" Dispatch to the appropriate method.
Returns a dict that will be passed to exit_json
tenant user role state
------ ---- ---- --------
X present ensure_tenant_exists
X absent ensure_tenant_absent
X X present ensure_user_exists
X X absent ensure_user_absent
X X X present ensure_role_exists
X X X absent ensure_role_absent
"""
changed = False
id = None
if tenant and not user and not role and state == "present":
changed, id = ensure_tenant_exists(keystone, tenant,
tenant_description, check_mode)
elif tenant and not user and not role and state == "absent":
changed = ensure_tenant_absent(keystone, tenant, check_mode)
elif tenant and user and not role and state == "present":
changed, id = ensure_user_exists(keystone, user, password,
email, tenant, check_mode)
elif tenant and user and not role and state == "absent":
changed = ensure_user_absent(keystone, user, check_mode)
elif tenant and user and role and state == "present":
changed, id = ensure_role_exists(keystone, user, tenant, role,
check_mode)
elif tenant and user and role and state == "absent":
changed = ensure_role_absent(keystone, user, tenant, role, check_mode)
else:
# Should never reach here
raise ValueError("Code should never reach here")
return dict(changed=changed, id=id)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | 3,295,743,497,863,464,000 | 30.493734 | 91 | 0.607274 | false |
maxamillion/atomic-reactor | tests/plugins/test_flatpak_create_dockerfile.py | 1 | 3526 | """
Copyright (c) 2017 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from flexmock import flexmock
import json
from modulemd import ModuleMetadata
import responses
import os
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugins.pre_resolve_module_compose import (ComposeInfo,
ModuleInfo,
set_compose_info)
from atomic_reactor.plugins.pre_flatpak_create_dockerfile import FlatpakCreateDockerfilePlugin
from atomic_reactor.plugin import PreBuildPluginsRunner
from atomic_reactor.source import VcsInfo
from atomic_reactor.util import ImageName
from tests.constants import (MOCK_SOURCE, FLATPAK_GIT, FLATPAK_SHA1)
from tests.fixtures import docker_tasker # noqa
from tests.flatpak import FLATPAK_APP_JSON, FLATPAK_APP_MODULEMD, FLATPAK_APP_RPMS
class MockSource(object):
def __init__(self, tmpdir):
tmpdir = str(tmpdir)
self.dockerfile_path = "./"
self.path = tmpdir
self.flatpak_json_path = os.path.join(tmpdir, 'flatpak.json')
def get_build_file_path(self):
return self.flatpak_json_path, self.path
def get_vcs_info(self):
return VcsInfo('git', FLATPAK_GIT, FLATPAK_SHA1)
class MockBuilder(object):
def __init__(self):
self.image_id = "xxx"
self.base_image = ImageName.parse("org.gnome.eog")
def set_base_image(self, base_image):
pass
def set_df_path(self, path):
self.df_path = path
def mock_workflow(tmpdir):
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
mock_source = MockSource(tmpdir)
setattr(workflow, 'builder', MockBuilder())
workflow.builder.source = mock_source
flexmock(workflow, source=mock_source)
with open(mock_source.flatpak_json_path, "w") as f:
f.write(json.dumps(FLATPAK_APP_JSON))
setattr(workflow.builder, 'df_dir', str(tmpdir))
return workflow
PDC_URL = 'https://pdc.fedoraproject.org/rest_api/v1'
MODULE_NAME = 'eog'
MODULE_STREAM = 'f26'
ALL_VERSIONS_JSON = [{"variant_release": "20170629143459"},
{"variant_release": "20170629213428"}]
LATEST_VERSION = "20170629213428"
LATEST_VERSION_JSON = [{"modulemd": FLATPAK_APP_MODULEMD}]
@responses.activate # noqa - docker_tasker fixture
def test_flatpak_create_dockerfile(tmpdir, docker_tasker):
workflow = mock_workflow(tmpdir)
args = {
'base_image': "registry.fedoraproject.org/fedora:latest",
}
mmd = ModuleMetadata()
mmd.loads(FLATPAK_APP_MODULEMD)
base_module = ModuleInfo(MODULE_NAME, MODULE_STREAM, LATEST_VERSION,
mmd, FLATPAK_APP_RPMS)
repo_url = 'http://odcs.example/composes/latest-odcs-42-1/compose/Temporary/$basearch/os/'
    compose_info = ComposeInfo(MODULE_NAME + '-' + MODULE_STREAM,
42, base_module,
{'eog': base_module},
repo_url)
set_compose_info(workflow, compose_info)
runner = PreBuildPluginsRunner(
docker_tasker,
workflow,
[{
'name': FlatpakCreateDockerfilePlugin.key,
'args': args
}]
)
runner.run()
assert os.path.exists(workflow.builder.df_path)
assert os.path.exists(os.path.join(workflow.builder.df_dir, 'cleanup.sh'))
| bsd-3-clause | 271,557,634,577,478,270 | 29.66087 | 94 | 0.648894 | false |
xiejianying/pjsip_trunk | pjsip-apps/src/python/samples/registration.py | 43 | 1995 | # $Id$
#
# SIP account and registration sample. In this sample, the program
# will block to wait until registration is complete
#
# Copyright (C) 2003-2008 Benny Prijono <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
import pjsua as pj
import threading
def log_cb(level, str, len):
print str,
class MyAccountCallback(pj.AccountCallback):
sem = None
def __init__(self, account):
pj.AccountCallback.__init__(self, account)
def wait(self):
self.sem = threading.Semaphore(0)
self.sem.acquire()
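        # acquire() blocks here until on_reg_state() releases the semaphore,
        # i.e. until the registration attempt reaches a final (>= 200) status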
def on_reg_state(self):
if self.sem:
if self.account.info().reg_status >= 200:
self.sem.release()
lib = pj.Lib()
try:
lib.init(log_cfg = pj.LogConfig(level=4, callback=log_cb))
lib.create_transport(pj.TransportType.UDP, pj.TransportConfig(5080))
lib.start()
acc = lib.create_account(pj.AccountConfig("pjsip.org", "bennylp", "***"))
acc_cb = MyAccountCallback(acc)
acc.set_callback(acc_cb)
acc_cb.wait()
print "\n"
print "Registration complete, status=", acc.info().reg_status, \
"(" + acc.info().reg_reason + ")"
print "\nPress ENTER to quit"
sys.stdin.readline()
lib.destroy()
lib = None
except pj.Error, e:
print "Exception: " + str(e)
lib.destroy()
| gpl-2.0 | 262,688,797,470,756,380 | 27.5 | 77 | 0.67619 | false |
Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/operations/_shared_gallery_images_operations.py | 1 | 9136 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SharedGalleryImagesOperations(object):
"""SharedGalleryImagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
gallery_unique_name, # type: str
shared_to=None, # type: Optional[Union[str, "_models.SharedToValues"]]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SharedGalleryImageList"]
"""List shared gallery images by subscription id or tenant id.
:param location: Resource location.
:type location: str
:param gallery_unique_name: The unique name of the Shared Gallery.
:type gallery_unique_name: str
:param shared_to: The query parameter to decide what shared galleries to fetch when doing
listing operations.
:type shared_to: str or ~azure.mgmt.compute.v2020_09_30.models.SharedToValues
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SharedGalleryImageList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_09_30.models.SharedGalleryImageList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedGalleryImageList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
'galleryUniqueName': self._serialize.url("gallery_unique_name", gallery_unique_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if shared_to is not None:
query_parameters['sharedTo'] = self._serialize.query("shared_to", shared_to, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SharedGalleryImageList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images'} # type: ignore
def get(
self,
location, # type: str
gallery_unique_name, # type: str
gallery_image_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.SharedGalleryImage"
"""Get a shared gallery image by subscription id or tenant id.
:param location: Resource location.
:type location: str
:param gallery_unique_name: The unique name of the Shared Gallery.
:type gallery_unique_name: str
:param gallery_image_name: The name of the Shared Gallery Image Definition from which the Image
Versions are to be listed.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedGalleryImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.SharedGalleryImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedGalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-30"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'location': self._serialize.url("location", location, 'str'),
'galleryUniqueName': self._serialize.url("gallery_unique_name", gallery_unique_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedGalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/sharedGalleries/{galleryUniqueName}/images/{galleryImageName}'} # type: ignore
| mit | -5,635,014,531,640,726,000 | 46.832461 | 188 | 0.6412 | false |
asteca/ASteCA | packages/synth_clust/synthClustPrep.py | 1 | 2333 |
import numpy as np
from . import synth_cluster
from . add_errors import getSigmas
def main(clp, pd):
"""
"""
if pd['best_fit_algor'] != 'n':
# Use the selected solution values for all the parameters.
model = clp['isoch_fit_params'][pd['D3_sol'] + '_sol']
# Pack common args.
clp['syntClustArgs'] = (
pd['fundam_params'], clp['isoch_fit_params']['varIdxs'],
clp['completeness'], clp['err_lst'], clp['em_float'],
clp['max_mag_syn'], pd['ext_coefs'], pd['binar_flag'],
pd['mean_bin_mr'], pd['N_fc'], pd['m_ini_idx'],
pd['st_dist_mass'], pd['theor_tracks'], pd['err_norm_rand'],
pd['binar_probs'], pd['ext_unif_rand'], pd['R_V'])
# Generate isochrone, synthetic cluster (with uncertainties), and sigma
# values for the "best" fitted parameters.
# Model with the "best" fitted parameters.
model_var = np.array(model)[clp['isoch_fit_params']['varIdxs']]
# shape: (N_dim, N_stars)
synth_clust = setSynthClust(model_var, *clp['syntClustArgs'])
# Get uncertainties
sigma = []
for i, popt_mc in enumerate(clp['err_lst']):
sigma.append(getSigmas(synth_clust[0], popt_mc))
# Save for plotting and storing.
clp['synth_cl_phot'], clp['synth_cl_sigma'] = synth_clust, sigma
# Mask that points to *binary* systems
if pd['binar_flag']:
clp['binar_idx'] = ~(synth_clust[-1] == -99.)
else:
clp['binar_idx'] = np.array([
False for _ in range(synth_clust.shape[-1])])
return clp
def setSynthClust(
model, fundam_params, varIdxs, completeness, err_lst,
em_float, max_mag_syn, ext_coefs, binar_flag, mean_bin_mr, N_fc, m_ini_idx,
st_dist_mass, theor_tracks, err_norm_rand, binar_probs,
ext_unif_rand, R_V):
"""
Generate synthetic cluster given by 'model'.
"""
# This returns the non-reduced, non-transposed array
transpose_flag = False
return synth_cluster.main(
fundam_params, varIdxs, model, completeness, err_lst, em_float,
max_mag_syn, ext_coefs, binar_flag, mean_bin_mr, N_fc, m_ini_idx,
st_dist_mass, theor_tracks, err_norm_rand, binar_probs,
ext_unif_rand, R_V, transpose_flag)
| gpl-3.0 | 995,902,836,460,400,800 | 36.629032 | 79 | 0.582083 | false |
aviarypl/mozilla-l10n-addons-server | src/olympia/ratings/admin.py | 2 | 3004 | from django.contrib import admin
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from olympia.translations.utils import truncate_text
from olympia.zadmin.admin import related_single_content_link
from .models import Rating
class RatingTypeFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = 'Type'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'type'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('rating', 'User Rating'),
('reply', 'Developer/Admin Reply'),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() == 'rating':
return queryset.filter(reply_to__isnull=True)
elif self.value() == 'reply':
return queryset.filter(reply_to__isnull=False)
return queryset
class RatingAdmin(admin.ModelAdmin):
raw_id_fields = ('addon', 'version', 'user', 'reply_to',)
readonly_fields = ('addon', 'addon_link', 'version', 'user', 'reply_to',
'ip_address', 'body', 'rating', 'deleted',
'ip_address_link', 'user_link')
fields = ('addon_link', 'version', 'body', 'rating', 'ip_address_link',
'user_link', 'deleted')
list_display = ('id', 'addon', 'created', 'user', 'ip_address', 'rating',
'is_reply', 'flag', 'deleted', 'truncated_body',)
list_filter = ('deleted', RatingTypeFilter)
actions = ('delete_selected',)
def queryset(self, request):
return Rating.unfiltered.all()
def truncated_body(self, obj):
return truncate_text(obj.body, 140)[0] if obj.body else ''
def is_reply(self, obj):
return bool(obj.reply_to)
is_reply.boolean = True
is_reply.admin_order_field = 'reply_to'
def addon_link(self, obj):
return related_single_content_link(obj, 'addon')
addon_link.short_description = _(u'Add-on')
def user_link(self, obj):
return related_single_content_link(obj, 'user')
user_link.short_description = _(u'User')
def ip_address_link(self, obj):
return format_html(
'<a href="{}?{}={}">{}</a>',
reverse('admin:ratings_rating_changelist'),
'ip_address', obj.ip_address, obj.ip_address)
ip_address_link.short_description = _(u'IP Address')
admin.site.register(Rating, RatingAdmin)
| bsd-3-clause | 3,857,850,833,888,143,000 | 34.341176 | 77 | 0.619507 | false |
vojtechtrefny/blivet-gui | blivetgui/blivetgui.py | 1 | 22432 | # -*- coding: utf-8 -*-
# list_partitions.py
# Main blivet-gui class for GUI
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vojtech Trefny <[email protected]>
#
#------------------------------------------------------------------------------#
from gi.repository import Gtk, GObject
from .main_window import MainWindow
from .list_devices import ListDevices
from .list_partitions import ListPartitions
from .list_actions import ListActions
from .main_menu import MainMenu
from .actions_menu import ActionsMenu
from .actions_toolbar import ActionsToolbar
from .device_info import DeviceInfo
from .devicevisualization.device_canvas import DeviceCanvas
from .utils import BlivetUtils
from .logs import set_logging, set_python_meh, remove_logs
from .dialogs import message_dialogs, other_dialogs, edit_dialog, add_dialog
from .processing_window import ProcessingActions
import gettext
import threading
import os
import sys
import atexit
import six
#------------------------------------------------------------------------------#
_ = lambda x: gettext.ldgettext("blivet-gui", x)
#------------------------------------------------------------------------------#
def locate_ui_file(filename):
""" Locate neccessary Glade .ui files
"""
path = [os.path.split(os.path.abspath(__file__))[0] + "/../data/ui/",
"/usr/share/blivet-gui/ui/"]
for folder in path:
fname = folder + filename
if os.access(fname, os.R_OK):
return fname
    raise RuntimeError("Unable to find glade file %s" % filename)
#------------------------------------------------------------------------------#
class BlivetGUI(object):
def __init__(self, embedded_socket=None, kickstart_mode=False):
self.embedded_socket = embedded_socket
self.kickstart_mode = kickstart_mode
self.builder = Gtk.Builder()
self.builder.add_from_file(locate_ui_file("blivet-gui.ui"))
### Logging
self.blivet_logfile, self.blivet_log = set_logging(component="blivet")
self.program_logfile, self.program_log = set_logging(component="program")
self.blivetgui_logfile, self.log = set_logging(component="blivet-gui")
handler = set_python_meh(log_files=[self.blivet_logfile, self.program_logfile, self.blivetgui_logfile])
handler.install(None)
atexit.register(remove_logs, log_files=[self.blivet_logfile, self.program_logfile,
self.blivetgui_logfile])
### BlivetUtils
self.blivet_utils = BlivetUtils(kickstart_mode)
### MainWindow
self.main_window = MainWindow(self).window
### Kickstart devices dialog
if self.kickstart_mode:
self.use_disks = self.kickstart_disk_selection()
self.old_mountpoints = self.blivet_utils.kickstart_mountpoints()
### MainMenu
self.main_menu = MainMenu(self)
self.builder.get_object("vbox").add(self.main_menu.menu_bar)
### ActionsMenu
self.popup_menu = ActionsMenu(self)
### ActionsToolbar
self.toolbar = ActionsToolbar(self)
self.builder.get_object("vbox").add(self.toolbar.toolbar)
### ListDevices
self.list_devices = ListDevices(self)
self.builder.get_object("disks_viewport").add(self.list_devices.disks_view)
### ListPartitions
self.list_partitions = ListPartitions(self)
self.builder.get_object("partitions_viewport").add(self.list_partitions.partitions_view)
self.partitions_label = self.builder.get_object("partitions_page")
self.partitions_label.set_text(_("Partitions"))
### ListActions
self.list_actions = ListActions(self)
self.builder.get_object("actions_viewport").add(self.list_actions.actions_view)
self.actions_label = self.builder.get_object("actions_page")
self.actions_label.set_text(_("Pending actions ({0})").format(self.list_actions.actions))
### DeviceInfo
self.device_info = DeviceInfo(self)
self.builder.get_object("pv_viewport").add(self.device_info.info_label)
### DeviceCanvas
self.device_canvas = DeviceCanvas(self)
self.builder.get_object("image_window").add(self.device_canvas)
# select first device in ListDevice
self.list_devices.disks_view.set_cursor(1)
self.main_window.show_all()
def update_partitions_view(self, device_changed=False):
self.list_partitions.update_partitions_list(self.list_devices.selected_device)
self.device_canvas.visualize_device(self.list_partitions.partitions_list,
self.list_partitions.partitions_view,
self.list_devices.selected_device)
if device_changed:
self.device_info.update_device_info(self.list_devices.selected_device)
def activate_options(self, activate_list):
""" Activate toolbar buttons and menu items
:param activate_list: list of items to activate
:type activate_list: list of str
"""
for item in activate_list:
self.toolbar.activate_buttons([item])
self.main_menu.activate_menu_items([item])
if item not in ("apply", "clear", "undo"):
self.popup_menu.activate_menu_items([item])
def deactivate_options(self, deactivate_list):
""" Deactivate toolbar buttons and menu items
:param deactivate_list: list of items to deactivate
:type deactivate_list: list of str
"""
for item in deactivate_list:
self.toolbar.deactivate_buttons([item])
self.main_menu.deactivate_menu_items([item])
if item not in ("apply", "clear", "undo"):
self.popup_menu.deactivate_menu_items([item])
def deactivate_all_options(self):
""" Deactivate all partition-based buttons/menu items
"""
self.toolbar.deactivate_all()
self.main_menu.deactivate_all()
self.popup_menu.deactivate_all()
def kickstart_disk_selection(self):
disks = self.blivet_utils.get_disks()
if len(disks) == 0:
msg = _("blivet-gui failed to find at least one storage device to work with.\n\n" \
"Please connect a storage device to your computer and re-run blivet-gui.")
self.show_error_dialog(msg)
self.quit()
dialog = other_dialogs.KickstartSelectDevicesDialog(self.main_window, disks)
response = dialog.run()
if response == Gtk.ResponseType.OK:
use_disks, install_bootloader, bootloader_device = dialog.get_selection()
dialog.destroy()
else:
dialog.destroy()
self.quit()
if install_bootloader and bootloader_device:
self.blivet_utils.set_bootloader_device(bootloader_device)
self.blivet_utils.kickstart_hide_disks(use_disks)
return use_disks
def show_exception_dialog(self, exception_data, exception_traceback):
message_dialogs.ExceptionDialog(self.main_window, exception_data, exception_traceback)
def show_error_dialog(self, error_message):
message_dialogs.ErrorDialog(self.main_window, error_message)
def show_warning_dialog(self, warning_message):
message_dialogs.WarningDialog(self.main_window, warning_message)
def show_confirmation_dialog(self, title, question):
dialog = message_dialogs.ConfirmDialog(self.main_window, title, question)
response = dialog.run()
return response
def _raise_exception(self, exception, traceback):
if six.PY2:
raise six.reraise(type(exception), exception, traceback)
else:
raise exception.with_traceback(traceback)
def edit_device(self, widget=None):
""" Edit selected device
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
"""
device = self.list_partitions.selected_partition[0]
if device.type in ("partition", "lvmlv"):
dialog = edit_dialog.PartitionEditDialog(self.main_window, device,
self.blivet_utils.device_resizable(device),
self.kickstart_mode)
elif device.type in ("lvmvg",):
dialog = edit_dialog.LVMEditDialog(self.main_window, device,
self.blivet_utils.get_free_pvs_info(),
self.blivet_utils.get_free_disks_regions(),
self.blivet_utils.get_removable_pvs_info(device))
response = dialog.run()
if response == Gtk.ResponseType.OK:
user_input = dialog.get_selection()
if device.type in ("partition", "lvmlv"):
result = self.blivet_utils.edit_partition_device(user_input)
elif device.type in ("lvmvg",):
result = self.blivet_utils.edit_lvmvg_device(user_input)
if not result.success:
if not result.exception:
self.show_error_dialog(result.message)
else:
self._raise_exception(result.exception, result.traceback)
else:
if result.actions:
action_str = _("edit {0} {1}").format(device.name, device.type)
self.list_actions.append("edit", action_str, result.actions)
if result.actions:
self.list_partitions.update_partitions_list(self.list_devices.selected_device)
dialog.destroy()
return
def add_partition(self, widget=None, btrfs_pt=False):
""" Add new partition
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
:param btrfs_pt: create btrfs as partition table
:type btrfs_pt: bool
"""
# parent device; free space has always only one parent #FIXME
parent_device = self.list_partitions.selected_partition[0].parents[0]
# btrfs volume has no special free space device -- parent device for newly
# created subvolume is not parent of selected device but device (btrfs volume)
# itself
if self.list_partitions.selected_partition[0].type == "btrfs volume":
parent_device = self.list_partitions.selected_partition[0]
parent_device_type = parent_device.type
if parent_device_type == "partition" and parent_device.format.type == "lvmpv":
parent_device_type = "lvmpv"
if parent_device_type == "disk" and not self.blivet_utils.has_disklabel(self.list_devices.selected_device) \
and btrfs_pt == False:
dialog = add_dialog.AddLabelDialog(self.main_window, self.list_devices.selected_device,
self.blivet_utils.get_available_disklabels())
response = dialog.run()
if response == Gtk.ResponseType.OK:
selection = dialog.get_selection()
if selection == "btrfs":
dialog.destroy()
self.add_partition(btrfs_pt=True)
return
result = self.blivet_utils.create_disk_label(self.list_devices.selected_device, selection)
if not result.success:
if not result.exception:
self.show_error_dialog(result.message)
else:
self._raise_exception(result.exception, result.traceback)
else:
if result.actions:
action_str = _("create new disklabel on {0}").format(self.list_devices.selected_device.name)
self.list_actions.append("add", action_str, result.actions)
self.update_partitions_view()
dialog.destroy()
return
dialog = add_dialog.AddDialog(self.main_window,
parent_device_type,
parent_device,
self.list_partitions.selected_partition[0],
self.list_partitions.selected_partition[0].size,
self.blivet_utils.get_free_pvs_info(),
self.blivet_utils.get_free_disks_regions(),
self.blivet_utils.get_available_raid_levels(),
self.blivet_utils.has_extended_partition(self.list_devices.selected_device),
self.blivet_utils.storage.mountpoints,
self.kickstart_mode)
response = dialog.run()
if response == Gtk.ResponseType.OK:
user_input = dialog.get_selection()
result = self.blivet_utils.add_device(user_input)
if not result.success:
if not result.exception:
self.show_error_dialog(result.message)
else:
self._raise_exception(result.exception, result.traceback)
else:
if result.actions:
if not user_input.filesystem:
action_str = _("add {0} {1} device").format(str(user_input.size),
user_input.device_type)
else:
action_str = _("add {0} {1} partition").format(str(user_input.size),
user_input.filesystem)
self.list_actions.append("add", action_str, result.actions)
self.list_devices.update_devices_view()
self.update_partitions_view()
dialog.destroy()
return
def delete_selected_partition(self, widget=None):
""" Delete selected partition
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
"""
deleted_device = self.list_partitions.selected_partition[0]
title = _("Confirm delete operation")
msg = _("Are you sure you want delete device {0}?").format(deleted_device.name)
dialog = message_dialogs.ConfirmDialog(self.main_window, title, msg)
response = dialog.run()
if response:
result = self.blivet_utils.delete_device(deleted_device)
if not result.success:
if not result.exception:
self.show_error_dialog(result.message)
else:
self._raise_exception(result.exception, result.traceback)
else:
action_str = _("delete partition {0}").format(deleted_device.name)
self.list_actions.append("delete", action_str, result.actions)
self.update_partitions_view()
self.list_devices.update_devices_view()
def perform_actions(self, dialog):
""" Perform queued actions
"""
def end(success, error, traceback):
if success:
dialog.stop()
else:
dialog.destroy()
self.main_window.set_sensitive(False)
self._raise_exception(error, traceback)
def do_it():
""" Run blivet.doIt()
"""
try:
self.blivet_utils.blivet_do_it()
GObject.idle_add(end, True, None, None)
except Exception as e: # pylint: disable=broad-except
self.blivet_utils.blivet_reset()
GObject.idle_add(end, False, e, sys.exc_info()[2])
return
thread = threading.Thread(target=do_it)
thread.start()
dialog.start()
thread.join()
self.list_actions.clear()
self.list_devices.update_devices_view()
self.update_partitions_view()
def apply_event(self, widget=None):
""" Apply event for main menu/toolbar
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
.. note::
            This is necessary because of kickstart mode -- in "standard" mode
            we need only a simple confirmation dialog, but in kickstart mode it
            is necessary to open a file chooser dialog for the kickstart file save.
"""
if self.kickstart_mode:
dialog = other_dialogs.KickstartFileSaveDialog(self.main_window)
response = dialog.run()
if response:
if os.path.isfile(response):
title = _("File exists")
msg = _("Selected file already exists, do you want to overwrite it?")
dialog_file = message_dialogs.ConfirmDialog(self.main_window, title, msg)
response_file = dialog_file.run()
if not response_file:
return
self.blivet_utils.create_kickstart_file(response)
msg = _("File with your Kickstart configuration was successfully saved to:\n\n" \
"{0}").format(response)
message_dialogs.InfoDialog(self.main_window, msg)
else:
title = _("Confirm scheduled actions")
msg = _("Are you sure you want to perform scheduled actions?")
actions = self.blivet_utils.get_actions()
dialog = message_dialogs.ConfirmActionsDialog(self.main_window, title, msg, actions)
response = dialog.run()
if response:
processing_dialog = ProcessingActions(self)
self.perform_actions(processing_dialog)
def umount_partition(self, widget=None):
""" Unmount selected partition
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
"""
result = self.blivet_utils.unmount_device(self.list_partitions.selected_partition[0])
if not result:
msg = _("Unmount failed. Are you sure device is not in use?")
self.show_error_dialog(msg)
self.update_partitions_view()
def decrypt_device(self, widget=None):
""" Decrypt selected device
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
"""
dialog = other_dialogs.LuksPassphraseDialog(self.main_window)
response = dialog.run()
if response:
ret = self.blivet_utils.luks_decrypt(self.list_partitions.selected_partition[0], response)
if ret:
msg = _("Unknown error appeared:\n\n{0}.").format(ret)
message_dialogs.ErrorDialog(self.main_window, msg)
return
self.list_devices.update_devices_view()
self.update_partitions_view()
def actions_undo(self, widget=None):
""" Undo last action
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
"""
removed_actions = self.list_actions.pop()
self.blivet_utils.blivet_cancel_actions(removed_actions)
self.list_devices.update_devices_view()
self.update_partitions_view()
def clear_actions(self, widget=None):
""" Clear all scheduled actions
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
"""
self.blivet_utils.blivet_reset()
self.list_actions.clear()
self.list_devices.update_devices_view()
self.update_partitions_view()
def reload(self, widget=None):
""" Reload storage information
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
"""
if self.list_actions.actions:
title = _("Confirm reload storage")
msg = _("There are pending operations. Are you sure you want to continue?")
response = self.show_confirmation_dialog(title, msg)
if not response:
return
self.blivet_utils.blivet_reset()
if self.kickstart_mode:
self.blivet_utils.kickstart_hide_disks(self.use_disks)
self.list_actions.clear()
self.list_devices.update_devices_view()
self.update_partitions_view()
def quit(self, event=None, widget=None):
""" Quit blivet-gui
:param widget: widget calling this function (only for calls via signal.connect)
:type widget: Gtk.Widget()
"""
if self.list_actions.actions:
title = _("Are you sure you want to quit?")
msg = _("There are unapplied actions. Are you sure you want to quit blivet-gui now?")
response = self.show_confirmation_dialog(title, msg)
if not response:
return True
Gtk.main_quit()
| gpl-2.0 | -7,924,271,281,297,831,000 | 35.122383 | 116 | 0.584121 | false |
bkjones/bevis | bevis/bevis_server.py | 1 | 7943 | #!/usr/bin/env python
"""
A syslog receiver that queues messages to an AMQP broker.
"""
import errno
import json
import socket
import logging
import pika
from tornado import ioloop, httpserver
from loggerglue.rfc5424 import SyslogEntry
class Bevis(httpserver.HTTPServer):
"""
bevis = bevis_server.Bevis(config)
bevis.listen(config["Server"]["port"])
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
bevis.stop()
"""
def __init__(self, config):
self.io_loop = None
self._socket = None
self._started = False
self.config = config
self._sockets = {}
self._log = logging.getLogger()
self._log.debug("Logging is set to debug")
self.setup()
def setup(self):
"""Sets up the AMQP connection and channel. These both
eventually are passed to bevis_connection for use in actually
publishing the syslog messages to the broker.
TODO: Clean up those except blocks. Yuk!
"""
def pika_on_connected(connection):
try:
logging.debug("Getting channel from connection...")
connection.channel(pika_on_channel_open)
except Exception as out:
logging.debug(out)
def pika_on_channel_open(channel):
try:
self.pika_channel = channel
logging.debug("Set up self.pika_channel %s", self.pika_channel)
except Exception as out:
logging.debug(out)
credentials = pika.PlainCredentials(self.config["AMQP"]["username"],
self.config["AMQP"]["password"])
parameters = pika.ConnectionParameters(self.config["AMQP"]["host"],
virtual_host = self.config["AMQP"]["vhost"],
credentials=credentials)
try:
pika.adapters.tornado_connection.TornadoConnection(
parameters, pika_on_connected)
logging.debug("Set up TornadoConnection")
except Exception as out:
logging.debug(out)
def stop(self):
"""Server Shutdown"""
self._socket.close()
def _handle_events(self, fd, events):
"""Accept a new connection -- starts BevisConnection"""
while True:
try:
connection, address = self._sockets[fd].accept()
except socket.error as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
# Start a new BevisConnection
BevisConnection(connection,
address,
self.io_loop,
self.pika_channel,
self.config['AMQP']['topic_exch'],
self.config['AMQP']['rt_key_components'])
def get_severity_and_facility(syslog_dict):
"""
Parses the rfc5424 'prival' in the dict to derive
the severity and facility of the message, maps them to human-readable
words, and adds those keys to the dictionary.
"""
sev_words = {0: 'emerg',
1: 'alert',
2: 'crit',
3: 'err',
4: 'warn',
5: 'notice',
6: 'info',
7: 'debug'}
fac_words = {0: 'kern',
1: 'user',
2: 'mail',
3: 'daemon',
4: 'auth',
5: 'syslog',
6: 'lpr',
7: 'news',
8: 'uucp',
9: 'clock',
10: 'authpriv',
11: 'ftp',
12: 'ntp',
13: 'logaudit',
14: 'logalert',
15: 'clock2',
16: 'local0',
17: 'local1',
18: 'local2',
19: 'local3',
20: 'local4',
21: 'local5',
22: 'local6',
23: 'local7'}
prival = syslog_dict['prival']
if not prival:
return syslog_dict
severity = prival & 7
facility = (prival - severity) / 8
syslog_dict['severity'] = sev_words[severity]
syslog_dict['facility'] = fac_words[facility]
return syslog_dict
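# A quick worked example of the prival arithmetic above (illustrative only, not
# part of the original module): RFC 5424 encodes prival as facility * 8 + severity,
# so for prival 165 the severity is 165 & 7 == 5 ('notice') and the facility is
# (165 - 5) / 8 == 20 ('local4').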
class BevisConnection(object):
"""Takes each message coming into bevis_server and (asynchronously) sends
it along to the AMQP broker.
It is assumed that the AMQP exchange is set up and properly bound
to a queue. No binding happens here.
It is also assumed that all log messages are < 4096 bytes in length.
"""
def __init__(self, socket, address, io_loop, pika_channel,
topic_exch, rt_key_components):
"""
Adds itself to the tornado ioloop, puts together an
AMQP message, and publishes it.
"""
self.read_chunk_size = 4096
self.socket = socket
self.socket.setblocking(False)
self.fileno = socket.fileno()
self.address = address
self.pika_channel = pika_channel
self.topic_exch = topic_exch
self.rt_key_components = rt_key_components
# setup io loop
self.io_loop = io_loop
self.io_loop.add_handler(self.fileno, self._handle_events,
self.io_loop.READ | self.io_loop.ERROR)
logging.info("New connection [%s]: %s", self.fileno, str(self.address))
def _handle_events(self, fd, events):
if not self.socket:
logging.warning("Got events for closed stream %d", fd)
return
if events & self.io_loop.READ:
self._handle_read()
if events & self.io_loop.ERROR:
self._close_socket()
return
def _close_socket(self):
"""Closes socket and removes it from epoll and FlockServer"""
try:
self.io_loop.remove_handler(self.fileno)
except:
pass
if self.socket:
self.socket.close()
self.socket = None
def _handle_read(self):
"""Signal by epoll: data chunk ready to read from socket buffer."""
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
else:
print "Read error on %d: %s" % (self.fileno, e)
self._close_socket()
return
if not chunk or not chunk.strip():
logging.info("empty chunk. closing socket")
self._close_socket()
return
self._process_request(chunk)
logging.debug("Read handled. All done.")
def _process_request(self, request):
"""Processing of the log entry. Later this will do more work"""
syslog_dict = {}
try:
logging.debug("INCOMING REQ: %s" % request)
syslog_entry = SyslogEntry.from_line(request)
syslog_dict = syslog_entry.__dict__
syslog_dict = get_severity_and_facility(syslog_dict)
except Exception as out:
logging.error(out)
logging.debug("Processing request...")
self.send_to_amqp(syslog_dict)
def send_to_amqp(self, syslog_dict):
"""
Send request to AMQP broker.
"""
rt_key = '.'.join([syslog_dict[i] for i in self.rt_key_components])
logging.debug("Sending amqp message")
syslog_json = json.dumps(syslog_dict, default=str)
logging.debug("Sending to AMQP: %s", syslog_json)
# Send via pika
logging.debug("RT KEY: %s EXCH: %s", rt_key, self.topic_exch)
self.pika_channel.basic_publish(exchange=self.topic_exch,
routing_key=rt_key,
body=syslog_json)
| bsd-3-clause | -9,159,939,354,227,504,000 | 30.772 | 79 | 0.528516 | false |
thnee/ansible | test/lib/ansible_test/_internal/util.py | 13 | 26012 | """Miscellaneous utility functions and classes."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import contextlib
import errno
import fcntl
import inspect
import os
import pkgutil
import random
import re
import shutil
import socket
import stat
import string
import subprocess
import sys
import tempfile
import time
import zipfile
from struct import unpack, pack
from termios import TIOCGWINSZ
try:
from abc import ABC
except ImportError:
from abc import ABCMeta
ABC = ABCMeta('ABC', (), {})
try:
# noinspection PyCompatibility
from configparser import ConfigParser
except ImportError:
# noinspection PyCompatibility,PyUnresolvedReferences
from ConfigParser import SafeConfigParser as ConfigParser
try:
# noinspection PyProtectedMember
from shlex import quote as cmd_quote
except ImportError:
# noinspection PyProtectedMember
from pipes import quote as cmd_quote
from . import types as t
try:
C = t.TypeVar('C')
except AttributeError:
C = None
DOCKER_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
REMOTE_COMPLETION = {} # type: t.Dict[str, t.Dict[str, str]]
PYTHON_PATHS = {} # type: t.Dict[str, str]
try:
# noinspection PyUnresolvedReferences
MAXFD = subprocess.MAXFD
except AttributeError:
MAXFD = -1
COVERAGE_CONFIG_NAME = 'coveragerc'
ANSIBLE_TEST_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# assume running from install
ANSIBLE_ROOT = os.path.dirname(ANSIBLE_TEST_ROOT)
ANSIBLE_BIN_PATH = os.path.dirname(os.path.abspath(sys.argv[0]))
ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'ansible')
ANSIBLE_SOURCE_ROOT = None
if not os.path.exists(ANSIBLE_LIB_ROOT):
# running from source
ANSIBLE_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(ANSIBLE_TEST_ROOT)))
ANSIBLE_BIN_PATH = os.path.join(ANSIBLE_ROOT, 'bin')
ANSIBLE_LIB_ROOT = os.path.join(ANSIBLE_ROOT, 'lib', 'ansible')
ANSIBLE_SOURCE_ROOT = ANSIBLE_ROOT
ANSIBLE_TEST_DATA_ROOT = os.path.join(ANSIBLE_TEST_ROOT, '_data')
ANSIBLE_TEST_CONFIG_ROOT = os.path.join(ANSIBLE_TEST_ROOT, 'config')
# Modes are set to allow all users the same level of access.
# This permits files to be used in tests that change users.
# The only exception is write access to directories for the user creating them.
# This avoids having to modify the directory permissions a second time.
MODE_READ = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
MODE_FILE = MODE_READ
MODE_FILE_EXECUTE = MODE_FILE | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
MODE_FILE_WRITE = MODE_FILE | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
MODE_DIRECTORY = MODE_READ | stat.S_IWUSR | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
MODE_DIRECTORY_WRITE = MODE_DIRECTORY | stat.S_IWGRP | stat.S_IWOTH
ENCODING = 'utf-8'
Text = type(u'')
REMOTE_ONLY_PYTHON_VERSIONS = (
'2.6',
)
SUPPORTED_PYTHON_VERSIONS = (
'2.6',
'2.7',
'3.5',
'3.6',
'3.7',
'3.8',
)
def to_optional_bytes(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[bytes]
"""Return the given value as bytes encoded using UTF-8 if not already bytes, or None if the value is None."""
return None if value is None else to_bytes(value, errors)
def to_optional_text(value, errors='strict'): # type: (t.Optional[t.AnyStr], str) -> t.Optional[t.Text]
"""Return the given value as text decoded using UTF-8 if not already text, or None if the value is None."""
return None if value is None else to_text(value, errors)
def to_bytes(value, errors='strict'): # type: (t.AnyStr, str) -> bytes
"""Return the given value as bytes encoded using UTF-8 if not already bytes."""
if isinstance(value, bytes):
return value
if isinstance(value, Text):
return value.encode(ENCODING, errors)
raise Exception('value is not bytes or text: %s' % type(value))
def to_text(value, errors='strict'): # type: (t.AnyStr, str) -> t.Text
"""Return the given value as text decoded using UTF-8 if not already text."""
if isinstance(value, bytes):
return value.decode(ENCODING, errors)
if isinstance(value, Text):
return value
raise Exception('value is not bytes or text: %s' % type(value))
def get_docker_completion():
"""
:rtype: dict[str, dict[str, str]]
"""
return get_parameterized_completion(DOCKER_COMPLETION, 'docker')
def get_remote_completion():
"""
:rtype: dict[str, dict[str, str]]
"""
return get_parameterized_completion(REMOTE_COMPLETION, 'remote')
def get_parameterized_completion(cache, name):
"""
:type cache: dict[str, dict[str, str]]
:type name: str
:rtype: dict[str, dict[str, str]]
"""
if not cache:
images = read_lines_without_comments(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'completion', '%s.txt' % name), remove_blank_lines=True)
cache.update(dict(kvp for kvp in [parse_parameterized_completion(i) for i in images] if kvp))
return cache
def parse_parameterized_completion(value):
"""
:type value: str
:rtype: tuple[str, dict[str, str]]
"""
values = value.split()
if not values:
return None
name = values[0]
data = dict((kvp[0], kvp[1] if len(kvp) > 1 else '') for kvp in [item.split('=', 1) for item in values[1:]])
return name, data
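# Illustrative example (not part of the original file): for a completion line such as
# 'centos7 python=2.7 seccomp=default', parse_parameterized_completion() returns
# ('centos7', {'python': '2.7', 'seccomp': 'default'}).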
def is_shippable():
"""
:rtype: bool
"""
return os.environ.get('SHIPPABLE') == 'true'
def remove_file(path):
"""
:type path: str
"""
if os.path.isfile(path):
os.remove(path)
def read_lines_without_comments(path, remove_blank_lines=False, optional=False): # type: (str, bool, bool) -> t.List[str]
"""
Returns lines from the specified text file with comments removed.
Comments are any content from a hash symbol to the end of a line.
Any spaces immediately before a comment are also removed.
"""
if optional and not os.path.exists(path):
return []
with open(path, 'r') as path_fd:
lines = path_fd.read().splitlines()
lines = [re.sub(r' *#.*$', '', line) for line in lines]
if remove_blank_lines:
lines = [line for line in lines if line]
return lines
def find_executable(executable, cwd=None, path=None, required=True):
"""
:type executable: str
:type cwd: str
:type path: str
:type required: bool | str
:rtype: str | None
"""
match = None
real_cwd = os.getcwd()
if not cwd:
cwd = real_cwd
if os.path.dirname(executable):
target = os.path.join(cwd, executable)
if os.path.exists(target) and os.access(target, os.F_OK | os.X_OK):
match = executable
else:
if path is None:
path = os.environ.get('PATH', os.path.defpath)
if path:
path_dirs = path.split(os.path.pathsep)
seen_dirs = set()
for path_dir in path_dirs:
if path_dir in seen_dirs:
continue
seen_dirs.add(path_dir)
if os.path.abspath(path_dir) == real_cwd:
path_dir = cwd
candidate = os.path.join(path_dir, executable)
if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
match = candidate
break
if not match and required:
message = 'Required program "%s" not found.' % executable
if required != 'warning':
raise ApplicationError(message)
display.warning(message)
return match
def find_python(version, path=None, required=True):
"""
:type version: str
:type path: str | None
:type required: bool
:rtype: str
"""
version_info = tuple(int(n) for n in version.split('.'))
if not path and version_info == sys.version_info[:len(version_info)]:
python_bin = sys.executable
else:
python_bin = find_executable('python%s' % version, path=path, required=required)
return python_bin
def get_ansible_version(): # type: () -> str
"""Return the Ansible version."""
try:
return get_ansible_version.version
except AttributeError:
pass
# ansible may not be in our sys.path
# avoids a symlink to release.py since ansible placement relative to ansible-test may change during delegation
load_module(os.path.join(ANSIBLE_LIB_ROOT, 'release.py'), 'ansible_release')
# noinspection PyUnresolvedReferences
from ansible_release import __version__ as ansible_version # pylint: disable=import-error
get_ansible_version.version = ansible_version
return ansible_version
def get_available_python_versions(versions): # type: (t.List[str]) -> t.Dict[str, str]
"""Return a dictionary indicating which of the requested Python versions are available."""
try:
return get_available_python_versions.result
except AttributeError:
pass
get_available_python_versions.result = dict((version, path) for version, path in
((version, find_python(version, required=False)) for version in versions) if path)
return get_available_python_versions.result
def generate_pip_command(python):
"""
:type python: str
:rtype: list[str]
"""
return [python, '-m', 'pip.__main__']
def raw_command(cmd, capture=False, env=None, data=None, cwd=None, explain=False, stdin=None, stdout=None,
cmd_verbosity=1, str_errors='strict'):
"""
:type cmd: collections.Iterable[str]
:type capture: bool
:type env: dict[str, str] | None
:type data: str | None
:type cwd: str | None
:type explain: bool
:type stdin: file | None
:type stdout: file | None
:type cmd_verbosity: int
:type str_errors: str
:rtype: str | None, str | None
"""
if not cwd:
cwd = os.getcwd()
if not env:
env = common_environment()
cmd = list(cmd)
escaped_cmd = ' '.join(cmd_quote(c) for c in cmd)
display.info('Run command: %s' % escaped_cmd, verbosity=cmd_verbosity, truncate=True)
display.info('Working directory: %s' % cwd, verbosity=2)
program = find_executable(cmd[0], cwd=cwd, path=env['PATH'], required='warning')
if program:
display.info('Program found: %s' % program, verbosity=2)
for key in sorted(env.keys()):
display.info('%s=%s' % (key, env[key]), verbosity=2)
if explain:
return None, None
communicate = False
if stdin is not None:
data = None
communicate = True
elif data is not None:
stdin = subprocess.PIPE
communicate = True
if stdout:
communicate = True
if capture:
stdout = stdout or subprocess.PIPE
stderr = subprocess.PIPE
communicate = True
else:
stderr = None
start = time.time()
process = None
try:
try:
cmd_bytes = [to_bytes(c) for c in cmd]
env_bytes = dict((to_bytes(k), to_bytes(v)) for k, v in env.items())
process = subprocess.Popen(cmd_bytes, env=env_bytes, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd)
except OSError as ex:
if ex.errno == errno.ENOENT:
raise ApplicationError('Required program "%s" not found.' % cmd[0])
raise
if communicate:
data_bytes = to_optional_bytes(data)
stdout_bytes, stderr_bytes = process.communicate(data_bytes)
stdout_text = to_optional_text(stdout_bytes, str_errors) or u''
stderr_text = to_optional_text(stderr_bytes, str_errors) or u''
else:
process.wait()
stdout_text, stderr_text = None, None
finally:
if process and process.returncode is None:
process.kill()
display.info('') # the process we're interrupting may have completed a partial line of output
display.notice('Killed command to avoid an orphaned child process during handling of an unexpected exception.')
status = process.returncode
runtime = time.time() - start
display.info('Command exited with status %s after %s seconds.' % (status, runtime), verbosity=4)
if status == 0:
return stdout_text, stderr_text
raise SubprocessError(cmd, status, stdout_text, stderr_text, runtime)
def common_environment():
"""Common environment used for executing all programs."""
env = dict(
LC_ALL='en_US.UTF-8',
PATH=os.environ.get('PATH', os.path.defpath),
)
required = (
'HOME',
)
optional = (
'HTTPTESTER',
'LD_LIBRARY_PATH',
'SSH_AUTH_SOCK',
# MacOS High Sierra Compatibility
# http://sealiesoftware.com/blog/archive/2017/6/5/Objective-C_and_fork_in_macOS_1013.html
# Example configuration for macOS:
# export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
'OBJC_DISABLE_INITIALIZE_FORK_SAFETY',
'ANSIBLE_KEEP_REMOTE_FILES',
# MacOS Homebrew Compatibility
# https://cryptography.io/en/latest/installation/#building-cryptography-on-macos
# This may also be required to install pyyaml with libyaml support when installed in non-standard locations.
# Example configuration for brew on macOS:
# export LDFLAGS="-L$(brew --prefix openssl)/lib/ -L$(brew --prefix libyaml)/lib/"
# export CFLAGS="-I$(brew --prefix openssl)/include/ -I$(brew --prefix libyaml)/include/"
# However, this is not adequate for PyYAML 3.13, which is the latest version supported on Python 2.6.
# For that version the standard location must be used, or `pip install` must be invoked with additional options:
# --global-option=build_ext --global-option=-L{path_to_lib_dir}
'LDFLAGS',
'CFLAGS',
)
env.update(pass_vars(required=required, optional=optional))
return env
def pass_vars(required, optional):
"""
:type required: collections.Iterable[str]
:type optional: collections.Iterable[str]
:rtype: dict[str, str]
"""
env = {}
for name in required:
if name not in os.environ:
raise MissingEnvironmentVariable(name)
env[name] = os.environ[name]
for name in optional:
if name not in os.environ:
continue
env[name] = os.environ[name]
return env
def deepest_path(path_a, path_b):
"""Return the deepest of two paths, or None if the paths are unrelated.
:type path_a: str
:type path_b: str
:rtype: str | None
"""
if path_a == '.':
path_a = ''
if path_b == '.':
path_b = ''
if path_a.startswith(path_b):
return path_a or '.'
if path_b.startswith(path_a):
return path_b or '.'
return None
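# Illustrative example (not part of the original file): deepest_path('lib/ansible', 'lib')
# returns 'lib/ansible', while deepest_path('lib', 'test') returns None because neither
# path contains the other.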
def remove_tree(path):
"""
:type path: str
"""
try:
shutil.rmtree(to_bytes(path))
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
def make_dirs(path):
"""
:type path: str
"""
try:
os.makedirs(to_bytes(path))
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
def is_binary_file(path):
"""
:type path: str
:rtype: bool
"""
assume_text = set([
'.cfg',
'.conf',
'.crt',
'.cs',
'.css',
'.html',
'.ini',
'.j2',
'.js',
'.json',
'.md',
'.pem',
'.ps1',
'.psm1',
'.py',
'.rst',
'.sh',
'.txt',
'.xml',
'.yaml',
'.yml',
])
assume_binary = set([
'.bin',
'.eot',
'.gz',
'.ico',
'.iso',
'.jpg',
'.otf',
'.p12',
'.png',
'.pyc',
'.rpm',
'.ttf',
'.woff',
'.woff2',
'.zip',
])
ext = os.path.splitext(path)[1]
if ext in assume_text:
return False
if ext in assume_binary:
return True
with open(path, 'rb') as path_fd:
return b'\0' in path_fd.read(1024)
def generate_password():
"""Generate a random password.
:rtype: str
"""
chars = [
string.ascii_letters,
string.digits,
string.ascii_letters,
string.digits,
'-',
] * 4
password = ''.join([random.choice(char) for char in chars[:-1]])
display.sensitive.add(password)
return password
class Display:
"""Manages color console output."""
clear = '\033[0m'
red = '\033[31m'
green = '\033[32m'
yellow = '\033[33m'
blue = '\033[34m'
purple = '\033[35m'
cyan = '\033[36m'
verbosity_colors = {
0: None,
1: green,
2: blue,
3: cyan,
}
def __init__(self):
self.verbosity = 0
self.color = sys.stdout.isatty()
self.warnings = []
self.warnings_unique = set()
self.info_stderr = False
self.rows = 0
self.columns = 0
self.truncate = 0
self.redact = True
self.sensitive = set()
if os.isatty(0):
self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2]
def __warning(self, message):
"""
:type message: str
"""
self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr)
def review_warnings(self):
"""Review all warnings which previously occurred."""
if not self.warnings:
return
self.__warning('Reviewing previous %d warning(s):' % len(self.warnings))
for warning in self.warnings:
self.__warning(warning)
def warning(self, message, unique=False):
"""
:type message: str
:type unique: bool
"""
if unique:
if message in self.warnings_unique:
return
self.warnings_unique.add(message)
self.__warning(message)
self.warnings.append(message)
def notice(self, message):
"""
:type message: str
"""
self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr)
def error(self, message):
"""
:type message: str
"""
self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr)
def info(self, message, verbosity=0, truncate=False):
"""
:type message: str
:type verbosity: int
:type truncate: bool
"""
if self.verbosity >= verbosity:
color = self.verbosity_colors.get(verbosity, self.yellow)
self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate)
def print_message(self, message, color=None, fd=sys.stdout, truncate=False): # pylint: disable=locally-disabled, invalid-name
"""
:type message: str
:type color: str | None
:type fd: file
:type truncate: bool
"""
if self.redact and self.sensitive:
for item in self.sensitive:
if not item:
continue
message = message.replace(item, '*' * len(item))
if truncate:
if len(message) > self.truncate > 5:
message = message[:self.truncate - 5] + ' ...'
if color and self.color:
# convert color resets in message to desired color
message = message.replace(self.clear, color)
message = '%s%s%s' % (color, message, self.clear)
if sys.version_info[0] == 2:
message = to_bytes(message)
print(message, file=fd)
fd.flush()
class ApplicationError(Exception):
"""General application error."""
class ApplicationWarning(Exception):
"""General application warning which interrupts normal program flow."""
class SubprocessError(ApplicationError):
"""Error resulting from failed subprocess execution."""
def __init__(self, cmd, status=0, stdout=None, stderr=None, runtime=None):
"""
:type cmd: list[str]
:type status: int
:type stdout: str | None
:type stderr: str | None
:type runtime: float | None
"""
message = 'Command "%s" returned exit status %s.\n' % (' '.join(cmd_quote(c) for c in cmd), status)
if stderr:
message += '>>> Standard Error\n'
message += '%s%s\n' % (stderr.strip(), Display.clear)
if stdout:
message += '>>> Standard Output\n'
message += '%s%s\n' % (stdout.strip(), Display.clear)
message = message.strip()
super(SubprocessError, self).__init__(message)
self.cmd = cmd
self.message = message
self.status = status
self.stdout = stdout
self.stderr = stderr
self.runtime = runtime
class MissingEnvironmentVariable(ApplicationError):
"""Error caused by missing environment variable."""
def __init__(self, name):
"""
:type name: str
"""
super(MissingEnvironmentVariable, self).__init__('Missing environment variable: %s' % name)
self.name = name
def docker_qualify_image(name):
"""
:type name: str
:rtype: str
"""
config = get_docker_completion().get(name, {})
return config.get('name', name)
def parse_to_list_of_dict(pattern, value):
"""
:type pattern: str
:type value: str
:return: list[dict[str, str]]
"""
matched = []
unmatched = []
for line in value.splitlines():
match = re.search(pattern, line)
if match:
matched.append(match.groupdict())
else:
unmatched.append(line)
if unmatched:
raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched)))
return matched
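# Illustrative example (not part of the original file):
# parse_to_list_of_dict(r'(?P<name>\w+)=(?P<value>\w+)', 'a=1\nb=2') returns
# [{'name': 'a', 'value': '1'}, {'name': 'b', 'value': '2'}]; any line that does not
# match the pattern makes the function raise an exception instead.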
def get_available_port():
"""
:rtype: int
"""
# this relies on the kernel not reusing previously assigned ports immediately
socket_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with contextlib.closing(socket_fd):
socket_fd.bind(('', 0))
return socket_fd.getsockname()[1]
def get_subclasses(class_type): # type: (t.Type[C]) -> t.Set[t.Type[C]]
"""Returns the set of types that are concrete subclasses of the given type."""
subclasses = set() # type: t.Set[t.Type[C]]
queue = [class_type] # type: t.List[t.Type[C]]
while queue:
parent = queue.pop()
for child in parent.__subclasses__():
if child not in subclasses:
if not inspect.isabstract(child):
subclasses.add(child)
queue.append(child)
return subclasses
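# Illustrative note (not part of the original file): because __subclasses__() only knows
# about classes that have been imported, get_subclasses(SomeBase) -- SomeBase being any
# base class -- returns the concrete (non-abstract) subclasses imported so far, e.g. the
# plugin classes brought in by import_plugins() below.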
def is_subdir(candidate_path, path): # type: (str, str) -> bool
"""Returns true if candidate_path is path or a subdirectory of path."""
if not path.endswith(os.sep):
path += os.sep
if not candidate_path.endswith(os.sep):
candidate_path += os.sep
return candidate_path.startswith(path)
def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str]
"""Returns a list of directories extracted from the given list of paths."""
dir_names = set()
for path in paths:
while True:
path = os.path.dirname(path)
if not path or path == os.path.sep:
break
dir_names.add(path + os.path.sep)
return sorted(dir_names)
def import_plugins(directory, root=None): # type: (str, t.Optional[str]) -> None
"""
Import plugins from the given directory relative to the given root.
If the root is not provided, the 'lib' directory for the test runner will be used.
"""
if root is None:
root = os.path.dirname(__file__)
path = os.path.join(root, directory)
package = __name__.rsplit('.', 1)[0]
prefix = '%s.%s.' % (package, directory.replace(os.sep, '.'))
for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix):
module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.sep) + '.py')
load_module(module_path, name)
def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None
"""
Load plugins of the specified type and track them in the specified database.
Only plugins which have already been imported will be loaded.
"""
plugins = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) # type: t.Dict[str, t.Type[C]]
for plugin in plugins:
database[plugin] = plugins[plugin]
def load_module(path, name): # type: (str, str) -> None
"""Load a Python module using the given name and path."""
if name in sys.modules:
return
if sys.version_info >= (3, 4):
# noinspection PyUnresolvedReferences
import importlib.util
# noinspection PyUnresolvedReferences
spec = importlib.util.spec_from_file_location(name, path)
# noinspection PyUnresolvedReferences
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
sys.modules[name] = module
else:
# noinspection PyDeprecation
import imp
with open(path, 'r') as module_file:
# noinspection PyDeprecation
imp.load_module(name, module_file, path, ('.py', 'r', imp.PY_SOURCE))
@contextlib.contextmanager
def tempdir(): # type: () -> str
"""Creates a temporary directory that is deleted outside the context scope."""
temp_path = tempfile.mkdtemp()
yield temp_path
shutil.rmtree(temp_path)
@contextlib.contextmanager
def open_zipfile(path, mode='r'):
"""Opens a zip file and closes the file automatically."""
    zip_obj = zipfile.ZipFile(path, mode=mode)
    yield zip_obj
    zip_obj.close()
display = Display() # pylint: disable=locally-disabled, invalid-name
| gpl-3.0 | 9,083,042,423,914,896,000 | 26.790598 | 138 | 0.601607 | false |
gopchandani/ryu | ryu/tests/unit/ofproto/test_ofproto.py | 2 | 3001 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import eq_
LOG = logging.getLogger('test_ofproto')
class TestOfprotCommon(unittest.TestCase):
""" Test case for ofproto
"""
def test_ofp_event(self):
import ryu.ofproto
reload(ryu.ofproto)
import ryu.controller.ofp_event
reload(ryu.controller.ofp_event)
def test_ofproto(self):
# When new version of OFP support is added,
# this test must be updated.
import ryu.ofproto
reload(ryu.ofproto)
ofp_modules = ryu.ofproto.get_ofp_modules()
import ryu.ofproto.ofproto_v1_0
import ryu.ofproto.ofproto_v1_2
import ryu.ofproto.ofproto_v1_3
import ryu.ofproto.ofproto_v1_4
import ryu.ofproto.ofproto_v1_5
eq_(set(ofp_modules.keys()), set([ryu.ofproto.ofproto_v1_0.OFP_VERSION,
ryu.ofproto.ofproto_v1_2.OFP_VERSION,
ryu.ofproto.ofproto_v1_3.OFP_VERSION,
ryu.ofproto.ofproto_v1_4.OFP_VERSION,
ryu.ofproto.ofproto_v1_5.OFP_VERSION,
]))
consts_mods = set([ofp_mod[0] for ofp_mod in ofp_modules.values()])
eq_(consts_mods, set([ryu.ofproto.ofproto_v1_0,
ryu.ofproto.ofproto_v1_2,
ryu.ofproto.ofproto_v1_3,
ryu.ofproto.ofproto_v1_4,
ryu.ofproto.ofproto_v1_5,
]))
parser_mods = set([ofp_mod[1] for ofp_mod in ofp_modules.values()])
import ryu.ofproto.ofproto_v1_0_parser
import ryu.ofproto.ofproto_v1_2_parser
import ryu.ofproto.ofproto_v1_3_parser
import ryu.ofproto.ofproto_v1_4_parser
import ryu.ofproto.ofproto_v1_5_parser
eq_(parser_mods, set([ryu.ofproto.ofproto_v1_0_parser,
ryu.ofproto.ofproto_v1_2_parser,
ryu.ofproto.ofproto_v1_3_parser,
ryu.ofproto.ofproto_v1_4_parser,
ryu.ofproto.ofproto_v1_5_parser,
]))
| apache-2.0 | -3,789,969,255,951,991,300 | 39.554054 | 79 | 0.581806 | false |
jdilallo/jdilallo-test | examples/dfp/v201311/custom_field_service/update_custom_fields.py | 1 | 2345 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates custom field descriptions.
To determine which custom fields exist, run get_all_custom_fields.py.
Tags: CustomFieldService.updateCustomFields
      CustomFieldService.getCustomFieldsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate classes from the client library.
from googleads import dfp
# Set the ID of the custom field to update.
CUSTOM_FIELD_ID = 'INSERT_CUSTOM_FIELD_ID_HERE'
def main(client, custom_field_id):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v201311')
# Create statement to get a custom field.
values = [{
'key': 'customFieldId',
'value': {
'xsi_type': 'NumberValue',
'value': custom_field_id
}
}]
query = 'WHERE id = :customFieldId'
statement = dfp.FilterStatement(query, values)
# Get custom field.
  custom_fields = custom_field_service.getCustomFieldsByStatement(
statement.ToStatement())
if custom_fields:
for custom_field in custom_fields:
custom_field['description'] += ' Updated.'
# Update the custom field on the server.
custom_fields = custom_field_service.updateCustomFields(custom_fields)
# Display results.
for custom_field in custom_fields:
print (('Custom field with ID \'%s\', name \'%s\', and '
'description \'%s\' was updated.')
% (custom_field['id'], custom_field['name'],
custom_field['description']))
else:
print 'No custom fields found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CUSTOM_FIELD_ID)
| apache-2.0 | 7,011,441,882,691,880,000 | 30.689189 | 74 | 0.689552 | false |
onursumer/cbioportal | core/src/main/scripts/updateClinicalAttributesTable.py | 1 | 16595 | #! /usr/bin/env python
# ------------------------------------------------------------------------------
# Script which updates clinical_attribute table within the cBioPortal database.
# The following properties must be specified in portal.properties:
#
# db.portal_db_name
# db.user
# db.password
# db.host
# google.id
# google.pw
# importer.spreadsheet_service_appname
# importer.spreadsheet
# importer.clinical_attributes_worksheet
#
# For each clinical attribute listed in the clinical_attributes google worksheet,
# the script will update the respective record in the clinical_attribute table.
#
# ------------------------------------------------------------------------------
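# For reference, a minimal portal.properties could look like the following
# (all values below are placeholders, not taken from any real deployment):
#
# db.host=localhost
# db.portal_db_name=cbioportal
# db.user=cbio_user
# db.password=somepassword
# google.id=importer@example.com
# google.pw=app-password
# importer.spreadsheet=portal_importer_configuration
# importer.clinical_attributes_worksheet=clinical_attributes
# importer.spreadsheet_service_appname=cbioportal-importer
#
# ------------------------------------------------------------------------------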
import os
import sys
import getopt
import MySQLdb
import smtplib
import gdata.docs.client
import gdata.docs.service
import gdata.spreadsheet.service
# ------------------------------------------------------------------------------
# globals
# some file descriptors
ERROR_FILE = sys.stderr
OUTPUT_FILE = sys.stdout
# fields in portal.properties
CGDS_DATABASE_HOST = 'db.host'
CGDS_DATABASE_NAME = 'db.portal_db_name'
CGDS_DATABASE_USER = 'db.user'
CGDS_DATABASE_PW = 'db.password'
GOOGLE_ID = 'google.id'
GOOGLE_PW = 'google.pw'
IMPORTER_SPREADSHEET = 'importer.spreadsheet'
CLINICAL_ATTRIBUTES_WORKSHEET = 'importer.clinical_attributes_worksheet'
IMPORTER_SPREADSHEET_SERVICE_APPNAME = 'importer.spreadsheet_service_appname'
# a ref to the google spreadsheet client - used for all i/o to google spreadsheet
GOOGLE_SPREADSHEET_CLIENT = gdata.spreadsheet.service.SpreadsheetsService()
# column constants on google spreadsheet
CLINICAL_ATTRIBUTES_KEY = 'normalizedcolumnheader'
CLINICAL_ATTRIBUTES_DISPLAY_NAME = 'displayname'
CLINICAL_ATTRIBUTES_DESCRIPTION = 'descriptions'
CLINICAL_ATTRIBUTES_DATATYPE = 'datatype'
CLINICAL_ATTRIBUTES_ATTRIBUTE_TYPE = 'attributetype'
CLINICAL_ATTRIBUTES_PRIORITY = 'priority'
CLINICAL_ATTRIBUTE_TYPE_PATIENT = "PATIENT"
# ------------------------------------------------------------------------------
# class definitions
class PortalProperties(object):
def __init__(self,
cgds_database_host,
cgds_database_name, cgds_database_user, cgds_database_pw,
google_id, google_pw, google_spreadsheet, google_worksheet, app_name):
self.cgds_database_host = cgds_database_host
self.cgds_database_name = cgds_database_name
self.cgds_database_user = cgds_database_user
self.cgds_database_pw = cgds_database_pw
self.google_id = google_id
self.google_pw = google_pw
self.google_spreadsheet = google_spreadsheet
self.google_worksheet = google_worksheet
self.app_name = app_name
class ClinicalAttribute(object):
def __init__(self, normalized_column_header, display_name, description, datatype, patient_attribute, priority):
self.normalized_column_header = normalized_column_header
self.display_name = display_name
self.description = description
self.datatype = datatype
self.patient_attribute = patient_attribute
self.priority = priority
# ------------------------------------------------------------------------------
# sub-routines
# ------------------------------------------------------------------------------
# logs into google spreadsheet client
def google_login(user, pw, app_name):
# google spreadsheet
GOOGLE_SPREADSHEET_CLIENT.email = user
GOOGLE_SPREADSHEET_CLIENT.password = pw
GOOGLE_SPREADSHEET_CLIENT.source = app_name
GOOGLE_SPREADSHEET_CLIENT.ProgrammaticLogin()
# ------------------------------------------------------------------------------
# given a feed & feed name, returns its id
#
def get_feed_id(feed, name):
to_return = ''
for entry in feed.entry:
if entry.title.text.strip() == name:
id_parts = entry.id.text.split('/')
to_return = id_parts[len(id_parts) - 1]
return to_return
# ------------------------------------------------------------------------------
# gets a worksheet feed
def get_worksheet_feed(ss, ws):
ss_id = get_feed_id(GOOGLE_SPREADSHEET_CLIENT.GetSpreadsheetsFeed(), ss)
ws_id = get_feed_id(GOOGLE_SPREADSHEET_CLIENT.GetWorksheetsFeed(ss_id), ws)
return GOOGLE_SPREADSHEET_CLIENT.GetListFeed(ss_id, ws_id)
# ------------------------------------------------------------------------------
# get db connection
def get_db_connection(portal_properties):
# try and create a connection to the db
try:
connection = MySQLdb.connect(host=portal_properties.cgds_database_host, port=3306,
user=portal_properties.cgds_database_user,
passwd=portal_properties.cgds_database_pw,
db=portal_properties.cgds_database_name)
except MySQLdb.Error, msg:
print >> ERROR_FILE, msg
return None
return connection
# ------------------------------------------------------------------------------
# parse portal.properties
def get_portal_properties(portal_properties_filename):
properties = {}
portal_properties_file = open(portal_properties_filename, 'r')
for line in portal_properties_file:
line = line.strip()
# skip line if its blank or a comment
if len(line) == 0 or line.startswith('#'):
continue
# store name/value
property = line.split('=')
if len(property) != 2:
print >> ERROR_FILE, 'Skipping invalid entry in property file: ' + line
continue
properties[property[0]] = property[1].strip()
portal_properties_file.close()
# error check
if (CGDS_DATABASE_HOST not in properties or len(properties[CGDS_DATABASE_HOST]) == 0 or
CGDS_DATABASE_NAME not in properties or len(properties[CGDS_DATABASE_NAME]) == 0 or
CGDS_DATABASE_USER not in properties or len(properties[CGDS_DATABASE_USER]) == 0 or
CGDS_DATABASE_PW not in properties or len(properties[CGDS_DATABASE_PW]) == 0 or
GOOGLE_ID not in properties or len(properties[GOOGLE_ID]) == 0 or
GOOGLE_PW not in properties or len(properties[GOOGLE_PW]) == 0 or
IMPORTER_SPREADSHEET not in properties or len(properties[IMPORTER_SPREADSHEET]) == 0 or
CLINICAL_ATTRIBUTES_WORKSHEET not in properties or len(properties[CLINICAL_ATTRIBUTES_WORKSHEET]) == 0 or
IMPORTER_SPREADSHEET_SERVICE_APPNAME not in properties or len(properties[IMPORTER_SPREADSHEET_SERVICE_APPNAME]) == 0):
print >> ERROR_FILE, 'Missing one or more required properties, please check property file'
return None
# return an instance of PortalProperties
return PortalProperties(properties[CGDS_DATABASE_HOST],
properties[CGDS_DATABASE_NAME],
properties[CGDS_DATABASE_USER],
properties[CGDS_DATABASE_PW],
properties[GOOGLE_ID],
properties[GOOGLE_PW],
properties[IMPORTER_SPREADSHEET],
properties[CLINICAL_ATTRIBUTES_WORKSHEET],
properties[IMPORTER_SPREADSHEET_SERVICE_APPNAME])
# ------------------------------------------------------------------------------
# get clinical attributes from clinical_attribute portal database table
def get_db_clinical_attributes(cursor):
# map that we are returning
    # key is the clinical attribute name and value is a ClinicalAttribute object
to_return = {}
try:
cursor.execute('select * from clinical_attribute')
for row in cursor.fetchall():
to_return[row[0]] = ClinicalAttribute(row[0], row[1], row[2], row[3], row[4], row[5])
except MySQLdb.Error, msg:
print >> ERROR_FILE, msg
return None
return to_return
# ------------------------------------------------------------------------------
# checks validity of google worksheet record
def valid_worksheet_entry(normalized_column_header, display_name, description, datatype, priority):
if normalized_column_header == None or len(normalized_column_header) == 0:
return False
if display_name == None or len(display_name) == 0:
return False
if description == None or len(description) == 0:
return False
if datatype == None or len(datatype) == 0:
return False
if priority == None or len(priority) == 0:
return False
return True
# ------------------------------------------------------------------------------
# get clinical attributes from google worksheet
def get_worksheet_clinical_attributes(worksheet_feed):
# map that we are returning
# key is the clinical attribute name (tormalized column header) and value is a ClinicalAttribute object
to_return = {}
for entry in worksheet_feed.entry:
normalized_column_header = entry.custom[CLINICAL_ATTRIBUTES_KEY].text
display_name = entry.custom[CLINICAL_ATTRIBUTES_DISPLAY_NAME].text
description = entry.custom[CLINICAL_ATTRIBUTES_DESCRIPTION].text
datatype = entry.custom[CLINICAL_ATTRIBUTES_DATATYPE].text
if entry.custom[CLINICAL_ATTRIBUTES_ATTRIBUTE_TYPE].text == CLINICAL_ATTRIBUTE_TYPE_PATIENT:
patient_attribute = 1
else:
patient_attribute = 0
priority = entry.custom[CLINICAL_ATTRIBUTES_PRIORITY].text
if valid_worksheet_entry(normalized_column_header, display_name, description, datatype, priority):
to_return[normalized_column_header] = ClinicalAttribute(normalized_column_header, display_name, description, datatype, patient_attribute, priority)
else:
print >> OUTPUT_FILE, "An attribute from the worksheet is missing a value, skipping: %s" % entry
return to_return
# ------------------------------------------------------------------------------
# updates the clinical attribute record in the portal clinical_attribute database table
# returns boolean indicating success or failure
def update_clinical_attributes_in_db(cursor, clinical_attributes):
try:
cursor.executemany("update clinical_attribute set display_name=%s, description=%s," +
" datatype=%s, patient_attribute=%s, priority=%s where attr_id = %s",
[(clinical_attribute.display_name, clinical_attribute.description, clinical_attribute.datatype,
clinical_attribute.patient_attribute, clinical_attribute.priority, clinical_attribute.normalized_column_header) for clinical_attribute in clinical_attributes])
except MySQLdb.Error, msg:
print >> ERROR_FILE, msg
return False
return True
# ------------------------------------------------------------------------------
# gets list of clinical attributes that need updating
def get_clinical_attributes_to_update(worksheet_clinical_attributes, database_clinical_attributes):
# map that we are returning
# key is the clinical attribute name and value is a ClinicalAttribute object
to_return = {}
for db_clinical_attribute in database_clinical_attributes.values():
worksheet_clinical_attribute = worksheet_clinical_attributes.get(db_clinical_attribute.normalized_column_header)
if (worksheet_clinical_attribute is None): continue
if (db_clinical_attribute.display_name != worksheet_clinical_attribute.display_name or
db_clinical_attribute.description != worksheet_clinical_attribute.description or
db_clinical_attribute.datatype != worksheet_clinical_attribute.datatype or
db_clinical_attribute.patient_attribute != worksheet_clinical_attribute.patient_attribute or
db_clinical_attribute.priority != worksheet_clinical_attribute.priority):
to_return[worksheet_clinical_attribute.normalized_column_header] = ClinicalAttribute(worksheet_clinical_attribute.normalized_column_header,
worksheet_clinical_attribute.display_name,
worksheet_clinical_attribute.description,
worksheet_clinical_attribute.datatype,
worksheet_clinical_attribute.patient_attribute,
worksheet_clinical_attribute.priority)
return to_return
# ------------------------------------------------------------------------------
# updates clinical attribute records in the clinical_attribute table in the
# portal database with values from the clinical_attributes google worksheet
def update_clinical_attributes(cursor, worksheet_feed):
# get map of cancer studies from worksheet
print >> OUTPUT_FILE, 'Getting list of clinical attributes from google worksheet'
worksheet_clinical_attributes_map = get_worksheet_clinical_attributes(worksheet_feed)
if worksheet_clinical_attributes_map is not None:
print >> OUTPUT_FILE, 'We have found %s clinical attributes in worksheet' % len(worksheet_clinical_attributes_map)
else:
print >> OUTPUT_FILE, 'Error reading clinical attributes from worksheet'
# get map of clinical attributes from database
print >> OUTPUT_FILE, 'Getting list of clinical attributes from portal database'
database_clinical_attributes_map = get_db_clinical_attributes(cursor)
if database_clinical_attributes_map is not None:
print >> OUTPUT_FILE, 'We have found %s clinical attributes in portal database' % len(database_clinical_attributes_map)
else:
print >> OUTPUT_FILE, 'Error reading clinical attributes from portal database'
# get list of clinical attributes to update
print >> OUTPUT_FILE, 'Checking for clinical attributes that require updates'
clinical_attributes_needing_update_map = get_clinical_attributes_to_update(worksheet_clinical_attributes_map, database_clinical_attributes_map)
# update clinical attributes as needed
if len(clinical_attributes_needing_update_map) > 0:
print >> OUTPUT_FILE, 'We have %s clinical attributes that require an update' % len(clinical_attributes_needing_update_map)
for attr_id in clinical_attributes_needing_update_map.keys():
print >> OUTPUT_FILE, 'Updating "%s" clinical attribute' % attr_id
success = update_clinical_attributes_in_db(cursor, clinical_attributes_needing_update_map.values())
if success:
print >> OUTPUT_FILE, 'Successfully updated clinical attributes in database'
else:
print >> OUTPUT_FILE, 'Error updating clinical attributes in database'
else:
print >> OUTPUT_FILE, 'No clinical attributes to update, exiting'
# ------------------------------------------------------------------------------
# displays program usage (invalid args)
def usage():
print >> OUTPUT_FILE, 'updateClinicalAttributesTable.py --properties-file [properties file]'
# ------------------------------------------------------------------------------
# the big deal main.
def main():
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], '', ['properties-file='])
except getopt.error, msg:
print >> ERROR_FILE, msg
usage()
sys.exit(2)
# process the options
properties_filename = ''
for o, a in opts:
if o == '--properties-file':
properties_filename = a
if properties_filename == '':
usage()
sys.exit(2)
# check existence of file
if not os.path.exists(properties_filename):
print >> ERROR_FILE, 'properties file cannot be found: ' + properties_filename
sys.exit(2)
# parse/get relevant portal properties
print >> OUTPUT_FILE, 'Reading portal properties file: ' + properties_filename
portal_properties = get_portal_properties(properties_filename)
if not portal_properties:
print >> OUTPUT_FILE, 'Error reading %s, exiting' % properties_filename
return
# get db connection & create cursor
print >> OUTPUT_FILE, 'Connecting to database: ' + portal_properties.cgds_database_name
connection = get_db_connection(portal_properties)
if connection is not None:
cursor = connection.cursor()
else:
print >> OUTPUT_FILE, 'Error connecting to database, exiting'
return
# login to google and get spreadsheet feed
google_login(portal_properties.google_id, portal_properties.google_pw, portal_properties.app_name)
worksheet_feed = get_worksheet_feed(portal_properties.google_spreadsheet,
portal_properties.google_worksheet)
# update the clinical attributes
update_clinical_attributes(cursor, worksheet_feed)
# clean up
cursor.close()
connection.commit()
connection.close()
# ------------------------------------------------------------------------------
# ready to roll
if __name__ == '__main__':
main()
| agpl-3.0 | 8,564,231,414,605,613,000 | 40.4875 | 160 | 0.639892 | false |
proxysh/Safejumper-for-Mac | buildmac/Resources/env/lib/python2.7/site-packages/obfsproxy/network/buffer.py | 19 | 1998 | class Buffer(object):
"""
A Buffer is a simple FIFO buffer. You write() stuff to it, and you
read() them back. You can also peek() or drain() data.
"""
def __init__(self, data=''):
"""
Initialize a buffer with 'data'.
"""
self.buffer = bytes(data)
def read(self, n=-1):
"""
Read and return 'n' bytes from the buffer.
If 'n' is negative, read and return the whole buffer.
If 'n' is larger than the size of the buffer, read and return
the whole buffer.
"""
if (n < 0) or (n > len(self.buffer)):
the_whole_buffer = self.buffer
self.buffer = bytes('')
return the_whole_buffer
data = self.buffer[:n]
self.buffer = self.buffer[n:]
return data
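    # Illustrative usage (not part of the original source):
    #   buf = Buffer('abcdef')
    #   buf.read(2)  -> 'ab'    (the two bytes are drained)
    #   buf.peek()   -> 'cdef'  (nothing is drained)
    #   buf.drain(2)
    #   buf.read()   -> 'ef'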
def write(self, data):
"""
Append 'data' to the buffer.
"""
self.buffer = self.buffer + data
def peek(self, n=-1):
"""
Return 'n' bytes from the buffer, without draining them.
If 'n' is negative, return the whole buffer.
If 'n' is larger than the size of the buffer, return the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
return self.buffer
return self.buffer[:n]
def drain(self, n=-1):
"""
Drain 'n' bytes from the buffer.
If 'n' is negative, drain the whole buffer.
If 'n' is larger than the size of the buffer, drain the whole
buffer.
"""
if (n < 0) or (n > len(self.buffer)):
self.buffer = bytes('')
return
self.buffer = self.buffer[n:]
return
def __len__(self):
"""Returns length of buffer. Used in len()."""
return len(self.buffer)
def __nonzero__(self):
"""
Returns True if the buffer is non-empty.
Used in truth-value testing.
"""
return True if len(self.buffer) else False
| gpl-2.0 | -3,339,923,523,502,183,000 | 25.64 | 70 | 0.521021 | false |
kivy/kivy | kivy/geometry.py | 4 | 3792 | '''
Geometry utilities
==================
This module contains some helper functions for geometric calculations.
'''
__all__ = ('circumcircle', 'minimum_bounding_circle')
from kivy.vector import Vector
def circumcircle(a, b, c):
'''
Computes the circumcircle of a triangle defined by a, b, c.
See: http://en.wikipedia.org/wiki/Circumscribed_circle
:Parameters:
`a`: iterable containing at least 2 values (for x and y)
The 1st point of the triangle.
`b`: iterable containing at least 2 values (for x and y)
The 2nd point of the triangle.
`c`: iterable containing at least 2 values (for x and y)
The 3rd point of the triangle.
:Return:
A tuple that defines the circle :
* The first element in the returned tuple is the center as (x, y)
* The second is the radius (float)
'''
P = Vector(a[0], a[1])
Q = Vector(b[0], b[1])
R = Vector(c[0], c[1])
mPQ = (P + Q) * .5
mQR = (Q + R) * .5
numer = -(- mPQ.y * R.y + mPQ.y * Q.y + mQR.y * R.y - mQR.y * Q.y -
mPQ.x * R.x + mPQ.x * Q.x + mQR.x * R.x - mQR.x * Q.x)
denom = (-Q.x * R.y + P.x * R.y - P.x * Q.y +
Q.y * R.x - P.y * R.x + P.y * Q.x)
t = numer / denom
cx = -t * (Q.y - P.y) + mPQ.x
cy = t * (Q.x - P.x) + mPQ.y
return ((cx, cy), (P - (cx, cy)).length())
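# Illustrative example (not part of the original module): for the right triangle
# (0, 0), (2, 0), (0, 2) the circumcenter is the midpoint of the hypotenuse, so
# circumcircle((0, 0), (2, 0), (0, 2)) returns ((1.0, 1.0), r) with r ~= 1.414.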
def minimum_bounding_circle(points):
'''
Returns the minimum bounding circle for a set of points.
For a description of the problem being solved, see the `Smallest Circle
Problem <http://en.wikipedia.org/wiki/Smallest_circle_problem>`_.
The function uses Applet's Algorithm, the runtime is ``O(h^3, *n)``,
where h is the number of points in the convex hull of the set of points.
**But** it runs in linear time in almost all real world cases.
See: http://tinyurl.com/6e4n5yb
:Parameters:
`points`: iterable
A list of points (2 tuple with x,y coordinates)
:Return:
A tuple that defines the circle:
* The first element in the returned tuple is the center (x, y)
* The second the radius (float)
'''
points = [Vector(p[0], p[1]) for p in points]
if len(points) == 1:
return (points[0].x, points[0].y), 0.0
if len(points) == 2:
p1, p2 = points
return (p1 + p2) * .5, ((p1 - p2) * .5).length()
# determine a point P with the smallest y value
P = min(points, key=lambda p: p.y)
# find a point Q such that the angle of the line segment
# PQ with the x axis is minimal
def x_axis_angle(q):
if q == P:
return 1e10 # max val if the same, to skip
return abs((q - P).angle((1, 0)))
Q = min(points, key=x_axis_angle)
for p in points:
# find R such that angle PRQ is minimal
def angle_pq(r):
if r in (P, Q):
return 1e10 # max val if the same, to skip
return abs((r - P).angle(r - Q))
R = min(points, key=angle_pq)
# check for case 1 (angle PRQ is obtuse), the circle is determined
# by two points, P and Q. radius = |(P-Q)/2|, center = (P+Q)/2
if angle_pq(R) > 90.0:
return (P + Q) * .5, ((P - Q) * .5).length()
# if angle RPQ is obtuse, make P = R, and try again
if abs((R - P).angle(Q - P)) > 90:
P = R
continue
# if angle PQR is obtuse, make Q = R, and try again
if abs((P - Q).angle(R - Q)) > 90:
Q = R
continue
# all angles were acute..we just need the circle through the
# two points furthest apart!
break
# find the circumcenter for triangle given by P,Q,R
return circumcircle(P, Q, R)
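
# Illustrative sketch (not part of the original module): a quick numerical
# check of both helpers. For the right triangle (0, 0), (4, 0), (0, 3) the
# circumcenter is the midpoint of the hypotenuse, (2, 1.5), and the radius is
# half the hypotenuse, 2.5; the minimum bounding circle of those points (plus
# an interior point) is that same circle.
if __name__ == '__main__':
    center, radius = circumcircle((0, 0), (4, 0), (0, 3))
    assert abs(center[0] - 2.0) < 1e-9 and abs(center[1] - 1.5) < 1e-9
    assert abs(radius - 2.5) < 1e-9

    center, radius = minimum_bounding_circle([(0, 0), (4, 0), (0, 3), (1, 1)])
    assert abs(radius - 2.5) < 1e-9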
| mit | -4,329,429,996,711,527,000 | 30.338843 | 76 | 0.548787 | false |
msg/g2ools | nord/convert/filter.py | 1 | 11049 | #
# filter.py - Filter tab conversion objects
#
# Copyright (c) 2006,2007 Matt Gerassimoff
#
# This file is part of g2ools.
#
# g2ools is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# g2ools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Foobar; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from nord.utils import setv, getv
from nord.units import nm1fltfreq, g2fltfreq, fltfreq_map
from nord.convert import Convert
from nord.convert.convert import handlekbt, updatevals
from nord.convert.table import modtable, lphpfreq
class ConvFilter(Convert):
def domodule(self):
updatevals(self.g2module.params, ['Freq'], fltfreq_map)
class ConvFilterA(ConvFilter):
maing2module = 'FltLP'
parammap = ['Freq']
inputmap = ['In']
outputmap = ['Out']
lphp = 0
def domodule(self):
ConvFilter.domodule(self)
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Freq, lphpfreq[getv(nmmp.Freq)][self.lphp])
class ConvFilterB(ConvFilterA):
maing2module = 'FltHP'
parammap = ['Freq']
inputmap = ['In']
outputmap = ['Out']
lphp = 1
class ConvFilterC(ConvFilter):
maing2module = 'FltMulti'
parammap = ['Freq', 'Res', ['GC', 'GainControl']]
inputmap = ['In']
outputmap = ['LP', 'BP', 'HP']
class ConvFilterD(ConvFilter):
maing2module = 'FltMulti'
parammap = ['Freq', None, 'Res', ['PitchMod', 'FreqMod']]
inputmap = ['PitchVar', 'In']
outputmap = ['HP', 'BP', 'LP']
def domodule(self):
ConvFilter.domodule(self)
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
self.kbt = g2mp.Kbt
handlekbt(self, g2m.inputs.Pitch, 4) # 4=Kbt 100%
# copied from convert.py for osc.py (maybe it can be unified?)
def fltdualpitchmod(nmm, g2m, conv, mod1, mod2):
p1 = p2 = None
if len(nmm.inputs.FreqMod1.cables) and len(nmm.inputs.FreqMod2.cables):
mix21b = conv.add_module('Mix2-1B', name='FreqMod')
setv(mix21b.params.ExpLin, 1) # lin
conv.connect(mix21b.outputs.Out, g2m.inputs.PitchVar)
setv(g2m.params.PitchMod, 127)
p1, p2 = mix21b.inputs.In1, mix21b.inputs.In2
setv(mix21b.params.Lev1, getv(nmm.params.FreqMod1))
conv.params[mod1] = mix21b.params.Lev1
setv(mix21b.params.Lev2, getv(nmm.params.FreqMod2))
conv.params[mod2] = mix21b.params.Lev2
elif len(nmm.inputs.FreqMod1.cables):
p1 = g2m.inputs.PitchVar
setv(g2m.params.PitchMod, getv(nmm.params.FreqMod1))
conv.params[mod1] = g2m.params.PitchMod
elif len(nmm.inputs.FreqMod2.cables):
p2 = g2m.inputs.PitchVar
setv(g2m.params.PitchMod, getv(nmm.params.FreqMod2))
conv.params[mod2] = g2m.params.PitchMod
return p1, p2
class ConvFilterE(ConvFilter):
maing2module = 'FltNord'
parammap = ['FilterType', ['GC', 'GainControl'], None,
'Freq', None, 'ResMod', 'Res',
'Slope', None, ['Active', 'Bypass']]
inputmap = ['PitchVar', 'Res', 'In', '']
outputmap = ['Out']
def domodule(self):
ConvFilter.domodule(self)
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Active, 1-getv(nmmp.Bypass))
# handle special inputs
p1, p2 = fltdualpitchmod(nmm, g2m, self, 2, 8)
self.inputs[0] = p1
self.inputs[3] = p2
self.kbt = g2mp.Kbt
handlekbt(self, g2m.inputs.Pitch, 4) # 4=Kbt 100%
class ConvFilterF(ConvFilter):
maing2module = 'FltClassic'
parammap = ['Freq', None, 'Res', None, None,
'Slope', ['Active', 'Bypass']]
inputmap = ['PitchVar', '', 'In', '']
outputmap = ['Out']
def domodule(self):
ConvFilter.domodule(self)
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Active, 1-getv(nmmp.Bypass))
# handle special inputs
p1, p2 = fltdualpitchmod(nmm, g2m, self, 3, 4)
self.inputs[0:2] = p1, p2
self.kbt = g2mp.Kbt
handlekbt(self, g2m.inputs.Pitch, 4) # 4=Kbt 100%
class ConvVocalFilter(ConvFilter):
maing2module = 'FltVoice'
parammap = ['Vowel1', 'Vowel2', 'Vowel3', 'Level', 'Vowel', 'VowelMod',
'Freq', 'FreqMod', 'Res']
inputmap = ['In', 'VowelMod', 'FreqMod']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Level, modtable[getv(nmmp.Level)][0])
class ConvVocoder(Convert):
maing2module = 'Vocoder'
parammap = ['Band%d' % i for i in xrange(1, 17)]+[None, 'Emphasis', 'Monitor']
inputmap = ['Ctrl', 'In']
outputmap = ['Out']
class ConvFilterBank(Convert):
maing2module = 'LevAmp'
parammap = [None]*14
inputmap = ['In']
outputmap = ['']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Type, 0) # Lin
setv(g2mp.Gain, 44)
hc18 = self.add_module('FltLP', name='HC 1-8')
hc18.modes.SlopeMode.value = 2 # 18
setv(hc18.params.Kbt, 0)
setv(hc18.params.Freq, 87)
self.connect(g2m.outputs.Out, hc18.inputs.In)
lp1 = self.add_module('FltLP', name='')
lp1.modes.SlopeMode.value = 1 # 12
setv(lp1.params.Kbt, 0)
setv(lp1.params.Freq, 55)
self.connect(hc18.outputs.Out, lp1.inputs.In)
band13 = self.add_module('FltLP', name='1-3')
band13.modes.SlopeMode.value = 2 # 18
setv(band13.params.Kbt, 0)
setv(band13.params.Freq, 36)
self.connect(lp1.outputs.Out, band13.inputs.In)
band45 = self.add_module('FltStatic', name='4-5')
setv(band45.params.Freq, 51)
setv(band45.params.Res, 44)
setv(band45.params.FilterType, 1) # BP
setv(band45.params.GC, 1)
self.connect(band13.inputs.In, band45.inputs.In)
band45out = self.add_module('LevConv', name='4-5 Out')
setv(band45out.params.InputType, 0) # Bip
setv(band45out.params.OutputType, 5) # BipInv
self.connect(band45.outputs.Out, band45out.inputs.In)
lc68 = self.add_module('FltHP', name='LC 6-8')
lc68.modes.SlopeMode.value = 1 # 12
setv(lc68.params.Kbt, 0)
setv(lc68.params.Freq, 57)
self.connect(lp1.inputs.In, lc68.inputs.In)
levconv1 = self.add_module('LevConv', name='')
setv(levconv1.params.InputType, 0) # Bip
setv(levconv1.params.OutputType, 5) # BipInv
self.connect(lc68.outputs.Out, levconv1.inputs.In)
band6 = self.add_module('FltStatic', name='6')
setv(band6.params.Freq, 57)
setv(band6.params.Res, 75)
setv(band6.params.FilterType, 1) # BP
setv(band6.params.GC, 0)
self.connect(levconv1.outputs.Out, band6.inputs.In)
band7 = self.add_module('FltStatic', name='7')
setv(band7.params.Freq, 65)
setv(band7.params.Res, 74)
setv(band7.params.FilterType, 1) # BP
setv(band7.params.GC, 1)
self.connect(band6.inputs.In, band7.inputs.In)
band8 = self.add_module('FltStatic', name='8')
setv(band8.params.Freq, 71)
setv(band8.params.Res, 74)
setv(band8.params.FilterType, 1) # BP
setv(band8.params.GC, 1)
self.connect(band7.inputs.In, band8.inputs.In)
lc914 = self.add_module('FltHP', name='LC 9-14')
lc914.modes.SlopeMode.value = 3 # 24
setv(lc914.params.Kbt, 0)
setv(lc914.params.Freq, 76)
self.connect(hc18.inputs.In, lc914.inputs.In)
band910 = self.add_module('FltStatic', name='9-10')
setv(band910.params.Freq, 83)
setv(band910.params.Res, 29)
setv(band910.params.FilterType, 1) # BP
setv(band910.params.GC, 0)
self.connect(lc914.outputs.Out, band910.inputs.In)
band1112 = self.add_module('FltStatic', name='11-12')
setv(band1112.params.Freq, 97)
setv(band1112.params.Res, 30)
setv(band1112.params.FilterType, 1) # BP
setv(band1112.params.GC, 0)
self.connect(band910.inputs.In, band1112.inputs.In)
band1314 = self.add_module('FltHP', name='13-14')
band1314.modes.SlopeMode.value = 3 # 24
setv(band1314.params.Kbt, 0)
setv(band1314.params.Freq, 99)
self.connect(band910.inputs.In, band1314.inputs.In)
band1314out = self.add_module('LevConv', name='13-14 Out')
setv(band1314out.params.InputType, 0) # Bip
setv(band1314out.params.OutputType, 5) # BipInv
self.connect(band1314.outputs.Out, band1314out.inputs.In)
mixfader = self.add_module('MixFader', name='FilterBank')
mixfaderp = mixfader.params
onnms = ['1-3', '4-5', '6', '7', '8', '9-10', '11-12', '13-14']
setv(mixfaderp.ExpLin, 2) # dB
for i in xrange(len(onnms)):
onp = getattr(mixfaderp, 'On%d'%(i+1))
setv(onp, 1)
onp.labels = [onnms[i]]
def gv(p, nm):
return getv(getattr(p, nm))
setv(mixfaderp.Lev1, (gv(nmmp, '50')+gv(nmmp, '75')+gv(nmmp, '110'))/3)
setv(mixfaderp.Lev2, (gv(nmmp, '170')+gv(nmmp, '250'))/2)
setv(mixfaderp.Lev3, gv(nmmp, '380'))
setv(mixfaderp.Lev4, gv(nmmp, '570'))
setv(mixfaderp.Lev5, gv(nmmp, '850'))
setv(mixfaderp.Lev6, (gv(nmmp, '1.3')+gv(nmmp, '1.9'))/2)
setv(mixfaderp.Lev7, (gv(nmmp, '2.9')+gv(nmmp, '4.2'))/2)
setv(mixfaderp.Lev8, (gv(nmmp, '6.4')+gv(nmmp, '8.3'))/2)
self.connect(band13.outputs.Out, mixfader.inputs.In1)
self.connect(band45out.outputs.Out, mixfader.inputs.In2)
self.connect(band6.outputs.Out, mixfader.inputs.In3)
self.connect(band7.outputs.Out, mixfader.inputs.In4)
self.connect(band8.outputs.Out, mixfader.inputs.In5)
self.connect(band910.outputs.Out, mixfader.inputs.In6)
self.connect(band1112.outputs.Out, mixfader.inputs.In7)
self.connect(band1314out.outputs.Out, mixfader.inputs.In8)
mix11a = self.add_module('Mix1-1A', name='Out/Boost')
setv(mix11a.params.On, 1)
mix11a.params.On.labels = ['Out']
setv(mix11a.params.Lev, 110)
self.connect(mixfader.outputs.Out, mix11a.inputs.Chain)
self.connect(mix11a.outputs.Out, mix11a.inputs.In)
self.outputs[0] = mix11a.outputs.Out
class ConvEqMid(ConvFilter):
maing2module = 'EqPeak'
parammap = ['Freq', 'Gain', 'Bandwidth', ['Active', 'Bypass'], 'Level']
inputmap = ['In']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Active, 1-getv(nmmp.Bypass))
setv(g2mp.Level, modtable[getv(nmmp.Level)][0])
class ConvEqShelving(ConvFilter):
maing2module = 'EqPeak'
parammap = ['Freq', 'Gain', None, ['Active', 'Bypass'], 'Level']
inputmap = ['In']
outputmap = ['Out']
def domodule(self):
nmm, g2m = self.nmmodule, self.g2module
nmmp, g2mp = nmm.params, g2m.params
setv(g2mp.Bandwidth, 0)
setv(g2mp.Active, 1-getv(nmmp.Bypass))
setv(g2mp.Level, modtable[getv(nmmp.Level)][0])
| gpl-2.0 | -8,236,602,028,368,334,000 | 33.313665 | 80 | 0.663408 | false |
MSPARP/newparp | newparp/views/characters.py | 1 | 4091 | import json
from flask import g, jsonify, redirect, request, render_template, url_for
from sqlalchemy.orm import joinedload
from newparp.helpers import alt_formats
from newparp.helpers.auth import activation_required
from newparp.helpers.characters import character_query, save_character_from_form, validate_character_form
from newparp.model import case_options, Character, CharacterTag, Fandom, SearchCharacter, SearchCharacterGroup
from newparp.model.connections import use_db
@alt_formats({"json"})
@use_db
@activation_required
def character_list(fmt=None):
characters = g.db.query(Character).filter(
Character.user_id == g.user.id,
).order_by(Character.title, Character.id).all()
if fmt == "json":
return jsonify({ "characters": [_.to_dict(include_default=True) for _ in characters] })
return render_template(
"characters/character_list.html",
characters=characters,
)
@use_db
@activation_required
def new_character_get():
fandoms = (
g.db.query(Fandom)
.order_by(Fandom.name)
.options(
joinedload(Fandom.groups)
.joinedload(SearchCharacterGroup.characters)
).all()
)
character_defaults = {_.name: _.default.arg for _ in Character.__table__.columns if _.default}
character_defaults["search_character"] = fandoms[0].groups[0].characters[0]
return render_template(
"characters/character.html",
character=character_defaults,
replacements=[],
regexes=[],
character_tags={},
fandoms=fandoms,
case_options=case_options,
)
@use_db
@activation_required
def new_character_post():
new_details = validate_character_form(request.form)
g.db.add(Character(user_id=g.user.id, **new_details))
return redirect(url_for("rp_character_list"))
@alt_formats({"json"})
@use_db
@activation_required
def character(character_id, fmt=None):
character = character_query(character_id, join_tags=True)
fandoms = (
g.db.query(Fandom)
.order_by(Fandom.name)
.options(
joinedload(Fandom.groups)
.joinedload(SearchCharacterGroup.characters)
).all()
)
if fmt == "json":
return jsonify(character.to_dict(include_default=True, include_options=True))
return render_template(
"characters/character.html",
character=character,
replacements=json.loads(character.replacements),
regexes=json.loads(character.regexes),
character_tags={
tag_type: ", ".join(tag["alias"] for tag in tags)
for tag_type, tags in character.tags_by_type().items()
},
fandoms=fandoms,
case_options=case_options,
)
@use_db
@activation_required
def save_character(character_id):
# In a separate function so we can call it from request search.
character = save_character_from_form(character_id, request.form)
return redirect(url_for("rp_character_list"))
@use_db
@activation_required
def delete_character_get(character_id):
character = character_query(character_id)
return render_template("characters/delete_character.html", character=character)
@use_db
@activation_required
def delete_character_post(character_id):
character = character_query(character_id)
character_id = character.id
if g.user.default_character_id == character_id:
g.user.default_character_id = None
if g.user.roulette_character_id == character_id:
g.user.roulette_character_id = None
g.db.flush()
g.db.query(CharacterTag).filter(CharacterTag.character_id == character_id).delete()
# Don't use g.db.delete(character) because it does a load of extra queries
# for foreign keys and stuff.
g.db.query(Character).filter(Character.id == character_id).delete()
return redirect(url_for("rp_character_list"))
@use_db
@activation_required
def set_default_character(character_id):
character = character_query(character_id)
g.user.default_character = character
return redirect(url_for("rp_character_list"))
| agpl-3.0 | 6,142,355,101,788,195,000 | 28.861314 | 110 | 0.681496 | false |
wda-hb/test | registration/tests/forms.py | 27 | 5663 | from __future__ import unicode_literals
from distutils.version import StrictVersion
from django import get_version
from django.test import TestCase
from registration import forms
from registration.users import UserModel
DJANGO_VERSION = StrictVersion(get_version())
class RegistrationFormTests(TestCase):
"""
Test the default registration forms.
"""
def test_registration_form(self):
"""
Test that ``RegistrationForm`` enforces username constraints
and matching passwords.
"""
# Create a user so we can verify that duplicate usernames aren't
# permitted.
UserModel().objects.create_user('alice', '[email protected]', 'secret')
bad_username_error = 'This value may contain only letters, numbers and @/./+/-/_ characters.'
if DJANGO_VERSION >= StrictVersion('1.8'):
bad_username_error = 'Enter a valid username. ' + bad_username_error
invalid_data_dicts = [
# Non-alphanumeric username.
{'data': {'username': 'foo/bar',
'email': '[email protected]',
'password1': 'foo',
'password2': 'foo'},
'error': ('username', [bad_username_error])},
# Already-existing username.
{'data': {'username': 'alice',
'email': '[email protected]',
'password1': 'secret',
'password2': 'secret'},
'error': ('username', ["A user with that username already exists."])},
# Mismatched passwords.
{'data': {'username': 'foo',
'email': '[email protected]',
'password1': 'foo',
'password2': 'bar'},
'error': ('password2', ["The two password fields didn't match."])},
]
for invalid_dict in invalid_data_dicts:
form = forms.RegistrationForm(data=invalid_dict['data'])
self.failIf(form.is_valid())
self.assertEqual(form.errors[invalid_dict['error'][0]],
invalid_dict['error'][1])
form = forms.RegistrationForm(data={'username': 'foo',
'email': '[email protected]',
'password1': 'foo',
'password2': 'foo'})
self.failUnless(form.is_valid())
def test_registration_form_tos(self):
"""
Test that ``RegistrationFormTermsOfService`` requires
agreement to the terms of service.
"""
form = forms.RegistrationFormTermsOfService(data={'username': 'foo',
'email': '[email protected]',
'password1': 'foo',
'password2': 'foo'})
self.failIf(form.is_valid())
self.assertEqual(form.errors['tos'],
["You must agree to the terms to register"])
form = forms.RegistrationFormTermsOfService(data={'username': 'foo',
'email': '[email protected]',
'password1': 'foo',
'password2': 'foo',
'tos': 'on'})
self.failUnless(form.is_valid())
def test_registration_form_unique_email(self):
"""
Test that ``RegistrationFormUniqueEmail`` validates uniqueness
of email addresses.
"""
# Create a user so we can verify that duplicate addresses
# aren't permitted.
UserModel().objects.create_user('alice', '[email protected]', 'secret')
form = forms.RegistrationFormUniqueEmail(data={'username': 'foo',
'email': '[email protected]',
'password1': 'foo',
'password2': 'foo'})
self.failIf(form.is_valid())
self.assertEqual(form.errors['email'],
["This email address is already in use. Please supply a different email address."])
form = forms.RegistrationFormUniqueEmail(data={'username': 'foo',
'email': '[email protected]',
'password1': 'foo',
'password2': 'foo'})
self.failUnless(form.is_valid())
def test_registration_form_no_free_email(self):
"""
Test that ``RegistrationFormNoFreeEmail`` disallows
registration with free email addresses.
"""
base_data = {'username': 'foo',
'password1': 'foo',
'password2': 'foo'}
for domain in forms.RegistrationFormNoFreeEmail.bad_domains:
invalid_data = base_data.copy()
invalid_data['email'] = "foo@%s" % domain
form = forms.RegistrationFormNoFreeEmail(data=invalid_data)
self.failIf(form.is_valid())
self.assertEqual(form.errors['email'],
["Registration using free email addresses is prohibited. Please supply a different email address."])
base_data['email'] = '[email protected]'
form = forms.RegistrationFormNoFreeEmail(data=base_data)
self.failUnless(form.is_valid())
| bsd-3-clause | 8,009,391,814,884,395,000 | 42.229008 | 129 | 0.492848 | false |
enoex/django-shop | shop/util/login_mixin.py | 20 | 2133 | """
A mixin class that provides view securing functionality to class based views
similar to the @login_required() decorator.
"""
from django.conf import settings
from django.contrib.auth import logout
from django.contrib.auth.decorators import user_passes_test
from django.http import HttpResponseRedirect
class LoginMixin(object):
"""
Mixin for securing a class.
Taken from here:
https://groups.google.com/d/msg/django-users/g2E_6ZYN_R0/tnB9b262lcAJ
"""
def do_logout(self, request):
"""Logs the user out if necessary."""
logout(request)
return HttpResponseRedirect(self.get_login_url())
def get_test_func(self):
"""
Returns the function that is being used to test if a user is
authenticated.
"""
return get_test_func(getattr(self, 'test_func', None))
def get_login_url(self):
"""Returns the login URL."""
return getattr(self, 'login_url', None)
def get_redirect_field_name(self):
"""Returns the redirect_field_name."""
return getattr(self, 'redirect_field_name', None)
def dispatch(self, request, *args, **kwargs):
test_kwargs = {}
login_url = self.get_login_url()
if login_url:
test_kwargs['login_url'] = login_url
redirect_field_name = self.get_redirect_field_name()
if redirect_field_name:
test_kwargs['redirect_field_name'] = redirect_field_name
return user_passes_test(
self.get_test_func(),
**test_kwargs)(super(LoginMixin, self).dispatch)(
request, *args, **kwargs)
def get_test_func(test_func=None):
"""
Returns the test function to be used for authentication and takes the
setting `SHOP_FORCE_LOGIN` into consideration.
:param test_func: Optional. You can provide your own test function for
authentication. This should be a lambda expression.
"""
if getattr(settings, 'SHOP_FORCE_LOGIN', False):
if test_func:
return test_func
return lambda u: u.is_authenticated()
else:
return lambda u: True
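
# Illustrative usage sketch (not part of the original module). A class-based
# view is secured by listing LoginMixin before the view base class; the view
# and template names below are hypothetical:
#
#     from django.views.generic import TemplateView
#
#     class ShopOverviewView(LoginMixin, TemplateView):
#         template_name = 'shop/overview.html'
#         login_url = '/accounts/login/'   # optional override
#
# With SHOP_FORCE_LOGIN = True in settings, dispatch() redirects anonymous
# users to the login URL; otherwise the test function lets everyone through.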
| bsd-3-clause | 472,386,506,311,975,360 | 31.318182 | 76 | 0.639944 | false |
orione7/plugin.video.streamondemand-pureita | lib/sambatools/nmb/utils.py | 10 | 1510 | import re
import string
def encode_name(name, type, scope = None):
"""
Perform first and second level encoding of name as specified in RFC 1001 (Section 4)
"""
if name == '*':
name = name + '\0' * 15
elif len(name) > 15:
name = name[:15] + chr(type)
else:
name = string.ljust(name, 15) + chr(type)
def _do_first_level_encoding(m):
s = ord(m.group(0))
return string.uppercase[s >> 4] + string.uppercase[s & 0x0f]
encoded_name = chr(len(name) * 2) + re.sub('.', _do_first_level_encoding, name)
if scope:
encoded_scope = ''
for s in string.split(scope, '.'):
encoded_scope = encoded_scope + chr(len(s)) + s
return encoded_name + encoded_scope + '\0'
else:
return encoded_name + '\0'
def decode_name(name):
name_length = ord(name[0])
assert name_length == 32
def _do_first_level_decoding(m):
s = m.group(0)
return chr(((ord(s[0]) - ord('A')) << 4) | (ord(s[1]) - ord('A')))
decoded_name = re.sub('..', _do_first_level_decoding, name[1:33])
if name[33] == '\0':
return 34, decoded_name, ''
else:
decoded_domain = ''
offset = 34
while 1:
domain_length = ord(name[offset])
if domain_length == 0:
break
            # skip each label's length byte and accumulate the labels
            decoded_domain = decoded_domain + '.' + name[offset + 1:offset + 1 + domain_length]
            offset = offset + domain_length + 1
return offset + 1, decoded_name, decoded_domain
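
# Illustrative round-trip sketch (not part of the original module): encode a
# NetBIOS name of type 0x20 (server service) without a scope, then decode it.
if __name__ == '__main__':
    encoded = encode_name('FOOBAR', 0x20)
    assert len(encoded) == 34   # 1 length byte + 32 encoded chars + terminator
    consumed, decoded, scope = decode_name(encoded)
    assert consumed == 34 and scope == ''
    assert decoded[:15].rstrip() == 'FOOBAR' and ord(decoded[15]) == 0x20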
| gpl-3.0 | -1,968,895,061,272,961,500 | 29.2 | 88 | 0.538411 | false |
benbest86/watsancomm | watsancomm.py | 1 | 2202 | import os, logging, datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from google.appengine.dist import use_library
use_library('django', '1.2')
try:
from settings import CUTOFF_DAY, REMINDER_DAY
except ImportError:
CUTOFF_DAY = 6
REMINDER_DAY = 4
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from models import WeeklyUpdate
class PreviewWeekly(webapp.RequestHandler):
def get(self):
content = WeeklyUpdate.generate_summary_content(WeeklyUpdate.get_weekly_updates())
email = WeeklyUpdate.generate_summary_email(content)
self.response.out.write(email.html)
class SendUpdate(webapp.RequestHandler):
def get(self):
content = WeeklyUpdate.generate_summary_content(WeeklyUpdate.get_weekly_updates())
email = WeeklyUpdate.generate_summary_email(content)
email.send()
class SendReminder(webapp.RequestHandler):
def get(self):
if datetime.date.today().weekday() == REMINDER_DAY:
logging.info('Reminder scheduled today!')
email = WeeklyUpdate.generate_reminder_email()
if email is not None:
logging.info('Sending')
email.send()
else:
logging.info('Everyone has sent updates! Skipping reminder.')
else:
logging.info('No reminder scheduled today.')
class SendScheduledUpdate(SendUpdate):
def get(self):
if datetime.date.today().weekday() == CUTOFF_DAY:
logging.info('Update scheduled today! Sending.')
return super(SendScheduledUpdate, self).get()
else:
logging.info('No update scheduled today.')
application = webapp.WSGIApplication([
('/main/preview', PreviewWeekly),
('/main/send_update', SendUpdate),
('/main/send_reminder', SendReminder),
('/main/send_scheduled_update', SendScheduledUpdate),
], debug=True,)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| bsd-3-clause | -5,441,149,783,211,365,000 | 36.322034 | 93 | 0.611262 | false |
NeCTAR-RC/nova | nova/api/openstack/compute/legacy_v2/contrib/availability_zone.py | 1 | 6020 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import availability_zones
from nova import context as nova_context
from nova import objects
from nova import servicegroup
CONF = cfg.CONF
authorize_list = extensions.extension_authorizer('compute',
'availability_zone:list')
authorize_detail = extensions.extension_authorizer('compute',
'availability_zone:detail')
class AvailabilityZoneController(wsgi.Controller):
"""The Availability Zone API controller for the OpenStack API."""
def __init__(self):
super(AvailabilityZoneController, self).__init__()
self.servicegroup_api = servicegroup.API()
def _get_filtered_availability_zones(self, zones, is_available):
result = []
for zone in zones:
# Hide internal_service_availability_zone
if zone == CONF.internal_service_availability_zone:
continue
result.append({'zoneName': zone,
'zoneState': {'available': is_available},
"hosts": None})
return result
def _describe_availability_zones(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
filtered_available_zones = \
self._get_filtered_availability_zones(available_zones, True)
filtered_not_available_zones = \
self._get_filtered_availability_zones(not_available_zones, False)
return {'availabilityZoneInfo': filtered_available_zones +
filtered_not_available_zones}
def _describe_availability_zones_verbose(self, context, **kwargs):
ctxt = context.elevated()
available_zones, not_available_zones = \
availability_zones.get_availability_zones(ctxt)
# Available services
enabled_services = objects.ServiceList.get_all(context, disabled=False,
set_zones=True)
zone_hosts = {}
host_services = {}
api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
for service in enabled_services:
if service.binary in api_services:
# Skip API services in the listing since they are not
# maintained in the same way as other services
continue
zone_hosts.setdefault(service['availability_zone'], [])
if service['host'] not in zone_hosts[service['availability_zone']]:
zone_hosts[service['availability_zone']].append(
service['host'])
host_services.setdefault(service['availability_zone'] +
service['host'], [])
host_services[service['availability_zone'] + service['host']].\
append(service)
result = []
for zone in available_zones:
hosts = {}
for host in zone_hosts.get(zone, []):
hosts[host] = {}
for service in host_services[zone + host]:
alive = self.servicegroup_api.service_is_up(service)
hosts[host][service['binary']] = {'available': alive,
'active': True != service['disabled'],
'updated_at': service['updated_at']}
result.append({'zoneName': zone,
'zoneState': {'available': True},
"hosts": hosts})
for zone in not_available_zones:
result.append({'zoneName': zone,
'zoneState': {'available': False},
"hosts": None})
return {'availabilityZoneInfo': result}
def index(self, req):
"""Returns a summary list of availability zone."""
context = req.environ['nova.context']
authorize_list(context)
return self._describe_availability_zones(context)
def detail(self, req):
"""Returns a detailed list of availability zone."""
context = req.environ['nova.context']
authorize_detail(context)
# NOTE(alex_xu): back-compatible with db layer hard-code admin
# permission checks.
nova_context.require_admin_context(context)
if CONF.cells.enable:
# verbose doesn't work for cells
return self._describe_availability_zones(context)
return self._describe_availability_zones_verbose(context)
class Availability_zone(extensions.ExtensionDescriptor):
"""1. Add availability_zone to the Create Server v1.1 API.
2. Add availability zones describing.
"""
name = "AvailabilityZone"
alias = "os-availability-zone"
namespace = ("http://docs.openstack.org/compute/ext/"
"availabilityzone/api/v1.1")
updated = "2012-12-21T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-availability-zone',
AvailabilityZoneController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
| apache-2.0 | -9,053,240,428,898,729,000 | 39.952381 | 79 | 0.590698 | false |
crmccreary/openerp_server | openerp/addons/account_multistep_income_statement/__init__.py | 1 | 1088 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import report
import account
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,846,309,633,315,130,000 | 42.52 | 78 | 0.621324 | false |
reviewboard/rbintegrations | docs/releasenotes/_ext/extralinks.py | 2 | 1536 | """Sphinx plugins for special links in the Release Notes."""
from __future__ import unicode_literals
from docutils import nodes, utils
def setup(app):
app.add_config_value('bugtracker_url', '', True)
app.add_role('bug', bug_role)
app.add_role('cve', cve_role)
def bug_role(role, rawtext, text, linenum, inliner, options={}, content=[]):
try:
bugnum = int(text)
if bugnum <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'Bug number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text,
line=linenum)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
bugtracker_url = inliner.document.settings.env.config.bugtracker_url
if not bugtracker_url or '%s' not in bugtracker_url:
msg = inliner.reporter.error('bugtracker_url must be configured.',
line=linenum)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
ref = bugtracker_url % bugnum
node = nodes.reference(rawtext, 'Bug #' + utils.unescape(text),
refuri=ref, **options)
return [node], []
def cve_role(role, rawtext, text, linenum, inliner, options={}, content=[]):
ref = 'http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s' % text
node = nodes.reference(rawtext, 'CVE-' + utils.unescape(text),
refuri=ref, **options)
return [node], []
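
# Illustrative usage sketch (not part of the original module). In a Sphinx
# conf.py the extension is enabled and pointed at a tracker (the URL below is
# hypothetical):
#
#     extensions = ['extralinks']
#     bugtracker_url = 'https://example.org/bugs/%s'
#
# Release notes can then reference trackers inline:
#
#     Fixed a crash when saving drafts (:bug:`4012`, :cve:`2017-0001`).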
| mit | -262,050,317,709,809,540 | 31.680851 | 76 | 0.59375 | false |
sajuptpm/magnum | magnum/opts.py | 3 | 2082 | # Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import magnum.api.app
import magnum.api.auth
import magnum.common.clients
import magnum.common.exception
import magnum.common.magnum_keystoneclient
import magnum.common.paths
import magnum.common.rpc_service
import magnum.common.utils
import magnum.conductor.config
import magnum.conductor.handlers.bay_conductor
import magnum.conductor.handlers.docker_conductor
import magnum.conductor.handlers.k8s_conductor
import magnum.conductor.k8s_api
import magnum.conductor.template_definition
import magnum.db.sqlalchemy.models
def list_opts():
return [
('DEFAULT',
itertools.chain(magnum.api.auth.AUTH_OPTS,
magnum.common.magnum_keystoneclient.trust_opts,
magnum.common.paths.PATH_OPTS,
magnum.common.utils.UTILS_OPTS,
magnum.common.rpc_service.periodic_opts
)),
('api', magnum.api.app.API_SERVICE_OPTS),
('bay', magnum.conductor.template_definition.template_def_opts),
('conductor', magnum.conductor.config.SERVICE_OPTS),
('database', magnum.db.sqlalchemy.models.sql_opts),
('docker', magnum.conductor.handlers.docker_conductor.docker_opts),
('magnum_client', magnum.common.clients.magnum_client_opts),
('heat_client', magnum.common.clients.heat_client_opts),
('glance_client', magnum.common.clients.glance_client_opts),
('bay_heat', magnum.conductor.handlers.bay_conductor.bay_heat_opts),
('kubernetes',
magnum.conductor.k8s_api.kubernetes_opts),
]
| apache-2.0 | -7,473,204,428,388,660,000 | 39.823529 | 77 | 0.696446 | false |
kgeorgy/django-rest-framework | tests/test_prefetch_related.py | 10 | 2016 | from django.contrib.auth.models import Group, User
from django.test import TestCase
from rest_framework import generics, serializers
from rest_framework.test import APIRequestFactory
factory = APIRequestFactory()
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'groups')
class UserUpdate(generics.UpdateAPIView):
queryset = User.objects.exclude(username='exclude').prefetch_related('groups')
serializer_class = UserSerializer
class TestPrefetchRelatedUpdates(TestCase):
def setUp(self):
self.user = User.objects.create(username='tom', email='[email protected]')
self.groups = [Group.objects.create(name='a'), Group.objects.create(name='b')]
self.user.groups.set(self.groups)
def test_prefetch_related_updates(self):
view = UserUpdate.as_view()
pk = self.user.pk
groups_pk = self.groups[0].pk
request = factory.put('/', {'username': 'new', 'groups': [groups_pk]}, format='json')
response = view(request, pk=pk)
assert User.objects.get(pk=pk).groups.count() == 1
expected = {
'id': pk,
'username': 'new',
'groups': [1],
'email': '[email protected]'
}
assert response.data == expected
def test_prefetch_related_excluding_instance_from_original_queryset(self):
"""
Regression test for https://github.com/encode/django-rest-framework/issues/4661
"""
view = UserUpdate.as_view()
pk = self.user.pk
groups_pk = self.groups[0].pk
request = factory.put('/', {'username': 'exclude', 'groups': [groups_pk]}, format='json')
response = view(request, pk=pk)
assert User.objects.get(pk=pk).groups.count() == 1
expected = {
'id': pk,
'username': 'exclude',
'groups': [1],
'email': '[email protected]'
}
assert response.data == expected
| bsd-2-clause | -2,665,319,804,283,417,000 | 33.758621 | 97 | 0.610615 | false |
jwmcglynn/videosync | server/room_controller.py | 1 | 14611 | import models.room as room_model
import video_resolver
import vote_controller
import itertools
import unicodedata
from services.common import UrlError
from services.youtube import VideoError
active_rooms = dict()
NoSuchRoomException = room_model.NoSuchRoomException
def filter_non_printable(s):
# Strip unwanted characters: http://en.wikipedia.org/wiki/Mapping_of_Unicode_characters
PRINTABLE = set(("Lu", "Ll", "Nd", "Pc", "Zs"))
result = filter(lambda x: unicodedata.category(x) in PRINTABLE, unicode(s))
return u"".join(result).strip()
class CaseInsensitiveDict(dict):
def __setitem__(self, key, value):
super(CaseInsensitiveDict, self).__setitem__(key.lower(), value)
def __getitem__(self, key):
return super(CaseInsensitiveDict, self).__getitem__(key.lower())
def __delitem__(self, key):
return super(CaseInsensitiveDict, self).__delitem__(key.lower())
def __contains__(self, key):
return super(CaseInsensitiveDict, self).__contains__(key.lower())
class EventSource(object):
def __init__(self):
self.__callbacks = []
def add_callback(self, callback):
self.__callbacks.append(callback)
def remove_callback(self, callback):
self.__callbacks.remove(callback)
def invoke(self, source, *args):
for callback in self.__callbacks:
callback(source, *args)
def __len__(self):
return len(self.__callbacks)
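
# Illustrative sketch (not part of the original module): EventSource is a tiny
# observer registry -- callbacks are registered once and later invoked with the
# source object plus any extra arguments, e.g. (names are hypothetical):
#
#     on_join = EventSource()
#     on_join.add_callback(lambda room, user: log_join(room, user))
#     on_join.invoke(room, user_session)   # fires every registered callback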
def get_instance(room_id):
if room_id in active_rooms:
return active_rooms[room_id]
else:
room = RoomController(room_id)
active_rooms[room_id] = room
return room
class CommandError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class RoomController:
def __init__(self, room_id):
self.__room = room_model.Room(room_id)
self.__active_users = []
self.__user_lookup = CaseInsensitiveDict()
self.__queue = self.__room.video_queue()
self.__moderator = None
self.__current_video = None
self.__current_video_time = 0.0
self.__video_playing = False
self.__event_user_connect = EventSource()
self.__event_user_disconnect = EventSource()
self.__event_video_changed = EventSource()
self.__event_moderator_changed = EventSource()
self.__vote_skip = None
self.__vote_mutiny = None
if len(self.__queue):
self.__current_video = self.__queue[0]
@property
def event_user_connect(self):
return self.__event_user_connect
@property
def event_user_disconnect(self):
return self.__event_user_disconnect
@property
def event_video_changed(self):
return self.__event_video_changed
@property
def event_moderator_changed(self):
return self.__event_moderator_changed
def process_message(self, user_session, message):
try:
try:
if message["command"] == "guest_username":
self.process_guest_username(user_session, message)
elif message["command"] == "add_video":
self.process_add_video(user_session, message)
elif message["command"] == "vote_skip":
self.process_vote_skip(user_session, message)
elif message["command"] == "vote_mutiny" and user_session != self.__moderator:
self.process_vote_mutiny(user_session, message)
elif message["command"] == "chat_message":
self.process_chat_message(user_session, message)
elif user_session == self.__moderator:
# Moderator-level commands.
if message["command"] == "give_moderator":
self.process_give_moderator(user_session, message)
elif message["command"] == "update_video_state":
self.process_update_video_state(user_session, message)
elif message["command"] == "select_video":
self.process_select_video(user_session, message)
elif message["command"] == "move_video":
self.process_move_video(user_session, message)
elif message["command"] == "remove_video":
self.process_remove_video(user_session, message)
elif message["command"] == "vote_mutiny_cancel":
self.process_vote_mutiny_cancel(user_session, message)
else:
raise CommandError("Unknown command.")
else:
raise CommandError("Unknown command.")
except KeyError:
raise CommandError("Protocol error.")
except (CommandError, UrlError), error:
if "command" in message:
context = message["command"]
else:
context = "unknown"
user_session.send(
{"command": "command_error"
, "context": context
, "reason": error.message})
def process_guest_username(self, user_session, message):
if not user_session.is_guest or user_session.has_changed_username:
raise CommandError("Cannot change username.")
username = filter_non_printable(message["username"])
if "*" in username:
raise CommandError("Usernames cannot contain asterisks.")
elif len(username) == 0:
if len(message["username"]) == 0:
raise CommandError("Username too short.")
else:
raise CommandError("Username doesn't contain any printable characters!")
elif len(username) > 30:
raise CommandError("Username too long. The maximum length is 30 characters.")
# Check to see if there any duplicate usernames.
guest_username = "*%s*" % username
if username in self.__user_lookup or guest_username in self.__user_lookup:
raise CommandError("Username already in use.")
old_username = user_session.username
user_session.change_username(username)
del self.__user_lookup[old_username]
self.__user_lookup[user_session.username] = user_session
self.broadcast(
{"command": "guest_username_changed"
, "old_username": old_username
, "username": user_session.username})
def process_add_video(self, user_session, message):
def on_video_resolve_error(error):
if type(error.value) == VideoError:
user_session.send(
{"command": "command_error"
, "context": "add_video"
, "reason": error.value.message})
d = video_resolver.resolve(message["url"])
d.addCallbacks(self.on_video_resolved, on_video_resolve_error)
def process_vote_skip(self, user_session, message):
if not self.__vote_skip:
self.__vote_skip = vote_controller.VoteSkipController(self)
self.__vote_skip.vote(self, user_session)
def process_vote_mutiny(self, user_session, message):
if not self.__vote_mutiny:
self.__vote_mutiny = vote_controller.VoteMutinyController(self)
self.__vote_mutiny.vote(self, user_session)
def process_chat_message(self, user_session, message):
content = message["message"].strip()
if len(content) == 0:
raise CommandError("Message cannot be empty.")
self.broadcast(
{"command": "chat_message"
, "username": user_session.username
, "message": content})
def process_give_moderator(self, user_session, message):
new_moderator = self.lookup_user(message["username"])
if new_moderator is None:
raise CommandError("Username not found.")
self.update_moderator(new_moderator)
def process_update_video_state(self, user_session, message):
self.__video_playing = message["state"] == "playing"
self.__current_video_time = float(message["position"])
self.broadcast_all_but_one(
user_session
, {"command": "video_state"
, "position": self.__current_video_time
, "state": message["state"]})
def process_select_video(self, user_session, message):
video = self.lookup_video(int(message["item_id"]))
if video is None:
raise CommandError("Video not found.")
self.__current_video = video
self.__current_video_time = 0.0
self.broadcast(
{"command": "change_video"
, "video": self.serialize_video(video)})
self.event_video_changed.invoke(self, video)
def process_move_video(self, user_session, message):
video = self.lookup_video(int(message["item_id"]))
if video is None:
raise CommandError("Video not found.")
target_index = message["index"]
if target_index != int(target_index):
raise CommandError("Invalid index.")
if target_index < 0 or target_index >= len(self.__queue):
raise CommandError("Index out of range.")
if len(self.__queue) == 1:
return
def list_queue():
return map(lambda x: x.item_id, self.__queue)
self.__queue.remove(video)
self.__queue.insert(target_index, video)
# Update rank.
if target_index == 0:
video.update_rank(self.__queue[1].rank - 1.0)
elif target_index == len(self.__queue) - 1:
video.update_rank(self.__queue[target_index - 1].rank + 1.0)
else:
assert len(self.__queue) >= 3
video.update_rank((self.__queue[target_index - 1].rank + self.__queue[target_index + 1].rank) * 0.5)
self.broadcast(
{"command": "move_queue_video"
, "item_id": message["item_id"]
, "index": message["index"]})
def process_remove_video(self, user_session, message):
video = self.lookup_video(int(message["item_id"]))
if video is None:
raise CommandError("Video not found.")
removed_index = self.__queue.index(video)
self.__queue.remove(video)
video.remove()
self.broadcast(
{"command": "remove_queue_video"
, "item_id": message["item_id"]})
if video.item_id == self.__current_video.item_id:
if len(self.__queue) > 0:
new_index = removed_index
if new_index == len(self.__queue):
new_index -= 1
self.__current_video = self.__queue[new_index]
self.broadcast(
{"command": "change_video"
, "video": self.serialize_video(self.__current_video)})
else:
self.__current_video = None
self.__current_video_time = 0.0
self.event_video_changed.invoke(self, self.__current_video)
def process_vote_mutiny_cancel(self, user_session, message):
if self.__vote_mutiny is not None:
self.__vote_mutiny.moderator_cancel(self)
#### Broadcasting.
def broadcast(self, message):
for session in self.__active_users:
session.send(message)
def broadcast_all_but_one(self, excluded_session, message):
for session in self.__active_users:
if session != excluded_session:
session.send(message)
#### Users.
@property
def active_users(self):
return self.__active_users
def next_guest_username(self):
def username_generator():
for i in itertools.count():
yield "unnamed %d" % i
for username in username_generator():
guest_username = "*%s*" % username
if guest_username not in self.__user_lookup:
return username
def user_connect(self, user_session):
user_session.send(
{"command": "room_joined"
, "username": user_session.username})
self.broadcast(
{"command": "user_connect"
, "username": user_session.username})
self.__active_users.append(user_session)
self.__user_lookup[user_session.username] = user_session
self.event_user_connect.invoke(self, user_session)
# If this is the only user make them moderator.
if self.__moderator is None:
# Only update variable, send_initial_state will send the set_moderator message.
self.__moderator = user_session
self.send_initial_state(user_session)
def user_disconnect(self, user_session):
self.__active_users.remove(user_session)
del self.__user_lookup[user_session.username]
self.event_user_disconnect.invoke(self, user_session)
if len(self.__active_users) == 0:
del active_rooms[self.__room.room_id]
else:
self.broadcast(
{"command": "user_disconnect"
, "username": user_session.username})
if self.__moderator == user_session:
# Pick the oldest connected user as the new moderator.
self.update_moderator(self.__active_users[0])
def update_moderator(self, user_session):
self.__moderator = user_session
self.broadcast(
{"command": "set_moderator"
, "username": user_session.username})
self.event_moderator_changed.invoke(self, user_session)
def lookup_user(self, username):
for user in self.__active_users:
if username == user.username:
return user
return None
def send_initial_state(self, user_session):
user_session.send(
{"command": "initial_users"
, "users": map(lambda x: x.username, self.__active_users)})
user_session.send(
{"command": "set_moderator"
, "username": self.__moderator.username})
user_session.send(
{"command": "initial_queue"
, "queue": map(lambda x: self.serialize_video(x), self.__queue)})
if self.__current_video is not None:
user_session.send(
{"command": "change_video"
, "video": self.serialize_video(self.__current_video)})
if self.__video_playing or self.__current_video_time > 0.0:
user_session.send(
{"command": "video_state"
, "state": "playing" if self.__video_playing else "paused"
, "time": self.__current_video_time})
#### Videos.
def lookup_video(self, item_id):
for video in self.__queue:
if item_id == video.item_id:
return video
return None
def serialize_video(self, video):
return {
"item_id": video.item_id
, "service": video.service
, "url": video.url
, "title": video.title
, "duration": video.duration
, "start_time": video.start_time
}
def advance_video(self):
if self.__current_video is not None and len(self.__queue) > 1:
index = self.__queue.index(self.__current_video)
index += 1
if index == len(self.__queue):
index = 0
video = self.__queue[index]
self.broadcast(
{"command": "change_video"
, "video": self.serialize_video(video)})
self.__current_video = video
self.__current_video_time = 0.0
self.event_video_changed.invoke(self, video)
def on_video_resolved(self, video_info):
for video in self.__queue:
if video_info.url == video.url:
return
video = self.__room.add_video(
video_info.service
, video_info.url
, video_info.title
, video_info.duration
, video_info.start_time)
self.__queue.append(video)
serialized_video = self.serialize_video(video)
self.broadcast(
{"command": "add_queue_video"
, "video": serialized_video})
if len(self.__queue) == 1:
self.broadcast(
{"command": "change_video"
, "video": serialized_video})
self.__current_video = video
self.__current_video_time = 0.0
self.event_video_changed.invoke(self, video)
#### Voting.
def vote_skip_remove(self):
assert(self.__vote_skip)
self.__vote_skip.unregister(self)
self.__vote_skip = None
def vote_mutiny_remove(self):
assert(self.__vote_mutiny)
self.__vote_mutiny.unregister(self)
self.__vote_mutiny = None
| isc | -637,770,963,327,679,900 | 28.695378 | 103 | 0.657381 | false |
BackupGGCode/python-for-android | python3-alpha/python3-src/Doc/includes/mp_webserver.py | 48 | 2056 | #
# Example where a pool of http servers share a single listening socket
#
# On Windows this module depends on the ability to pickle a socket
# object so that the worker processes can inherit a copy of the server
# object. (We import `multiprocessing.reduction` to enable this pickling.)
#
# Not sure if we should synchronize access to `socket.accept()` method by
# using a process-shared lock -- does not seem to be necessary.
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import os
import sys
from multiprocessing import Process, current_process, freeze_support
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
if sys.platform == 'win32':
    import multiprocessing.reduction  # make sockets picklable/inheritable
def note(format, *args):
sys.stderr.write('[%s]\t%s\n' % (current_process().name, format % args))
class RequestHandler(SimpleHTTPRequestHandler):
# we override log_message() to show which process is handling the request
def log_message(self, format, *args):
note(format, *args)
def serve_forever(server):
note('starting server')
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def runpool(address, number_of_processes):
# create a single server object -- children will each inherit a copy
server = HTTPServer(address, RequestHandler)
# create child processes to act as workers
for i in range(number_of_processes - 1):
Process(target=serve_forever, args=(server,)).start()
# main process also acts as a worker
serve_forever(server)
def test():
DIR = os.path.join(os.path.dirname(__file__), '..')
ADDRESS = ('localhost', 8000)
NUMBER_OF_PROCESSES = 4
print('Serving at http://%s:%d using %d worker processes' % \
(ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES))
print('To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32'])
os.chdir(DIR)
runpool(ADDRESS, NUMBER_OF_PROCESSES)
if __name__ == '__main__':
freeze_support()
test()
| apache-2.0 | 8,410,578,707,310,270,000 | 28.371429 | 77 | 0.691634 | false |
awsdit/awsdit | aws_audit/local_helpers/ec2_helper.py | 2 | 20391 | """
ec2 helper calls.
"""
import boto3
import re
import time
import datetime
import dateutil.parser
from local_helpers import misc
def date_to_days(time_stamp):
if time_stamp:
today = datetime.datetime.now()
create_date = datetime.datetime.strptime(time_stamp, "%Y-%m-%dT%H:%M:%S.%fZ")
return str((today - create_date).days)
else:
return str('-1')
def ec2_resource(session):
    """continue from multithread call
returns an ec2 resource
Args:
session (session.Session()):
"""
ec2 = session.resource('ec2')
for instance in ec2.instances.all():
#print instance.private_dns_name
print instance.id
def check_tag(obj, tag_name):
"""
returns tag_name values if tag_name exist
Args:
obj (dict): list of tags
tag_name (string): tag name value
Returns:
tag_name values (string)
"""
    if obj.get('Tags'):
        for tag in obj.get('Tags'):
            if tag.get('Key') == tag_name:
                tag_value = tag.get('Value')
                #tag_value = re.sub('[,]', ' / ', tag_value)
                tag_value = re.sub('[,]', ' <br> ', tag_value)
                return tag_value
    return str("no-record")
def check_port(port):
"""return port value"""
"""port value == None, means -1 or any"""
if str(port) == 'None':
return '-1'
else:
return port
def check_proto(proto):
"""return proto value"""
"""proto value == -1, means all protocols"""
if str(proto) == '-1':
return 'all'
else:
return proto
def describe_key_pairs_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"KeyName",
"Fingerprint"
))
def describe_key_pairs(ec2, account, region, output_bucket):
"""continue from multithread ec2.describe_key_pairs() call
Args:
ec2 (object): ec2 client object
account (dict): aws accounts
region (dict): regions
output_bucket (list): results bucket holder
Returns:
nothing. appends results to output_bucket
"""
for key_pair in ec2.describe_key_pairs().get('KeyPairs'):
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(region.get('RegionName')),
misc.check_if(key_pair.get('KeyName')),
misc.check_if(key_pair.get('KeyFingerprint'))
)))
def describe_instances_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"VpcId",
"ec2Id",
"Type",
"State",
"ec2Name",
"PrivateIPAddress",
"PublicIPAddress",
"KeyPair"
))
def describe_instances(ec2, account, region, output_bucket):
"""continue from multithread ec2.describe_instances() call
Args:
ec2 (object): ec2 client object
account (dict): aws accounts
region (dict): regions
output_bucket (list): results bucket holder
Returns:
nothing. appends results to output_bucket
"""
ec2_list = [i for r in
ec2.describe_instances().get('Reservations') for i in
r.get('Instances')]
for ec2_obj in ec2_list:
#print ec2_obj
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(region.get('RegionName')),
misc.check_if(ec2_obj.get('VpcId')),
misc.check_if(ec2_obj.get('InstanceId')),
misc.check_if(ec2_obj.get('InstanceType')),
misc.check_if(ec2_obj.get('State').get('Name')),
misc.check_if(check_tag(ec2_obj, str('Name'))),
misc.check_if(ec2_obj.get('PrivateIpAddress')),
misc.check_if(ec2_obj.get('PublicIpAddress')),
misc.check_if(ec2_obj.get('KeyName'))
)))
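
# Illustrative call sketch (not part of the original module). The account and
# region dicts mirror what the multithreaded callers pass in; the profile and
# account names below are hypothetical:
#
#     session = boto3.session.Session(profile_name='audit', region_name='us-east-1')
#     ec2 = session.client('ec2')
#     bucket = []
#     describe_instances(ec2, {'name': 'prod-account'}, {'RegionName': 'us-east-1'}, bucket)
#     report = [describe_instances_header()] + bucket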
def security_group_list_header():
"""return header for security group list"""
return misc.format_line((
"Account",
"VpcId",
"Region",
"GroupID",
"Instances",
"SG-GroupName",
"RFC",
"Description"))
def security_group_list(ec2, account, region, output_bucket):
"""generate list of ec2s to check agains security groups
Args:
ec2 (object): ec2 client object
account (dict): aws accounts
region (dict): regions
output_bucket (list): results bucket holder
Returns:
nothing. appends results to output_bucket
"""
"""could not find ec2.instances() anywhere in boto3"""
ec2_list = [i for r in
ec2.describe_instances().get('Reservations') for i in
r.get('Instances')]
"""generate security group list"""
sg_list = ec2.describe_security_groups().get('SecurityGroups')
for sg_obj in sg_list:
ec2count = 0
"""find out how many ec2s are using a security group"""
for ec2_obj in ec2_list:
for sg in ec2_obj.get('SecurityGroups'):
if sg_obj.get('GroupId') == sg.get('GroupId'):
ec2count += 1
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(sg_obj.get('VpcId')),
misc.check_if(region.get('RegionName')),
misc.check_if(sg_obj.get('GroupId')),
misc.check_if(str(ec2count)),
misc.check_if(sg_obj.get('GroupName')),
misc.check_if(check_tag(sg_obj, str('RFC'))),
misc.check_if(re.sub('[,]', '-', sg_obj.get('Description')))
)))
def sg_rule_sets_header():
"""returns header for sg rule sets"""
return misc.format_line((
"AccountId",
"VpcId",
"Region",
"GroupId",
"SG-GroupName",
"Source",
"FromPort",
"ToPort",
"Protocol"))
def sg_rule_sets(ec2, account, region, output_bucket):
"""generate list of security group rule sets
Args:
ec2 (object): ec2 client object
account (dict): aws accounts
region (dict): regions
output_bucket (list): results bucket holder
Returns:
nothing. appends results to output_bucket
"""
"""generate security group list"""
sg_list = ec2.describe_security_groups().get('SecurityGroups')
for sg_obj in sg_list:
for rule in sg_obj.get('IpPermissions'):
"""cidr as source"""
for cidr in rule.get('IpRanges'):
if cidr.get('CidrIp'):
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(sg_obj.get('VpcId')),
misc.check_if(region.get('RegionName')),
misc.check_if(sg_obj.get('GroupId')),
misc.check_if(sg_obj.get('GroupName')),
misc.check_if(str(cidr.get('CidrIp'))),
misc.check_if(str(check_port(rule.get('FromPort')))),
misc.check_if(str(check_port(rule.get('ToPort')))),
misc.check_if(str(check_proto(rule.get('IpProtocol'))))
)))
"""security groups as source"""
for group in rule.get('UserIdGroupPairs'):
if group.get('GroupId'):
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(sg_obj.get('VpcId')),
misc.check_if(region.get('RegionName')),
misc.check_if(sg_obj.get('GroupId')),
misc.check_if(sg_obj.get('GroupName')),
misc.check_if(group.get('GroupId')),
misc.check_if(str(check_port(rule.get('FromPort')))),
misc.check_if(str(check_port(rule.get('ToPort')))),
misc.check_if(str(check_proto(rule.get('IpProtocol'))))
)))
def sg_rule_sets_by_ec2_header():
"""returns header for sg rule sets"""
return misc.format_line((
"Account",
"Region",
"VpcId",
"ec2Id",
"State",
"ec2Name",
"PrivateIPAddress",
"PublicIPAddress",
"GroupID",
"GroupName",
"Source",
"StartPort",
"EndPort",
"Protocol"))
def sg_rule_sets_by_ec2(ec2, account, region, output_bucket):
"""generate list of security group rule sets by ec2 instance
Args:
ec2 (object): ec2 client object
account (dict): aws accounts
region (dict): regions
output_bucket (list): results bucket holder
Returns:
nothing. appends results to output_bucket
"""
"""could not find ec2.instances() anywhere in boto3"""
ec2_list = [i for r in
ec2.describe_instances().get('Reservations') for i in
r.get('Instances')]
"""generate security group list"""
sg_list = ec2.describe_security_groups().get('SecurityGroups')
for sg_obj in sg_list:
"""find out how many ec2s are using a security group"""
for ec2_obj in ec2_list:
for ec2sg in ec2_obj.get('SecurityGroups'):
if sg_obj.get('GroupId') == ec2sg.get('GroupId'):
"""move on to rule entries"""
for rule in sg_obj.get('IpPermissions'):
"""cidr as source"""
for cidr in rule.get('IpRanges'):
if cidr.get('CidrIp'):
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(region.get('RegionName')),
misc.check_if(sg_obj.get('VpcId')),
misc.check_if(ec2_obj.get('InstanceId')),
misc.check_if(ec2_obj.get('State').get('Name')),
misc.check_if(check_tag(ec2_obj, str('Name'))),
misc.check_if(ec2_obj.get('PrivateIpAddress')),
misc.check_if(ec2_obj.get('PublicIpAddress')),
misc.check_if(sg_obj.get('GroupId')),
misc.check_if(sg_obj.get('GroupName')),
misc.check_if(str(cidr.get('CidrIp'))),
misc.check_if(str(check_port(rule.get('FromPort')))),
misc.check_if(str(check_port(rule.get('ToPort')))),
misc.check_if(str(check_proto(rule.get('IpProtocol'))))
)))
"""security groups as source"""
for group in rule.get('UserIdGroupPairs'):
if group.get('GroupId'):
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(region.get('RegionName')),
misc.check_if(sg_obj.get('VpcId')),
misc.check_if(ec2_obj.get('InstanceId')),
misc.check_if(ec2_obj.get('State').get('Name')),
misc.check_if(check_tag(ec2_obj, str('Name'))),
misc.check_if(ec2_obj.get('PrivateIpAddress')),
misc.check_if(ec2_obj.get('PublicIpAddress')),
misc.check_if(sg_obj.get('GroupId')),
misc.check_if(sg_obj.get('GroupName')),
misc.check_if(group.get('GroupId')),
misc.check_if(str(check_port(rule.get('FromPort')))),
misc.check_if(str(check_port(rule.get('ToPort')))),
misc.check_if(str(check_proto(rule.get('IpProtocol'))))
)))
def sg_rule_sets_by_ec2_with_role_header():
"""returns header for sg rule sets"""
return misc.format_line((
"Account",
"Region",
"VpcId",
"ec2Id",
"Role",
"State",
"ec2Name",
"PrivateIPAddress",
"PublicIPAddress",
"GroupID",
"GroupName",
"Source",
"StartPort",
"EndPort",
"Protocol"))
def sg_rule_sets_by_ec2_with_role(ec2, account, region, output_bucket):
"""generate list of security group rule sets by ec2 instance
Args:
ec2 (object): ec2 client object
account (dict): aws accounts
region (dict): regions
output_bucket (list): results bucket holder
Returns:
nothing. appends results to output_bucket
"""
"""could not find ec2.instances() anywhere in boto3"""
ec2_list = [i for r in
ec2.describe_instances().get('Reservations') for i in
r.get('Instances')]
"""generate security group list"""
sg_list = ec2.describe_security_groups().get('SecurityGroups')
for sg_obj in sg_list:
"""find out how many ec2s are using a security group"""
for ec2_obj in ec2_list:
"""check if ec2 is attached to a role"""
if ec2_obj.get('IamInstanceProfile'):
ec2_role = re.split('/',ec2_obj.get('IamInstanceProfile').get('Arn'))[1]
for ec2sg in ec2_obj.get('SecurityGroups'):
if sg_obj.get('GroupId') == ec2sg.get('GroupId'):
"""move on to rule entries"""
for rule in sg_obj.get('IpPermissions'):
"""cidr as source"""
for cidr in rule.get('IpRanges'):
if cidr.get('CidrIp'):
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(region.get('RegionName')),
misc.check_if(sg_obj.get('VpcId')),
misc.check_if(ec2_obj.get('InstanceId')),
misc.check_if(ec2_role),
misc.check_if(ec2_obj.get('State').get('Name')),
misc.check_if(check_tag(ec2_obj, str('Name'))),
misc.check_if(ec2_obj.get('PrivateIpAddress')),
misc.check_if(ec2_obj.get('PublicIpAddress')),
misc.check_if(sg_obj.get('GroupId')),
misc.check_if(sg_obj.get('GroupName')),
misc.check_if(str(cidr.get('CidrIp'))),
misc.check_if(str(check_port(rule.get('FromPort')))),
misc.check_if(str(check_port(rule.get('ToPort')))),
misc.check_if(str(check_proto(rule.get('IpProtocol'))))
)))
"""security groups as source"""
for group in rule.get('UserIdGroupPairs'):
if group.get('GroupId'):
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(region.get('RegionName')),
misc.check_if(sg_obj.get('VpcId')),
misc.check_if(ec2_obj.get('InstanceId')),
misc.check_if(ec2_role),
misc.check_if(ec2_obj.get('State').get('Name')),
misc.check_if(check_tag(ec2_obj, str('Name'))),
misc.check_if(ec2_obj.get('PrivateIpAddress')),
misc.check_if(ec2_obj.get('PublicIpAddress')),
misc.check_if(sg_obj.get('GroupId')),
misc.check_if(sg_obj.get('GroupName')),
misc.check_if(group.get('GroupId')),
misc.check_if(str(check_port(rule.get('FromPort')))),
misc.check_if(str(check_port(rule.get('ToPort')))),
misc.check_if(str(check_proto(rule.get('IpProtocol'))))
)))
def describe_snapshots_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"SnapshotId",
"Age",
"CreateDate",
"Size",
"Encrypted",
"Description"
))
def describe_snapshots(ec2, account, region, output_bucket):
"""continue from multithread describe_snapshots() call
Args:
ec2 (object): ec2 client object
account (dict): aws accounts
region (dict): regions
output_bucket (list): results bucket holder
Returns:
nothing. appends results to output_bucket
"""
'''extract owner_id from role'''
owner_id = str(re.split(':',account.get('role_arn'))[4])
'''get list of snapshots owned by owner_id'''
snap_list = ec2.describe_snapshots(OwnerIds=[owner_id]).get('Snapshots')
for snap_obj in snap_list:
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(region.get('RegionName')),
misc.check_if(str(snap_obj.get('SnapshotId'))),
misc.check_if(str(misc.date_to_days(snap_obj.get('StartTime')))),
misc.check_if(str(snap_obj.get('StartTime').strftime('%Y_%m_%d'))),
misc.check_if(str(snap_obj.get('VolumeSize'))),
misc.check_if(str(snap_obj.get('Encrypted'))),
#'''get rid of commas if present'''
misc.check_if(str(re.sub('[,]','', snap_obj.get('Description')))),
)))
def describe_images_header():
"""generate output header"""
return misc.format_line((
"Account",
"Region",
"ImageId",
"State",
"Age",
"Public",
"Name"
))
def describe_images(ec2, account, region, output_bucket):
"""continue from multithread describe_snapshots() call
Args:
ec2 (object): ec2 client object
account (dict): aws accounts
region (dict): regions
output_bucket (list): results bucket holder
Returns:
nothing. appends results to output_bucket
"""
'''extract owner_id from role'''
owner_id = str(re.split(':',account.get('role_arn'))[4])
'''get list of amis owned by owner_id'''
ami_list = ec2.describe_images(Owners=[owner_id]).get('Images')
for ami_obj in ami_list:
output_bucket.append(misc.format_line((
misc.check_if(account.get('name')),
misc.check_if(region.get('RegionName')),
misc.check_if(str(ami_obj.get('ImageId'))),
misc.check_if(str(ami_obj.get('State'))),
misc.check_if(str(date_to_days(ami_obj.get('CreationDate')))),
misc.check_if(str(ami_obj.get('Public'))),
#'''get rid of commas if present'''
misc.check_if(str(re.sub('[,]','', ami_obj.get('Name')))),
)))
| gpl-2.0 | -2,497,022,850,391,803,400 | 39.060904 | 91 | 0.487127 | false |
cwlseu/recipes | PythonApp/cocoanalysis/src/plot.py | 2 | 2163 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
def draw_histmap(z, x, y, filename):
fig = plt.figure()
ax = Axes3D(fig)
#ax = fig.gca(projection='3d')
ax.plot(x, y, z, label='parametric curve')
ax.legend()
ax.set_xlabel('Width')
ax.set_ylabel('Height')
ax.set_zlabel('Objects Count')
#plt.show()
plt.savefig(filename)
plt.close()
def draw_heatmap(data, xlabels, ylabels, filename):
figure=plt.figure(facecolor='w')
ax=figure.add_subplot(2,1,1,position=[1,1,1,1])
ax.set_yticks(range(len(ylabels)))
ax.set_yticklabels(ylabels)
ax.set_xticks(range(len(xlabels)))
ax.set_xticklabels(xlabels)
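    # scan the data for its global min/max to anchor the colour scale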
vmax=data[0][0]
vmin=data[0][0]
for i in data:
for j in i:
if j>vmax:
vmax=j
if j<vmin:
vmin=j
_map = ax.imshow(data,interpolation='lanczos',cmap='viridis',aspect='auto',vmin=vmin,vmax=vmax)
cb = plt.colorbar(mappable=_map,cax=None,ax=None,shrink=2)
#plt.show()
plt.savefig(filename)
plt.close()
def draw_blubble(data, x, y, filename):
colors = np.random.rand(len(x))
plt.scatter(x, y, s=data, c=colors, alpha=1)
# plt.show()
plt.savefig(filename)
plt.close()
def draw_bar3d(data, width, height, strip_size, filename):
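    """plot a 3D bar histogram over a width x height grid split into strip_size bins"""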
x = np.array([i*strip_size for i in range(width/strip_size)])
y = np.array([i*strip_size for i in range(height/strip_size)])
hist = np.array(data)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
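    # build a grid of bar anchor positions, offset a quarter strip into each bin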
xpos, ypos = np.meshgrid(x[:-1] + 0.25*strip_size, y[:-1] + 0.25*strip_size)
xpos = xpos.flatten('F')
ypos = ypos.flatten('F')
zpos = np.zeros_like(xpos)
# Construct arrays with the dimensions for the 3dbars.
dx = 0.5 * np.ones_like(zpos)
dy = dx.copy()
ax.bar3d(xpos, ypos, zpos, dx, dy, hist, color='r', zsort='average')
# ax.annotate(hist, va="bottom", ha="center")
# ax.annotate("", xy=zip(xpos, ypos), xytext=hist)
plt.savefig(filename)
plt.close() | gpl-3.0 | -4,325,561,795,716,307,000 | 30.823529 | 99 | 0.627369 | false |
tunetosuraj/spectrum | recommenders/keyword_api.py | 1 | 8701 | import requests, os, nltk, math
from items.models import Key
# from people.models import UserProfile
from recommenders.models import KeyToKeyLink, KeyToUserLink
class BigHugeThesaurus:
def __init__(self, key=None):
self.url = "http://words.bighugelabs.com/api/2/"
self.key = key
def request(self):
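        """call the Big Huge Thesaurus endpoint for this key and return parsed JSON, or an error dict on failure"""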
self.url = self.url + os.environ['SPECTRUM_BIGHUGELABS_SECRET_KEY'] + '/' + str(self.key.name) + '/json'
self.response = requests.get(self.url)
if self.response.status_code == 200 or self.response.status_code == 301:
return self.response.json()
else:
self.response = {'err':'404,500,etc'}
return self.response
def filter(self):
raw_response = self.request()
response = []
if raw_response.get('noun'):
for word in raw_response.get('noun').get('syn', []):
response.append(str(word))
for word in raw_response.get('noun').get('sim', []):
response.append(str(word))
for word in raw_response.get('noun').get('rel', []):
response.append(str(word))
if raw_response.get('verb'):
for word in raw_response.get('verb').get('syn', []):
response.append(str(word))
for word in raw_response.get('verb').get('sim', []):
response.append(str(word))
for word in raw_response.get('verb').get('rel', []):
response.append(str(word))
return response
def _get_or_create(self, keyword, weight=None):
try:
listing = Key.objects.get(name__iexact=str(keyword))
listing.stop_bighuge = True
listing.save()
except Key.DoesNotExist:
listing = Key.objects.create(name=str(keyword), base_level=False)
KeyToKeyLink.objects.create(item1=listing, item2=self.key, raw_weight=weight, calculated_weight=weight, origin='BigHuge Synonyms')
return listing
def analyse(self):
response = self.filter()
for k in response:
self._get_or_create(k)
return response
class WordsAPI:
def __init__(self, key=None):
self.url = "https://wordsapiv1.p.mashape.com/words/"
self.key = key
def request(self):
self.url = self.url + str(self.key.name)
self.response = requests.get( self.url,
headers={
"X-Mashape-Key": os.environ['SPECTRUM_MASHAPE_SECRET_KEY'],
"Accept": "application/json"
}
).json()
return self.response
def filter(self):
raw_response = self.request()
typeof_response = []
hastypes_response = []
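        # 'typeOf' entries are broader terms (hypernyms); 'hasTypes' entries are narrower terms (hyponyms)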
if raw_response.get('results'):
for result in raw_response.get('results'):
if result.get('typeOf'):
for word in result.get('typeOf'):
typeof_response.append(str(word))
if result.get('hasTypes'):
for word in result.get('hasTypes'):
hastypes_response.append(str(word))
return typeof_response, hastypes_response
def _get_or_create(self, keyword, weight=None, _type='Top Down'):
try:
listing = Key.objects.get(name__iexact=str(keyword))
listing.stop_wordsapi = True
listing.save()
except Key.DoesNotExist:
listing = Key.objects.create(name=str(keyword), stop_wordsapi=True)
KeyToKeyLink.objects.create(item1=listing, item2=self.key, raw_weight=weight, calculated_weight=weight, origin='WordsAPI Symantec Relatedness '+ _type)
return listing
def analyse(self):
typeof_response, hastypes_response = self.filter()
for k in typeof_response:
self._get_or_create(k, 0.65, 'Bottom Up')
for k in hastypes_response:
self._get_or_create(k, 0.35, 'Top Down')
return (typeof_response + hastypes_response)
class NltkDistance:
def __init__(self, key1=None, key2=None):
self.k1 = key1
self.k2 = key2
        self.key1 = self.k1.name
        self.key2 = self.k2.name
def split(self,text):
clean_text = ''.join(e for e in text if e.isalnum() or e == ' ') # Removes punctuation
word_list = []
        for i in clean_text.split():  # iterate over words, not characters
            if not i.isdigit(): # Removes purely numeric tokens
word_list.append(i)
return word_list
def calculate(self):
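        """return the highest pairwise word-similarity score between the two keys, or None if no pair is similar enough"""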
keywords1 = self.split(self.key1)
keywords2 = self.split(self.key2)
similarity_constant = 4
minimum = 1 / math.e
maximum = 1
score_list = []
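        # compare every word from key1 against every word from key2 by Levenshtein (edit) distance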
for i in keywords1:
for j in keywords2:
x = len(i)
y = len(j)
                distance = nltk.edit_distance(i, j)
                if distance == 0:
                    # identical words would divide by zero below; give them the top score
                    score_list.append(maximum)
                    continue
                if ((x + y) / similarity_constant) >= distance:
raw_score = 2/( math.e**( x/distance ) + ( math.e**( y/distance ) ) )
scaled_score = (raw_score - minimum) / (maximum - minimum)
score_list.append(scaled_score)
if score_list:
return max(score_list)
else:
return None
def _get(self, keyword, weight=None):
try:
self.k1.stop_nltkdistance = True
self.k1.save()
KeyToKeyLink.objects.create(item1=self.k1, item2=self.k2, raw_weight=weight, calculated_weight=weight, origin='NLTK Distance')
except Key.DoesNotExist:
pass
def analyse(self):
score = self.calculate()
if score is not None and score >= 0.4:
            self._get(self.key1, weight=score)
return score
def api_call(key):
def big(key):
if key.base_level is True:
if not key.stop_bighuge:
print('Calling Synonym BigHugeThesaurus API..')
k = BigHugeThesaurus(key=key)
k.analyse()
key.stop_bighuge = True
key.save()
def words(key):
if key.base_level is True:
if not key.stop_wordsapi:
print('Calling Symantec WordsAPI..')
k = WordsAPI(key=key)
k.analyse()
key.stop_wordsapi = True
key.save()
# Schedule Jobs here..
print('Attempt...')
big(key)
words(key)
print('Completed.')
def nltk_distance(key1,key2):
if not key1.stop_nltkdistance:
        if not KeyToKeyLink.objects.filter(item1=key1, item2=key2, origin='NLTK Distance') and not KeyToKeyLink.objects.filter(item1=key2, item2=key1, origin='NLTK Distance'):
print('Calling NLTK distance..')
k = NltkDistance(key1, key2)
k.analyse()
def destroy_k2u_links(keys, user):
    weight = 0.8
    for k in keys:
try:
element = KeyToUserLink.objects.get(item1__name__iexact=str(k), item2=user, raw_weight=weight, calculated_weight=weight, origin='User Interest/Career')
element.delete()
except KeyToUserLink.DoesNotExist:
pass
def add_k2u_links_by_key(keys, user):
weight = 0.8
for k in keys:
try:
listing = Key.objects.get(name__iexact=str(k))
except Key.DoesNotExist:
listing = Key.objects.create(name=str(k))
KeyToUserLink.objects.create(item1=listing, item2=user, raw_weight=weight, calculated_weight=weight, origin='User Interest/Career')
def add_k2u_links_by_user(user):
weight = 0.8
keys = [x.keyword for x in user.interest_keywords.all()] + [x.keyword for x in user.career_keywords.all()]
for k in keys:
        if not KeyToUserLink.objects.filter(item1__name__iexact=str(k), item2=user, origin='User Interest/Career').exists():
try:
listing = Key.objects.get(name__iexact=str(k))
except Key.DoesNotExist:
listing = Key.objects.create(name=str(k))
KeyToUserLink.objects.create(item1=listing, item2=user, raw_weight=weight, calculated_weight=weight, origin='User Interest/Career')
def user_migration():
user_list = [] #UserProfile.objects.all()
for u in user_list:
print('User : ' + str(u))
try:
add_k2u_links_by_user(u)
print('Linked keywords.')
except:
pass
def migration():
keyword_list = Key.objects.all()
for keyword in keyword_list:
print('Keyword : ' + str(keyword))
api_call(keyword)
print('-- next() --> ? ')
print('-- All Tasks Completed --')
| agpl-3.0 | 4,338,635,809,597,963,000 | 31.588015 | 163 | 0.557292 | false |
CIGNo-project/CIGNo | cigno/metadata/forms.py | 1 | 3488 | # -*- coding: utf-8 -*-
from django import forms
import json
import os
import tempfile
from models import *
supported_format = (".shp", ".tif", ".tiff", ".geotif", ".geotiff")
class JSONField(forms.CharField):
def clean(self, text):
text = super(JSONField, self).clean(text)
try:
return json.loads(text)
except ValueError:
raise forms.ValidationError("this field must be valid JSON")
class ResourceUploadForm(forms.ModelForm):
#resource_files = ("base_file",)
class Meta:
model = Resource
# TODO: use default language (configure resource_upload.html)
fields = ('titleml_it', 'abstractml_it', 'url_field', 'base_file')
#exclude = ('uuid','name')
### TODO manage permissions
#permissions = JSONField()
class ResourceSimpleForm(forms.ModelForm):
class Meta:
model = Resource
fields = ('titleml_it', 'titleml_en',
'abstractml_it', 'abstractml_en',
'presentation_form',
'gemetkeywords',
)
# class ResourceForm(forms.ModelForm):
# resource_files = ("base_file",)
# class Meta:
# model = Resource
# exclude = ('uuid',)
class ResourceForm(forms.ModelForm):
resource_files = ("base_file",)
class Meta:
model = Resource
# TODO: use default language (configure resource_upload.html)
fields = ('type', 'titleml_it', 'titleml_en', 'abstractml_it', 'abstractml_en', 'gemetkeywords', 'base_file', 'url_field', 'use_limitation', 'geographic_bounding_box', 'geonamesids', 'lineage_it' , 'lineage_en', 'equivalent_scale', 'distance', 'uom_distance', 'vertical_datum', 'vertical_extent_min', 'vertical_extent_max', 'uom_vertical_extent', 'other_citation_details_it', 'other_citation_details_en', 'supplemental_information_ml_it', 'supplemental_information_ml_en', 'resource_type', 'language', 'character_set', 'update_frequency', 'spatial_representation_type_ext','license')
#exclude = ('uuid','name')
### TODO manage permissions
#permissions = JSONField()
class LayerExtForm(forms.ModelForm):
class Meta:
model = LayerExt
# TODO: use default language (configure resource_upload.html)
fields = ('titleml_it', 'titleml_en', 'abstractml_it', 'abstractml_en', 'gemetkeywords', 'use_limitation', 'geonamesids', 'lineage_it' , 'lineage_en', 'equivalent_scale', 'distance', 'uom_distance', 'vertical_datum', 'vertical_extent_min', 'vertical_extent_max', 'uom_vertical_extent', 'other_citation_details_it', 'other_citation_details_en', 'supplemental_information_ml_it', 'supplemental_information_ml_en', 'resource_type', 'language', 'character_set', 'update_frequency', 'spatial_representation_type_ext','license')
#exclude = ('uuid','name')
class ResponsiblePartyForm(forms.ModelForm):
class Meta:
model = ResponsibleParty
fields = ('organization_name_it', 'office_it', 'name', 'surname')
# from django.forms.models import modelformset_factory, inlineformset_factory
# ResourceReferenceDateInlineFormSet = inlineformset_factory(Resource, ResourceReferenceDate)
# ResourceTemporalExtentInlineFormSet = inlineformset_factory(Resource, ResourceTemporalExtent)
# ResourceResponsiblePartyRoleInlineFormSet = inlineformset_factory(Resource, ResourceResponsiblePartyRole)
# ResourceMdResponsiblePartyRoleInlineFormSet = inlineformset_factory(Resource, ResourceMdResponsiblePartyRole)
| gpl-3.0 | -7,000,093,021,048,509,000 | 46.135135 | 592 | 0.679759 | false |
gazeti/aleph | aleph/views/leads_api.py | 3 | 1652 | from flask import Blueprint, request
from apikit import obj_or_404, jsonify, request_data
from werkzeug.exceptions import BadRequest
from aleph.model import Collection, EntityIdentity
from aleph.search import QueryState
from aleph.search.leads import leads_query
from aleph.logic import update_entity, update_lead
from aleph.events import log_event
from aleph.views.util import get_entity
blueprint = Blueprint('leads_api', __name__)
@blueprint.route('/api/1/collections/<int:collection_id>/leads',
methods=['GET'])
def index(collection_id):
collection = obj_or_404(Collection.by_id(collection_id))
request.authz.require(request.authz.collection_read(collection))
state = QueryState(request.args, request.authz)
results = leads_query(collection_id, state)
return jsonify(results)
@blueprint.route('/api/1/collections/<int:collection_id>/leads',
methods=['POST', 'PUT'])
def update(collection_id):
collection = obj_or_404(Collection.by_id(collection_id))
request.authz.require(request.authz.collection_write(collection))
data = request_data()
entity, obj = get_entity(data.get('entity_id'), request.authz.WRITE)
if obj.collection_id != collection_id:
raise BadRequest("Entity does not belong to collection.")
match, _ = get_entity(data.get('match_id'), request.authz.READ)
judgement = data.get('judgement')
if judgement not in EntityIdentity.JUDGEMENTS:
raise BadRequest("Invalid judgement.")
update_lead(entity, match, judgement, judge=request.authz.role)
log_event(request)
update_entity(obj)
return jsonify({'status': 'ok'})
| mit | 1,808,863,678,686,182,700 | 38.333333 | 72 | 0.72155 | false |