repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
TeamHG-Memex/frontera | frontera/tests/test_frontera_scheduler.py | 1 | 7364 |
from frontera.contrib.scrapy.schedulers.frontier import FronteraScheduler
from frontera.tests.mocks.frontier_manager import FakeFrontierManager
from frontera.tests.mocks.crawler import FakeCrawler
from frontera.core.models import Request as FRequest
from frontera.core.models import Response as FResponse
from scrapy.http import Request, Response
from scrapy.spiders import Spider
from scrapy.settings import Settings
# test requests
r1 = Request('http://www.example.com')
r2 = Request('https://www.example.com/some/page')
r3 = Request('http://example1.com')
# test requests with redirects
rr1 = Request('http://www.example.com', meta={'redirect_times': 1})
rr2 = Request('https://www.example.com/some/page', meta={'redirect_times': 4})
rr3 = Request('http://example1.com', meta={'redirect_times': 0})
# test frontier requests
fr1 = FRequest('http://www.example.com')
fr2 = FRequest('https://www.example.com/some/page')
fr3 = FRequest('http://example1.com')
class TestFronteraScheduler(object):
def test_enqueue_requests(self):
crawler = FakeCrawler()
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
assert fs.enqueue_request(r1) is True
assert fs.enqueue_request(r2) is True
assert fs.enqueue_request(r3) is True
assert set(seed.url for seed in fs.frontier.manager.seeds) == set([r1.url, r2.url, r3.url])
assert all([isinstance(seed, FRequest) for seed in fs.frontier.manager.seeds])
assert fs.stats_manager.stats.get_value('frontera/seeds_count') == 3
def test_redirect_disabled_enqueue_requests(self):
settings = Settings()
settings['REDIRECT_ENABLED'] = False
crawler = FakeCrawler(settings)
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
assert fs.enqueue_request(rr1) is False
assert fs.enqueue_request(rr2) is False
assert fs.enqueue_request(rr3) is True
assert isinstance(fs.frontier.manager.seeds[0], FRequest)
assert len(fs.frontier.manager.seeds) == 1
assert fs.frontier.manager.seeds[0].url == rr3.url
assert fs.stats_manager.stats.get_value('frontera/seeds_count') == 1
def test_redirect_enabled_enqueue_requests(self):
settings = Settings()
settings['REDIRECT_ENABLED'] = True
crawler = FakeCrawler(settings)
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
assert fs.enqueue_request(rr1) is True
assert fs.enqueue_request(rr2) is True
assert fs.enqueue_request(rr3) is True
assert len(fs.frontier.manager.seeds) == 1
assert isinstance(fs.frontier.manager.seeds[0], FRequest)
assert fs.frontier.manager.seeds[0].url == rr3.url
assert set([request.url for request in fs._pending_requests]) == set([rr1.url, rr2.url])
assert all([isinstance(request, Request) for request in fs._pending_requests])
assert fs.stats_manager.stats.get_value('frontera/seeds_count') == 1
assert fs.stats_manager.stats.get_value('frontera/redirected_requests_count') == 2
def test_next_request(self):
crawler = FakeCrawler()
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
fs.frontier.manager.put_requests([fr1, fr2, fr3])
requests = [fs.next_request() for _ in range(3)]
assert set([request.url for request in requests]) == set([fr1.url, fr2.url, fr3.url])
assert all([isinstance(request, Request) for request in requests])
assert fs.stats_manager.stats.get_value('frontera/returned_requests_count') == 3
def test_next_request_manager_finished(self):
crawler = FakeCrawler()
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
fs.frontier.manager.put_requests([fr1])
fs.frontier.manager.finished = True
assert fs.next_request() is None
assert fs.stats_manager.stats.get_value('frontera/returned_requests_count') is None
def test_next_request_overused_keys_info(self):
settings = Settings()
settings['CONCURRENT_REQUESTS_PER_DOMAIN'] = 0
settings['CONCURRENT_REQUESTS_PER_IP'] = 5
crawler = FakeCrawler(settings)
        # the keys in the slot_dict are IPs; the first value in each pair is the
        # slot.active list (only its length is needed) and the second is slot.concurrency.
slot_dict = {'1.2.3': ([0]*3, 1), '2.1.3': ([0]*30, 2), '3.2.2': ([0]*5, 1), '4.1.3': ([0]*110, 20)}
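        # '2.1.3' (30 active, concurrency 2) and '4.1.3' (110 active, concurrency 20)
        # are the slots the assertions below expect to be reported as overused under
        # CONCURRENT_REQUESTS_PER_IP = 5.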
crawler.set_slots(slot_dict)
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
fs.frontier.manager.put_requests([fr1])
request = fs.next_request()
assert request.url == fr1.url
assert isinstance(request, Request)
assert fs.frontier.manager.get_next_requests_kwargs[0]['key_type'] == 'ip'
assert set(fs.frontier.manager.get_next_requests_kwargs[0]['overused_keys']) == set(['2.1.3', '4.1.3'])
assert fs.stats_manager.stats.get_value('frontera/returned_requests_count') == 1
def test_process_spider_output(self):
i1 = {'name': 'item', 'item': 'i1'}
i2 = {'name': 'item', 'item': 'i2'}
result = [r1, r2, r3, i1, i2]
resp = Response(fr1.url, request=Request(fr1.url, meta={'frontier_request': fr1}))
crawler = FakeCrawler()
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
assert sorted(list(fs.process_spider_output(resp, result, Spider))) == sorted([i1, i2])
assert isinstance(fs.frontier.manager.responses[0], FResponse)
assert fs.frontier.manager.responses[0].url == resp.url
assert set([request.url for request in fs.frontier.manager.links]) == set([r1.url, r2.url, r3.url])
assert all([isinstance(request, FRequest) for request in fs.frontier.manager.links])
assert fs.stats_manager.stats.get_value('frontera/crawled_pages_count') == 1
assert fs.stats_manager.stats.get_value('frontera/crawled_pages_count/200') == 1
assert fs.stats_manager.stats.get_value('frontera/links_extracted_count') == 3
def test_process_exception(self):
exception = type('exception', (object,), {})
crawler = FakeCrawler()
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
fs.process_exception(r1, exception(), Spider)
error = fs.frontier.manager.errors.pop()
assert error[0].url == r1.url
assert error[1] == 'exception'
assert fs.stats_manager.stats.get_value('frontera/request_errors_count') == 1
assert fs.stats_manager.stats.get_value('frontera/request_errors_count/exception') == 1
def test_close(self):
crawler = FakeCrawler()
fs = FronteraScheduler(crawler, manager=FakeFrontierManager)
fs.open(Spider)
fs.frontier.manager.put_requests([fr1, fr2, fr3])
fs.next_request()
fs.frontier.manager.iteration = 5
fs.close('reason')
assert fs.frontier.manager._stopped is True
assert fs.stats_manager.stats.get_value('frontera/pending_requests_count') == 2
assert fs.stats_manager.stats.get_value('frontera/iterations') == 5
| bsd-3-clause | 8,694,923,118,081,443,000 | 48.093333 | 111 | 0.668523 | false |
zuck/scribee | renderers/html.py | 1 | 2199 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file is part of the Scribee project.
"""
__author__ = 'Emanuele Bertoldi <[email protected]>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.1'
import os, shutil
from StringIO import StringIO
from mako.lookup import TemplateLookup
from mako.template import Template
from mako.runtime import Context
from entity import Entity
import settings
def render(entities, output_dir):
basepath = os.path.join(output_dir, "html")
if os.path.exists(basepath):
shutil.rmtree(basepath)
HTML_ENTITIES_TEMPLATE = getattr(settings, "HTML_ENTITIES_TEMPLATE", "templates/entities.html")
HTML_ENTITY_TEMPLATE = getattr(settings, "HTML_ENTITY_TEMPLATE", "templates/entity.html")
HTML_INDEX_TEMPLATE = getattr(settings, "HTML_INDEX_TEMPLATE", "templates/index.html")
HTML_STATIC_ROOT = getattr(settings, "HTML_STATIC_ROOT", "templates/static")
# 1) Copies static files (it also creates <basepath>).
shutil.copytree(HTML_STATIC_ROOT, basepath)
# 2) Renders entity list page.
render_template({'entities': entities}, os.path.join(basepath, 'entities' + ".html"), HTML_ENTITIES_TEMPLATE)
# 3) Renders single entity page.
for entity in entities:
if not entity.parent or entity.type == Entity.Types.Class or entity.parent.type not in (Entity.Types.Class, Entity.Types.Function):
render_template({'entity': entity}, os.path.join(basepath, entity.uid() + ".html"), HTML_ENTITY_TEMPLATE)
# 4) Renders the index page.
render_template({'entities': entities}, os.path.join(basepath, 'index' + ".html"), HTML_INDEX_TEMPLATE)
def render_template(context, filename, template):
fd = open(filename, "w")
output = render_to_string(context, template)
fd.write(output)
fd.close()
def render_to_string(context, template):
fd = open(template, "r")
source = fd.read()
fd.close()
output = StringIO()
lookup = TemplateLookup(directories=[template.rpartition("/")[0]])
ctx = Context(output, **context)
Template(source, lookup=lookup).render_context(ctx)
return output.getvalue()
| mit | 3,229,216,363,738,476,500 | 36.913793 | 139 | 0.684402 | false |
laurentb/weboob | modules/agendadulibre/module.py | 1 | 6527 |
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
from weboob.tools.backend import Module, BackendConfig
from weboob.capabilities.calendar import CapCalendarEvent, CATEGORIES
from weboob.tools.value import Value
from .browser import AgendadulibreBrowser
__all__ = ['AgendadulibreModule']
class AgendadulibreModule(Module, CapCalendarEvent):
NAME = 'agendadulibre'
DESCRIPTION = u'agendadulibre website'
MAINTAINER = u'Bezleputh'
EMAIL = '[email protected]'
LICENSE = 'AGPLv3+'
VERSION = '2.1'
ASSOCIATED_CATEGORIES = [CATEGORIES.CONF]
BROWSER = AgendadulibreBrowser
region_choices = OrderedDict([(k, u'%s (%s)' % (v, k)) for k, v in sorted({
"https://www.agendadulibre.org": u'--France--',
"https://www.agendadulibre.org#3": u'Auvergne-RhΓ΄ne-Alpes',
"https://www.agendadulibre.org#5": u'Bourgogne-Franche-ComtΓ©',
"https://www.agendadulibre.org#6": u'Bretagne',
"https://www.agendadulibre.org#7": u'Centre-Val de Loire',
"https://www.agendadulibre.org#30": u'CollectivitΓ© sui generis',
"https://www.agendadulibre.org#29": u'CollectivitΓ©s d\'outre-mer',
"https://www.agendadulibre.org#9": u'Corse',
"https://www.agendadulibre.org#1": u'Grand Est',
"https://www.agendadulibre.org#23": u'Guadeloupe',
"https://www.agendadulibre.org#24": u'Guyane',
"https://www.agendadulibre.org#17": u'Hauts-de-France',
"https://www.agendadulibre.org#12": u'Γle-de-France',
"https://www.agendadulibre.org#31": u'Internet',
"https://www.agendadulibre.org#26": u'La RΓ©union',
"https://www.agendadulibre.org#25": u'Martinique',
"https://www.agendadulibre.org#28": u'Mayotte',
"https://www.agendadulibre.org#4": u'Normandie',
"https://www.agendadulibre.org#2": u'Nouvelle-Aquitaine',
"https://www.agendadulibre.org#13": u'Occitanie',
"https://www.agendadulibre.org#18": u'Pays de la Loire',
"https://www.agendadulibre.org#21": u'Provence-Alpes-CΓ΄te d\'Azur',
"https://www.agendadulibre.be": u'--Belgique--',
"https://www.agendadulibre.be#11": u'Antwerpen',
"https://www.agendadulibre.be#10": u'Brabant wallon',
"https://www.agendadulibre.be#9": u'Bruxelles-Capitale',
"https://www.agendadulibre.be#8": u'Hainaut',
"https://www.agendadulibre.be#7": u'Liege',
"https://www.agendadulibre.be#6": u'Limburg',
"https://www.agendadulibre.be#5": u'Luxembourg',
"https://www.agendadulibre.be#4": u'Namur',
"https://www.agendadulibre.be#3": u'Oost-Vlaanderen',
"https://www.agendadulibre.be#2": u'Vlaams-Brabant',
"https://www.agendadulibre.be#1": u'West-Vlaanderen',
"https://www.agendadulibre.ch": u'--Suisse--',
"https://www.agendadulibre.ch#15": u'Appenzell Rhodes-ExtΓ©rieures',
"https://www.agendadulibre.ch#16": u'Appenzell Rhodes-IntΓ©rieures',
"https://www.agendadulibre.ch#19": u'Argovie',
"https://www.agendadulibre.ch#13": u'BΓ’le-Campagne',
"https://www.agendadulibre.ch#12": u'BΓ’le-Ville',
"https://www.agendadulibre.ch#2": u'Berne',
"https://www.agendadulibre.ch#10": u'Fribourg',
"https://www.agendadulibre.ch#25": u'Genève',
"https://www.agendadulibre.ch#8": u'Glaris',
"https://www.agendadulibre.ch#18": u'Grisons',
"https://www.agendadulibre.ch#26": u'Jura',
"https://www.agendadulibre.ch#3": u'Lucerne',
"https://www.agendadulibre.ch#24": u'NeuchΓ’tel',
"https://www.agendadulibre.ch#7": u'Nidwald',
"https://www.agendadulibre.ch#6": u'Obwald',
"https://www.agendadulibre.ch#17": u'Saint-Gall',
"https://www.agendadulibre.ch#14": u'Schaffhouse',
"https://www.agendadulibre.ch#5": u'Schwytz',
"https://www.agendadulibre.ch#11": u'Soleure',
"https://www.agendadulibre.ch#21": u'Tessin',
"https://www.agendadulibre.ch#20": u'Thurgovie',
"https://www.agendadulibre.ch#4": u'Uri',
"https://www.agendadulibre.ch#23": u'Valais',
"https://www.agendadulibre.ch#22": u'Vaud',
"https://www.agendadulibre.ch#9": u'Zoug',
"https://www.agendadulibre.ch#1": u'Zurich',
}.items())])
CONFIG = BackendConfig(Value('region', label=u'Region', choices=region_choices))
def create_default_browser(self):
choice = self.config['region'].get().split('#')
selected_region = '' if len(choice) < 2 else choice[-1]
return self.create_browser(website=choice[0], region=selected_region)
def search_events(self, query):
return self.browser.list_events(query.start_date,
query.end_date,
query.city,
query.categories)
def list_events(self, date_from, date_to=None):
return self.browser.list_events(date_from, date_to)
def get_event(self, event_id):
return self.browser.get_event(event_id)
def fill_obj(self, event, fields):
event = self.browser.get_event(event.id, event)
choice = self.config['region'].get().split('#')
selected_region = '' if len(choice) < 2 else choice[-1]
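        # Region ids refer to region_choices above (23: Guadeloupe, 24: Guyane,
        # 25: Martinique, 26: La RΓ©union); these overseas regions get their own
        # timezone, everything else falls back to Europe/Paris.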
if selected_region == '23':
event.timezone = 'America/Guadeloupe'
elif selected_region == '24':
event.timezone = 'America/Guyana'
elif selected_region == '26':
event.timezone = 'Indian/Reunion'
elif selected_region == '25':
event.timezone = 'America/Martinique'
else:
event.timezone = 'Europe/Paris'
return event
OBJECTS = {AgendadulibreBrowser: fill_obj}
| lgpl-3.0 | -5,587,004,435,638,424,000 | 45.198582 | 84 | 0.630181 | false |
xfix/NextBoard | forum/models.py | 1 | 4985 |
from django.db import models
import django.contrib.auth.models as auth
from django.utils.timezone import now
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from markdown import markdown
class User(auth.User):
"""Model for representing users.
It has few fields that aren't in the standard authentication user
table, and are needed for the forum to work, like footers.
"""
display_name = models.CharField(max_length=30, null=True)
footer = models.TextField(null=True)
def __str__(self):
"""Show display name or user name."""
return self.display_name or self.username
class Category(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
"""Show category name."""
return self.name
class Forum(models.Model):
"""Model for representing forums."""
category = models.ForeignKey(Category)
title = models.CharField(max_length=100)
description = models.TextField()
def __str__(self):
"""Show forum title."""
return self.title
def postcount(self):
"""Show forum postcount."""
return Post.objects.filter(thread__forum=self).count()
@cached_property
def last_post(self):
"""Show last post in the forum."""
result = Revision.objects.raw('''
SELECT revision.id, post_id, author_id, date_created
FROM forum_post AS post
JOIN forum_revision AS revision
ON revision.id = (SELECT id
FROM forum_revision
WHERE post_id = post.id
ORDER BY date_created
LIMIT 1
)
JOIN forum_thread AS thread
ON thread.id = thread_id
WHERE forum_id = %s
ORDER BY date_created DESC
LIMIT 1
''', [self.id])
try:
return result[0]
except IndexError:
return None
class Meta:
order_with_respect_to = 'category'
class Thread(models.Model):
"""Model for representing threads."""
forum = models.ForeignKey(Forum)
title = models.CharField(max_length=100)
views = models.PositiveIntegerField(default=0)
sticky = models.BooleanField(default=False)
closed = models.BooleanField(default=False)
def __str__(self):
"""Show thread title."""
return self.title
class Meta:
ordering = ['-sticky']
@cached_property
def last_post(self):
"""Show last post in the thread."""
return Revision.objects.raw('''
SELECT revision.id, post_id, author_id, date_created
FROM forum_post AS post
JOIN forum_revision AS revision
ON revision.id = (SELECT id
FROM forum_revision
WHERE post_id = post.id
ORDER BY date_created
LIMIT 1
)
WHERE thread_id = %s
ORDER BY date_created DESC
LIMIT 1
''', [self.id])[0]
def author(self):
"""Show author of post."""
return self.post_set.first().author()
def replies(self):
"""Show number of replies in thread."""
return self.post_set.count() - 1
class Post(models.Model):
"""Model for representing posts.
    Actual post contents are stored in Revision; this model only stores the
    thread reference. The first revision of a post records its author and
    creation date, while the last revision holds the current post text.
"""
thread = models.ForeignKey(Thread)
def first_revision(self):
"""Get first revision.
The first revision is important for things like post author.
"""
return self.revision_set.first()
def last_revision(self):
"""Get last revision.
The last revision contains most current post contents.
"""
return self.revision_set.last()
def author(self):
"""Get author.
This usually shows along with the post.
"""
return self.first_revision().author
@cached_property
def html(self):
"""Get post contents in HTML format."""
return self.last_revision().html
class Revision(models.Model):
"""Model for representing post revisions.
    The first revision for a given post carries the author and creation date
    shown to the user; the last revision carries the date of the latest edit.
"""
post = models.ForeignKey(Post)
author = models.ForeignKey(User)
date_created = models.DateTimeField(default=now)
text = models.TextField()
@cached_property
def html(self):
"""Return HTML version of post (in Markdown format)."""
return mark_safe(markdown(self.text))
class Meta:
ordering = ['date_created']
| mit | 8,401,562,665,391,931,000 | 29.03012 | 71 | 0.589769 | false |
42cc/django-x-file-accel | x_file_accel_redirects/migrations/0001_initial.py | 1 | 2184 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AccelRedirect'
db.create_table('xfar_accelredirect', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=64)),
('prefix', self.gf('django.db.models.fields.CharField')(default='media', unique=True, max_length=64)),
('login_required', self.gf('django.db.models.fields.BooleanField')(default=True)),
('internal_path', self.gf('django.db.models.fields.CharField')(max_length=64)),
('serve_document_root', self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True)),
('filename_solver', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
))
db.send_create_signal('x_file_accel_redirects', ['AccelRedirect'])
def backwards(self, orm):
# Deleting model 'AccelRedirect'
db.delete_table('xfar_accelredirect')
models = {
'x_file_accel_redirects.accelredirect': {
'Meta': {'object_name': 'AccelRedirect', 'db_table': "'xfar_accelredirect'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'filename_solver': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_path': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'prefix': ('django.db.models.fields.CharField', [], {'default': "'media'", 'unique': 'True', 'max_length': '64'}),
'serve_document_root': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'})
}
}
complete_apps = ['x_file_accel_redirects'] | bsd-3-clause | 6,863,806,590,103,413,000 | 51.02381 | 132 | 0.613553 | false |
tomekjarosik/metrics | app/tests.py | 1 | 4832 |
import os
import app
import unittest
import filesystem_helper
import metrics_sender
from flask import json
import utils
class AppTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = app.app.test_client()
def setUp(self):
self.app_context = app.app.app_context()
self.app_context.push()
self.metricsender = metrics_sender.MetricSender()
def tearDown(self):
self.app_context.pop()
def _post(self, a_dict):
response = self.client.post('/buildmetrics/api/v1.0/add',
data=json.dumps(a_dict), content_type='application/json')
return response
def test_send_request_buildmetrics(self):
request_data = self.metricsender.prepare_request_data("testuser", 123123, 567678, {"t1" : 0, "t2" : 180},
True, None, "testgitstatus", None)
response = self._post(request_data)
self.assertEquals(201, response.status_code)
def test_get_list_of_all_metrics(self):
response = self.client.get('/metrics')
self.assertEquals(200, response.status_code)
def test_get_detail_for_metric(self):
response = self.client.get('/metrics?mid=41')
self.assertEquals(200, response.status_code)
@unittest.skip("testing skipping")
def test_empty_db(self):
rv = self.app.get('/')
assert 'No entries here so far' in rv.data
class MetricSenderTests(unittest.TestCase):
def setUp(self):
self.metricsender = metrics_sender.MetricSender()
def test_parse_build_times(self):
results = self.metricsender.parse_build_times("tests/build.times.in")
self.assertEquals(2, results[':app:buildInfoDebugLoader'])
self.assertEquals(187, results[':app:preBuild'])
self.assertEquals(0, results[':app:preDebugBuild'])
self.assertEquals(625, results['total time'])
def test_prepare_request_data(self):
res = self.metricsender.prepare_request_data("testuser", 123123, 234234, {"t1" : 0, "t2" : 180},
True, None, "testgitstatus", None)
self.assertEquals("testuser", res["username"])
self.assertEquals(180, res["scores"]["t2"])
self.assertEquals(True, res["is_success"])
self.assertEquals(123123, res["timestamp"])
self.assertEquals(234234, res["previous_timestamp"])
def test_send_request(self):
r = self.metricsender.send_request("testuser", 123123, 234234, {"t1": 13, "t10": 14}, True, "some diff", "gitstatus", "some env")
self.assertEquals(201, r.status_code)
class FilesystemHelperTests(unittest.TestCase):
def setUp(self):
self.helper = filesystem_helper.FilesystemHelper()
pass
def tearDown(self):
pass
def test_files_modified_since(self):
import datetime as dt, time
names = ["_t1.txt", "_t2.txt"]
root = "."
f1 = open(names[0], "w")
result = self.helper.files_modified_since(root, dt.datetime.now() - dt.timedelta(seconds=0.51))
self.assertEquals(os.path.join(root, names[0]), result[0])
self.assertEquals(1, result.__len__())
time.sleep(0.25)
f2 = open(names[1], "w")
result = self.helper.files_modified_since(root, dt.datetime.now() - dt.timedelta(seconds=0.5))
self.assertEquals(os.path.join(root, names[1]), result[1])
self.assertEquals(2, result.__len__())
time.sleep(0.35)
result = self.helper.files_modified_since(root, dt.datetime.now() - dt.timedelta(seconds=0.5))
self.assertEquals(os.path.join(root, names[1]), result[0])
self.assertEquals(1, result.__len__())
f1.close()
f2.close()
for name in names:
os.remove(os.path.join(root, name))
def test_git_status(self):
res = self.helper.git_status()
self.assertTrue(res.startswith("# On branch master"))
class PlatformInfoTest(unittest.TestCase):
def setUp(self):
from utils import PlatformInfo
self.info = PlatformInfo().info()
def test_platform_info(self):
self.assertIsNotNone(self.info["machine"])
self.assertIsNotNone(self.info["user"])
class CustomSorterTest(unittest.TestCase):
def setUp(self):
self.customsorter = utils.CustomSorter()
pass
def test_sort_scores(self):
a_dict = {}
for i in range(0, 10):
a_dict["t"+str(i)] = i * (i % 3)
actual = self.customsorter.sort_scores(a_dict, True)
expected = [('t8', 16), ('t5', 10), ('t7',7), ('t4', 4), ('t2', 4), ('t1', 1), ('t9', 0), ('t6', 0), ('t3', 0), ('t0', 0)]
self.assertEquals(expected, actual)
if __name__ == '__main__':
unittest.main()
| mit | 2,433,743,428,664,007,700 | 35.330827 | 137 | 0.606167 | false |
JasonLC506/CollaborativeFiltering | kcoreDecomposition.py | 1 | 5916 |
import pymongo
import cPickle
database = pymongo.MongoClient().dataSet
col = database.user_reaction_filtered
logfile = "log_kcore"
users_list_file = "data/user_dict_NYTWaPoWSJ"
items_list_file = "data/item_dict_NYTWaPoWSJ"
def slotsort(entry_dictionary):
sorted = {}
for ID in entry_dictionary.keys():
value = entry_dictionary[ID]
sorted.setdefault(entry_dictionary[ID],{})
sorted[value][ID] = True
return sorted
def list_del(K, entry_list, entry_list_sorted):
entry_list_del = []
for k in range(K):
if k in entry_list_sorted:
entry_list_del += entry_list_sorted[k].keys()
for entryID in entry_list_sorted[k].keys():
del entry_list[entryID]
del entry_list_sorted[k]
return entry_list_del
def transaction_del(K = None, users_list_del = None, items_list_del = None, items_list_sorted = None, items_list = None, users_list_sorted = None, users_list = None, col_edge = None):
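    # Two symmetric modes: when users_list_del is given, mark the deleted users'
    # remaining edges with KCORE = K and decrement the affected items' counts;
    # when items_list_del is given, do the same in the item-to-user direction.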
if users_list_del is None:
UDEL = False
else:
UDEL = True
if UDEL:
item_update = {}
for userID in users_list_del:
edge_cursor = col_edge.find({"READERID": userID}, no_cursor_timeout = True)
for edge in edge_cursor:
kcore = edge["KCORE"]
if kcore != 0 and kcore <= K:
print "kcore", kcore
print "edge", edge
continue # already excluded by smaller k core
itemID = edge["POSTID"]
# print "item to be modified", itemID
item_update.setdefault(itemID,0)
item_update[itemID] += 1
# print item_update
edge["KCORE"] = K
col_edge.save(edge)
print "total item to be updated", len(item_update), "total reactions to del", sum(item_update.values())
listUpdate(items_list, items_list_sorted, item_update)
else:
user_update = {}
for itemID in items_list_del:
edge_cursor = col_edge.find({"$and":[{"POSTID": itemID},{"KCORE": 0}]}, no_cursor_timeout = True)
for edge in edge_cursor:
kcore = edge["KCORE"]
if kcore != 0 and kcore <= K:
print "kcore", kcore
print "edge", edge
continue # already excluded by smaller k core
userID = edge["READERID"]
user_update.setdefault(userID,0)
user_update[userID] += 1
edge["KCORE"] = K
col_edge.save(edge)
print "total user to be updated", len(user_update), "total reactions to del", sum(user_update.values())
listUpdate(users_list, users_list_sorted, user_update)
def listUpdate(entry_list, entry_list_sorted, entry_update):
for entryID in entry_update.keys():
old_value = entry_list[entryID]
new_value = old_value - entry_update[entryID]
entry_list[entryID] = new_value
del entry_list_sorted[old_value][entryID]
entry_list_sorted.setdefault(new_value, {})[entryID] = True
def kcoreSingle(K, users_list_sorted, items_list_sorted, users_list, items_list, col_edge):
while True:
users_list_del = list_del(K, users_list, users_list_sorted)
with open(logfile, "a") as logf:
logf.write("users to be deleted" + str(len(users_list_del)) + "\n")
Nreaction = sum(items_list.values())
print "Nreaction from items before", Nreaction
transaction_del(K = K, users_list_del = users_list_del, items_list_sorted = items_list_sorted, items_list = items_list, col_edge = col_edge)
items_list_del = list_del(K, items_list, items_list_sorted)
with open(logfile, "a") as logf:
logf.write("items to be deleted" + str(len(items_list_del)) + "\n")
Nreaction = sum(items_list.values())
print "Nreaction from items after", Nreaction
if len(items_list_del) < 1:
with open(logfile, "a") as logf:
logf.write("kcore decomposition done with K = %d\n" % K)
break
transaction_del(K = K, items_list_del = items_list_del, users_list_sorted = users_list_sorted, users_list = users_list, col_edge = col_edge)
return users_list, items_list, users_list_sorted, items_list_sorted
def kcore(K, users_list_file, items_list_file, col_edge, store_every_k = False):
with open(users_list_file, "r") as f:
users_list = cPickle.load(f)
with open(items_list_file, "r") as f:
items_list = cPickle.load(f)
users_list_sorted = slotsort(users_list)
items_list_sorted = slotsort(items_list)
for k in range(2,K+1):
Nreaction = sum(items_list.values())
print "Nreaction from items before kcoreSingle", Nreaction
kcoreSingle(k, users_list_sorted, items_list_sorted, users_list, items_list, col_edge)
Nreaction = sum(items_list.values())
print "Nreaction from items after kcoreSingle", Nreaction
if store_every_k or k == K:
with open(users_list_file[:25] + "_K" + str(k), "w") as f:
cPickle.dump(users_list, f)
with open(items_list_file[:25] + "_K" + str(k), "w") as f:
cPickle.dump(items_list, f)
def RESET(K_MAX, col_edge):
col_edge.update_many({"KCORE":{"$gt": K_MAX-1}}, {"$set": {"KCORE": 0}}, upsert = False)
print "reset done, no k larger or equal than K_MAX"
if __name__ == "__main__":
database = pymongo.MongoClient().dataSet
col_edge = database.user_reaction_filtered
# !!!!!! RESET !!!!!!!! #
### RESET(2, col_edge)
#########################
users_list_file = "data/user_dict_NYTWaPoWSJ_K10"
items_list_file = "data/item_dict_NYTWaPoWSJ_K10"
K = 50
kcore(K, users_list_file, items_list_file, col_edge, store_every_k = True)
| mit | -7,255,527,684,046,012,000 | 41.561151 | 183 | 0.586376 | false |
polyaxon/polyaxon | platform/polycommon/polycommon/options/option.py | 1 | 3366 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import Optional, Tuple
from polyaxon.parser import parser
from polycommon.options.exceptions import OptionException
NAMESPACE_DB_OPTION_MARKER = ":"
NAMESPACE_DB_CONFIG_MARKER = "__"
NAMESPACE_SETTINGS_MARKER = "__"
NAMESPACE_ENV_MARKER = ":"
class OptionStores(Enum):
ENV = "env"
DB_OPTION = "db_option"
DB_CONFIG = "db_config"
SETTINGS = "settings"
class OptionScope(Enum):
GLOBAL = "global"
ORGANISATION = "organization"
TEAM = "team"
PROJECT = "project"
USER = "user"
class Option:
key = None
scope = OptionScope.GLOBAL
is_secret = False
is_optional = True
is_list = False
store = None
typing = None
default = None
options = None
description = None
cache_ttl = 0
@staticmethod
def get_default_value():
return None
@classmethod
def default_value(cls):
return cls.default if cls.default is not None else cls.get_default_value()
@classmethod
def is_global(cls):
return cls.scope == OptionScope.GLOBAL
@classmethod
def get_marker(cls) -> str:
if cls.store == OptionStores.DB_OPTION:
return NAMESPACE_DB_OPTION_MARKER
if cls.store == OptionStores.DB_CONFIG:
return NAMESPACE_DB_CONFIG_MARKER
if cls.store == OptionStores.SETTINGS:
return NAMESPACE_SETTINGS_MARKER
return NAMESPACE_ENV_MARKER
@classmethod
def parse_key(cls) -> Tuple[Optional[str], str]:
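        # Splits the key on the marker of the backing store; e.g. a hypothetical
        # key "features:enabled" under an env or db-option store would parse to
        # ("features", "enabled"), while a key without a marker parses to (None, key).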
marker = cls.get_marker()
parts = cls.key.split(marker)
if len(parts) > 2:
raise OptionException(
"Option declared with multi-namespace key `{}`.".format(cls.key)
)
if len(parts) == 1:
return None, cls.key
return parts[0], parts[1]
@classmethod
def get_namespace(cls) -> Optional[str]:
return cls.parse_key()[0]
@classmethod
def get_key_subject(cls):
return cls.parse_key()[1]
@classmethod
def to_dict(cls, value=None):
return {
"key": cls.key,
"typing": cls.typing,
"is_list": cls.is_list,
"is_secret": cls.is_secret,
"value": value if value is not None else cls.default,
"description": cls.description,
}
@classmethod
def _extra_processing(cls, value):
return value
@classmethod
def parse(cls, value):
_value = parser.TYPE_MAPPING[cls.typing](
key=cls.key,
value=value,
is_list=cls.is_list,
is_optional=cls.is_optional,
default=cls.default,
options=cls.options,
)
return cls._extra_processing(_value)
| apache-2.0 | 6,719,844,214,865,127,000 | 25.928 | 82 | 0.620915 | false |
xeneta/LeadQualifier | xeneta_qualifier/run.py | 1 | 1432 |
import csv
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import precision_recall_fscore_support
import numpy as np
def convertToFloat(lst):
return np.array(lst).astype(np.float)
def fetchData(path):
labels = []
data = []
f = open(path)
csv_f = csv.reader(f)
for row in csv_f:
labels.append(convertToFloat(row[0]))
data.append(convertToFloat(row[1:]))
f.close()
return np.array(data), np.array(labels)
# Random Forest Classifier
def runForest(X_train, y_train):
forest = RandomForestClassifier(n_estimators=90, random_state=42)
forest.fit(X_train, y_train)
return forest
# Stochastic Gradient Descent Classifier
def runSGD(X_train, y_train):
sgd = SGDClassifier(n_iter=500, loss='modified_huber', penalty='elasticnet', random_state=42)
sgd.fit(X_train, y_train)
return sgd
def getScores(clf, X, y):
predictions = clf.predict(X)
scores = precision_recall_fscore_support(y, predictions, average='binary')
return scores
# Import data
X_test, y_test = fetchData('data/test.csv')
X_train, y_train = fetchData('data/train.csv')
forest = runForest(X_train, y_train)
forest_scores = getScores(forest, X_test, y_test)
print 'Random Forest Scores: ', forest_scores
sgd = runSGD(X_train, y_train)
sgd_scores = getScores(sgd, X_test, y_test)
print 'SGD Scores: ', sgd_scores
| mit | 3,723,876,254,262,012,400 | 27.078431 | 97 | 0.706704 | false |
patdaburu/djio | docs/source/conf.py | 1 | 7097 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# djio documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 12 15:57:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import sys
# Determine the absolute path to the directory containing the python modules.
_pysrc = os.path.abspath(os.path.join(os.path.abspath(__file__), '..', '..', '..'))
# Insert it into the path.
sys.path.insert(0, _pysrc)
# Now we can import local modules.
import djio
# -- Document __init__ methods by default. --------------------------------
# This section was added to allow __init__() to be documented automatically.
# You can comment this section out to go back to the default behavior.
# See: http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
# http://docs.readthedocs.io/en/latest/faq.html
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
class BaseGeometry(object):
pass
MOCK_MODULES = [
'numpy',
'scipy',
'sklearn',
'matplotlib',
'matplotlib.pyplot',
'scipy.interpolate',
'scipy.special',
'math',
#'typing',
# #'__future__',
'toolboxutilities',
'CaseInsensitiveDict',
'geoalchemy2', 'geoalchemy2.types', 'geoalchemy2.shape',
'shapely', 'shapely.errors', 'shapely.geometry', 'shapely.geometry.base', 'shapely.wkb', 'shapely.wkt',
'measurement', 'measurement.measures', 'osgeo'
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'djio'
copyright = '2018, Pat Daburu'
author = 'Pat Daburu'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = djio.__version__
# The full version, including alpha/beta/rc tags.
release = djio.__release__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'djiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'djio.tex', 'djio Documentation',
'Pat Daburu', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djio', 'djio Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'djio', 'djio Documentation',
author, 'djio', 'One line description of project.',
'Miscellaneous'),
]
| mit | 4,778,953,984,657,526,000 | 28.570833 | 114 | 0.670565 | false |
FarhansCode/file_repository | file_repository/models.py | 1 | 5494 |
from __future__ import unicode_literals
from django.db import models
import os, re
# Single inode model for both files and directories
class Inode(models.Model):
# This will only be specified if Inode is a root
rootname = models.CharField(max_length=10, default='')
name = models.CharField(max_length=255)
# True = Directory, False = File
is_directory = models.BooleanField(default=False)
# Only makes sense if its a file
content = models.FileField(upload_to='file_repository/_files')
# Only makes senes if its a directory
inodes = models.ManyToManyField('Inode')
def __str__(self):
return self.name
def get_path(self):
path = ''
rootpath = self
while True:
if rootpath.inode_set.count() == 1:
rootpath = rootpath.inode_set.get()
if rootpath.name is not '/': # Not last element
path = rootpath.name + '/' + path
elif rootpath.name is '/': # Last element
path = '/' + path
break
else: # Only for root elements
path = '/' + path
break
return path
def create_file(self, name, content):
try:
exists = self.inodes.get(name=name)
raise Inode.NameConflict(name)
except Inode.DoesNotExist:
pass
new_file = Inode(is_directory=False, rootname=self.rootname)
new_file.content = content
new_file.name = name
new_file.save()
self.inodes.add(new_file)
return new_file
def create_directory(self, name):
try:
exists = self.inodes.get(name=name)
raise Inode.NameConflict(name)
except Inode.DoesNotExist:
pass
new_directory = Inode(is_directory=True, rootname=self.rootname)
new_directory.name = name
new_directory.save()
self.inodes.add(new_directory)
return new_directory
def deletenode(self):
if self.is_directory == False:
os.remove(self.content.path)
self.delete()
else:
# Recursively go through all subdirectories
directories = self.inodes.filter(is_directory = True)
for dir_inode in directories:
dir_inode.deletenode()
# Now delete them all
directories.all().delete()
# And then wipe out the files
files = self.inodes.filter(is_directory = False)
for subfile in files:
subfile.deletenode()
self.delete()
class methods:
def createroot(rootname):
newroot = Inode(name='/', rootname=rootname, is_directory=True)
newroot.save()
return newroot
def getroot(rootname):
root = Inode.objects.get(name='/',
rootname=rootname,
is_directory=True)
return root
def getinode(filedir, rootname):
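            # Resolves a path such as a hypothetical 'dir1/dir2/file.txt' under the
            # named root, walking one directory per iteration: raises Error500 if the
            # root is missing, Error404 for a missing node, and Redirect302 when a
            # directory is requested without a trailing slash.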
try: # Get the root or die
rootdirectory = Inode.objects.get(rootname=rootname,
name='/',
is_directory=True)
except Inode.DoesNotExist:
raise Inode.Error500('rootname %s does not exist' % rootname)
if filedir == '' or filedir == '/':
return rootdirectory # Quit if its just the root
current_directory = rootdirectory
tempurl = filedir
while tempurl:
lastnode = re.match('^(\/)?([\w\.]+)?(\/?)$', tempurl)
if lastnode is not None:
try:
if lastnode.group(1) is '/' and \
lastnode.group(2) is None:
return current_directory
elif lastnode.group(2) is not None:
inode = current_directory.inodes.get(
name=lastnode.group(2))
if inode.is_directory == True and \
lastnode.group(3) is not '/':
raise Inode.Redirect302(filedir+'/')
return inode
except Inode.DoesNotExist:
raise Inode.Error404
response = re.match('^([\w\-\.\ ]+)\/([\w\-\.\ \/]+)', tempurl)
if response == None: # Its the last node, kick it back up
continue
tree, tempurl = response.groups()
if tree: # This is a directory
current_directory = current_directory.inodes.get(name=tree,
is_directory=True)
continue
class Error404(Exception):
def __str__(self):
return repr("Inode does not exist")
class Error500(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class Redirect302(Exception):
def __init__(self, path):
self.newpath = path
class NameConflict(Exception):
def __init__(self, name):
self.name = name
def __str__(self):
return repr("Inode %s already exists" % self.name)
| bsd-2-clause | -8,650,357,869,796,421,000 | 35.872483 | 79 | 0.503094 | false |
levic92/LCExtractor | lcextractor/webui.py | 1 | 1925 |
#
# webui.py
#
# Copyright (C) 2009 Andrew Resch <[email protected]>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <[email protected]>
# Copyright (C) 2007-2009 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from deluge.log import LOG as log
from deluge.ui.client import client
from deluge import component
from deluge.plugins.pluginbase import WebPluginBase
from common import get_resource
class WebUI(WebPluginBase):
def enable(self):
pass
def disable(self):
pass
scripts = [get_resource("lcextractor.js")]
debug_scripts = scripts
| gpl-3.0 | 5,186,887,640,016,582,000 | 34 | 78 | 0.737662 | false |
newmediamedicine/indivo_server_1_0 | codingsystems/data/loinc.py | 1 | 1891 |
"""
LOINC loading
Ben Adida
2010-08-25
"""
from django.utils import simplejson
from loadutils import create_codingsystem
import os.path
import csv
from codingsystems import models
def load(stream, codingsystem, delimiter='\t'):
"""
load data from a file input stream.
"""
csv_reader = csv.reader(stream, delimiter = delimiter)
FIELD_NAMES = ["loinc_num", "component", "property", "time_aspct", "system", "scale_typ", "method_typ", "relat_nms", "class", "source",
"dt_last_ch", "chng_type", "comments", "answerlist", "status", "map_to", "scope", "consumer_name", "ipcc_units", "reference",
"exact_cmp_sy", "molar_mass", "classtype", "formula", "species", "exmpl_answers", "acssym", "base_name", "final",
"naaccr_id", "code_table", "setroot", "panelelements", "survey_quest_text", "survey_quest_src", "unitsrequired", "submitted_units",
"relatednames2", "shortname", "order_obs", "cdisc_common_tests", "hl7_field_subfield_id", "external_copyright_notice", "example_units", "inpc_percentage",
"long_common_name", "hl7_v2_datatype", "hl7_v3_datatype", "curated_range_and_units", "document_section", "definition_description", "example_ucum_units"]
for row in csv_reader:
values = dict([(f, row[i]) for i, f in enumerate(FIELD_NAMES[:len(row)])])
models.CodedValue.objects.create(system = codingsystem,
code = values['loinc_num'],
physician_value = values['component'], consumer_value = values['consumer_name'])
def create_and_load_from(filepath):
if not os.path.isfile(filepath):
print "Can't load LOINC, the file does not exist at %s" % filepath
return
codingsystem = create_codingsystem('loinc', 'LOINC')
load(open(filepath, "r"), codingsystem)
| gpl-3.0 | -8,305,867,522,785,487,000 | 40.108696 | 173 | 0.620307 | false |
udacity/course-front-end-frameworks | lesson3/quizExpressions/unit_tests.py | 1 | 2472 |
import re
is_correct = False
brace_regex = "{{.*}}"
color_regex = "(?:brick.)?color"
size_regex = "(?:brick.)?size"
price_regex = "(?:brick.)?price"
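# The *_regex patterns accept either the bare property name ("color") or the
# prefixed form ("brick.color"); brace_regex checks that the answer is wrapped
# in an Angular-style {{ ... }} expression.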
heading = widget_inputs["text1"]
brick_color = widget_inputs["text2"]
brick_size = widget_inputs["text3"]
brick_price = widget_inputs["text4"]
brick_description = widget_inputs["text5"]
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
if heading == '':
is_correct = True
else:
commentizer("Do you think the heading should change if you use a different brick? Why would a different brick make the heading change?")
#check the brick's color matches a RegEx
if re.search( color_regex, brick_color ):
if not re.search( brace_regex, brick_color ):
is_correct = False
commentizer("What you entered into the color field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The color field is not correct.")
#check the brick's size matches a RegEx
if re.search( size_regex, brick_size ):
if not re.search( brace_regex, brick_size ):
is_correct = False
commentizer("What you entered into the size field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The size field is not correct.")
#check the brick's price matches a RegEx
if re.search( price_regex, brick_price ):
if not re.search( brace_regex, brick_price ):
is_correct = False
commentizer("What you entered into the price field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The price field is not correct.")
# if they're all unchecked
if not any([heading, brick_color, brick_size, brick_price, brick_description]):
is_correct = False
comments = []
comments.append('At least one of these should be converted into an expression.\n\nLook at the data in the template and ask yourself, "Will this change if I use a different brick?" If the answer is yes, then enter the expression into the appropriate field.')
if is_correct:
commentizer("Great job!")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
| mit | -2,914,863,373,141,105,000 | 34.314286 | 261 | 0.691343 | false |
senarvi/senarvi-speech | filter-text/perplexity.py | 1 | 3615 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Routines for language model estimation and perplexity computation.
#
# Author: Seppo Enarvi
# http://users.marjaniemi.com/seppo/
import sys
import re
import tempfile
import subprocess
def read_word_segmentations(input_file):
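    # Each non-comment line is assumed to map a word to its subword units separated
    # by '+' (falling back to spaces), with digits stripped first; e.g. a hypothetical
    # line "auto+ssa" yields wsegs['autossa'] = ['auto', 'ssa'].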
wsegs = dict()
for line in input_file:
line = line.strip()
if line.startswith('#'):
continue
line = re.sub('\d*', '', line)
parts = line.split(r'+')
if len(parts) < 2:
parts = line.split(' ')
parts = [re.sub(' ', '', x) for x in parts]
wrd = ''
for part in parts:
wrd += part
wsegs[wrd] = parts
return wsegs
def word_perplexity(train_text, devel_text, vocabulary=None):
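    # A thin wrapper around the SRILM ngram-count/ngram tools (assumed to be on
    # PATH): trains a bigram Witten-Bell LM on train_text and scores devel_text,
    # returning (perplexity, OOV count) parsed from the tool output.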
lm_file = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8")
command = [ 'ngram-count',
'-order', '2',
'-wbdiscount1', '-wbdiscount2',
'-interpolate1', '-interpolate2',
'-text', train_text,
'-lm', lm_file.name ]
if vocabulary is not None:
command.extend(['-unk', '-vocab', vocabulary])
subprocess.check_call(command)
command = [ 'ngram',
'-order', '2',
'-lm', lm_file.name,
'-ppl', devel_text]
if vocabulary is not None:
command.extend(['-unk', '-vocab', vocabulary])
output = subprocess.check_output(command).decode('utf-8').splitlines()
    matches = re.search(r'(\d+) OOVs', output[0])
if matches:
num_oovs = int(matches.group(1))
else:
sys.stderr.write("Unable to parse OOVs from:\n")
sys.stderr.write(output[0])
sys.stderr.write("\n")
sys.exit(1)
    matches = re.search(r'ppl= ?(\d+(\.\d+)?)', output[1])
if matches:
perplexity = float(matches.group(1))
else:
sys.stderr.write("Unable to parse ppl from:\n")
sys.stderr.write(output[1])
sys.stderr.write("\n")
sys.exit(1)
return perplexity, num_oovs
# Segments text according to given word segmentation, to be used as subword
# language model training data.
def segment_text(input_file, output_file, wsegs):
for line in input_file:
line = line.strip()
words = line.split()
output_file.write("<s> <w> ")
for word in words:
subwords = wsegs[word]
for sw in subwords:
output_file.write(sw)
output_file.write(" ")
output_file.write("<w> ")
output_file.write("</s>\n")
def subword_perplexity(train_text, devel_text, wsegs, order=3):
if wsegs is None:
segmented_train_text = train_text
segmented_devel_text = devel_text
else:
segmented_train_text = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8")
segment_text(train_text, segmented_train_text, wsegs)
segmented_devel_text = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8")
segment_text(devel_text, segmented_devel_text, wsegs)
lm_file = tempfile.NamedTemporaryFile(mode='w+', encoding="utf-8")
command = [ 'ngram-count',
'-order', str(order),
'-wbdiscount1', '-wbdiscount2', '-wbdiscount3',
'-interpolate1', '-interpolate2', '-interpolate3',
'-text', segmented_train_text.name,
'-lm', lm_file.name ]
subprocess.check_call(command)
command = [ 'perplexity',
'-a', lm_file.name,
'-t', '2',
segmented_devel_text.name,
'-']
output = subprocess.check_output(command, stderr=subprocess.STDOUT).decode('utf-8')
matches = re.search('^Dropped:\s*(\d+) UNKS', output, re.MULTILINE)
if matches:
num_oovs = int(matches.group(1))
else:
sys.stderr.write("Unable to parse UNKS from:\n")
sys.stderr.write(output)
sys.exit(1)
matches = re.search('^Perplexity (\d+(.\d+)?)', output, re.MULTILINE)
if matches:
perplexity = float(matches.group(1))
else:
sys.stderr.write("Unable to parse Perplexity from:\n")
sys.stderr.write(output)
sys.exit(1)
return perplexity, num_oovs
| apache-2.0 | -6,104,108,968,600,764,000 | 27.464567 | 84 | 0.665007 | false |
GenericMappingTools/gmt-python | pygmt/tests/test_x2sys_cross.py | 1 | 7896 | # pylint: disable=unused-argument
"""
Tests for x2sys_cross.
"""
import os
from tempfile import TemporaryDirectory
import numpy as np
import numpy.testing as npt
import pandas as pd
import pytest
from pygmt import x2sys_cross, x2sys_init
from pygmt.datasets import load_sample_bathymetry
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import data_kind
@pytest.fixture(name="mock_x2sys_home")
def fixture_mock_x2sys_home(monkeypatch):
"""
Set the X2SYS_HOME environment variable to the current working directory
for the test session.
"""
monkeypatch.setenv("X2SYS_HOME", os.getcwd())
@pytest.fixture(scope="module", name="tracks")
def fixture_tracks():
"""
Load track data from the sample bathymetry file.
"""
dataframe = load_sample_bathymetry()
dataframe.columns = ["x", "y", "z"] # longitude, latitude, bathymetry
return [dataframe.query(expr="z > -20")] # reduce size of dataset
def test_x2sys_cross_input_file_output_file(mock_x2sys_home):
"""
Run x2sys_cross by passing in a filename, and output internal crossovers to
an ASCII txt file.
"""
with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir:
tag = os.path.basename(tmpdir)
x2sys_init(tag=tag, fmtfile="xyz", force=True)
outfile = os.path.join(tmpdir, "tmp_coe.txt")
output = x2sys_cross(
tracks=["@tut_ship.xyz"], tag=tag, coe="i", outfile=outfile, verbose="i"
)
assert output is None # check that output is None since outfile is set
assert os.path.exists(path=outfile) # check that outfile exists at path
_ = pd.read_csv(outfile, sep="\t", header=2) # ensure ASCII text file loads ok
return output
def test_x2sys_cross_input_file_output_dataframe(mock_x2sys_home):
"""
Run x2sys_cross by passing in a filename, and output internal crossovers to
a pandas.DataFrame.
"""
with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir:
tag = os.path.basename(tmpdir)
x2sys_init(tag=tag, fmtfile="xyz", force=True)
output = x2sys_cross(tracks=["@tut_ship.xyz"], tag=tag, coe="i", verbose="i")
assert isinstance(output, pd.DataFrame)
assert output.shape == (14294, 12)
columns = list(output.columns)
assert columns[:6] == ["x", "y", "i_1", "i_2", "dist_1", "dist_2"]
assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"]
return output
def test_x2sys_cross_input_dataframe_output_dataframe(mock_x2sys_home, tracks):
"""
Run x2sys_cross by passing in one dataframe, and output external crossovers
to a pandas.DataFrame.
"""
with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir:
tag = os.path.basename(tmpdir)
x2sys_init(tag=tag, fmtfile="xyz", force=True)
output = x2sys_cross(tracks=tracks, tag=tag, coe="i", verbose="i")
assert isinstance(output, pd.DataFrame)
assert output.shape == (14, 12)
columns = list(output.columns)
assert columns[:6] == ["x", "y", "i_1", "i_2", "dist_1", "dist_2"]
assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"]
assert output.dtypes["i_1"].type == np.object_
assert output.dtypes["i_2"].type == np.object_
return output
def test_x2sys_cross_input_two_dataframes(mock_x2sys_home):
"""
Run x2sys_cross by passing in two pandas.DataFrame tables with a time
column, and output external crossovers to a pandas.DataFrame.
"""
with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir:
tag = os.path.basename(tmpdir)
x2sys_init(
tag=tag, fmtfile="xyz", suffix="xyzt", units=["de", "se"], force=True
)
# Add a time row to the x2sys fmtfile
with open(file=os.path.join(tmpdir, "xyz.fmt"), mode="a") as fmtfile:
fmtfile.write("time\ta\tN\t0\t1\t0\t%g\n")
# Create pandas.DataFrame track tables
tracks = []
for i in range(2):
np.random.seed(seed=i)
track = pd.DataFrame(data=np.random.rand(10, 3), columns=("x", "y", "z"))
track["time"] = pd.date_range(start=f"2020-{i}1-01", periods=10, freq="ms")
tracks.append(track)
output = x2sys_cross(tracks=tracks, tag=tag, coe="e", verbose="i")
assert isinstance(output, pd.DataFrame)
assert output.shape == (30, 12)
columns = list(output.columns)
assert columns[:6] == ["x", "y", "t_1", "t_2", "dist_1", "dist_2"]
assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"]
assert output.dtypes["t_1"].type == np.datetime64
assert output.dtypes["t_2"].type == np.datetime64
def test_x2sys_cross_input_two_filenames(mock_x2sys_home):
"""
Run x2sys_cross by passing in two filenames, and output external crossovers
to a pandas.DataFrame.
"""
with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir:
tag = os.path.basename(tmpdir)
x2sys_init(tag=tag, fmtfile="xyz", force=True)
# Create temporary xyz files
for i in range(2):
np.random.seed(seed=i)
with open(os.path.join(os.getcwd(), f"track_{i}.xyz"), mode="w") as fname:
np.savetxt(fname=fname, X=np.random.rand(10, 3))
output = x2sys_cross(
tracks=["track_0.xyz", "track_1.xyz"], tag=tag, coe="e", verbose="i"
)
assert isinstance(output, pd.DataFrame)
assert output.shape == (24, 12)
columns = list(output.columns)
assert columns[:6] == ["x", "y", "i_1", "i_2", "dist_1", "dist_2"]
assert columns[6:] == ["head_1", "head_2", "vel_1", "vel_2", "z_X", "z_M"]
_ = [os.remove(f"track_{i}.xyz") for i in range(2)] # cleanup track files
return output
def test_x2sys_cross_invalid_tracks_input_type(tracks):
"""
Run x2sys_cross using tracks input that is not a pandas.DataFrame (matrix)
or str (file) type, which would raise a GMTInvalidInput error.
"""
invalid_tracks = tracks[0].to_xarray().z
assert data_kind(invalid_tracks) == "grid"
with pytest.raises(GMTInvalidInput):
x2sys_cross(tracks=[invalid_tracks])
def test_x2sys_cross_region_interpolation_numpoints(mock_x2sys_home):
"""
Test that x2sys_cross's region (R), interpolation (l) and numpoints (W)
arguments work.
"""
with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir:
tag = os.path.basename(tmpdir)
x2sys_init(tag=tag, fmtfile="xyz", force=True)
output = x2sys_cross(
tracks=["@tut_ship.xyz"],
tag=tag,
coe="i",
region=[245, 250, 20, 25],
interpolation="a", # Akima spline interpolation
numpoints=5, # Use up to 5 data points in interpolation
)
assert isinstance(output, pd.DataFrame)
assert output.shape == (3867, 12)
# Check crossover errors (z_X) and mean value of observables (z_M)
npt.assert_allclose(output.z_X.mean(), -139.2, rtol=1e-4)
npt.assert_allclose(output.z_M.mean(), -2890.465813)
def test_x2sys_cross_trackvalues(mock_x2sys_home):
"""
Test that x2sys_cross's trackvalues (Z) argument work.
"""
with TemporaryDirectory(prefix="X2SYS", dir=os.getcwd()) as tmpdir:
tag = os.path.basename(tmpdir)
x2sys_init(tag=tag, fmtfile="xyz", force=True)
output = x2sys_cross(tracks=["@tut_ship.xyz"], tag=tag, trackvalues=True)
assert isinstance(output, pd.DataFrame)
assert output.shape == (14294, 12)
# Check mean of track 1 values (z_1) and track 2 values (z_2)
npt.assert_allclose(output.z_1.mean(), -2420.569767)
npt.assert_allclose(output.z_2.mean(), -2400.357549)
| bsd-3-clause | -6,218,975,711,026,459,000 | 36.779904 | 87 | 0.618541 | false |
jardiacaj/finem_imperii | organization/test/test_arrest_warrant.py | 1 | 2003 | from django.test import TestCase
from django.urls.base import reverse
from organization.models.capability import Capability
from character.models import Character, CharacterEvent
class TestArrestWarrant(TestCase):
fixtures = ["simple_world"]
def setUp(self):
self.client.post(
reverse('account:login'),
{'username': 'alice', 'password': 'test'},
)
response = self.client.get(
reverse('character:activate', kwargs={'char_id': 1}),
follow=True
)
def test_warrant(self):
capability = Capability.objects.get(
organization_id=102,
type=Capability.ARREST_WARRANT,
applying_to_id=101
)
to_arrest = Character.objects.get(id=2)
response = self.client.get(capability.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, "King")
self.assertContains(response, "Kingdom Member")
response = self.client.post(
reverse('organization:arrest_warrant_capability', kwargs={
'capability_id': capability.id
}),
data={'character_to_imprison_id': to_arrest.id},
follow=True
)
self.assertRedirects(
response, capability.organization.get_absolute_url())
warrant = CharacterEvent.objects.get(
character=to_arrest,
active=True,
organization_id=101,
type=CharacterEvent.ARREST_WARRANT
)
response = self.client.post(
reverse('organization:arrest_warrant_revoke_capability', kwargs={
'capability_id': capability.id,
'warrant_id': warrant.id
}),
data={},
follow=True
)
self.assertRedirects(
response, capability.organization.get_absolute_url())
warrant.refresh_from_db()
self.assertFalse(warrant.active)
| agpl-3.0 | -8,755,682,711,672,305,000 | 30.793651 | 77 | 0.589116 | false |
stdgy/adventofcode | 2016/days/8/solution.py | 1 | 2860 | class Screen(object):
def count_lit_pixels(self):
count = 0
for row in self.monitor:
for val in row:
if val == '1':
count = count + 1
return count
def display(self):
for row in self.monitor:
print(''.join(row))
def draw(self, width, height):
for x in range(height):
for y in range(width):
self.monitor[x][y] = '1'
def shift_row_right(self, row, shift):
r = self.monitor[row]
self.monitor[row] = self._shift_list(r, shift)
def shift_col_down(self, col, shift):
# Copy column into a row to make it easier to deal with
new_column = self._copy_column(col)
# Shift values in copied column
new_column = self._shift_list(new_column, shift)
# Put the modified values back into their home column
for idx, val in enumerate(new_column):
self.monitor[idx][col] = val
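    # Rotates a list to the right by shift_amount, e.g.
    # _shift_list(['a', 'b', 'c', 'd'], 1) -> ['d', 'a', 'b', 'c'].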
def _shift_list(self, l, shift_amount):
new_list = []
list_length = len(l)
for idx in range(list_length):
val = l[(idx - shift_amount) % list_length]
new_list.append(val)
return new_list
def _copy_column(self, col):
column = []
for row in range(len(self.monitor)):
column.append(self.monitor[row][col])
return column
def __init__(self):
self.monitor = []
# Initialize monitor to all off
for row in range(6):
row = []
for col in range(50):
row.append('0')
self.monitor.append(row)
def parse_rect(line):
dimensions = line[4:]
dimensions = dimensions.split('x')
dimensions = map(int, dimensions)
return dimensions
def parse_rotate_row(line):
rotate_params = line[13:]
rotate_params = rotate_params.split('by')
rotate_params = map(str.strip, rotate_params)
rotate_params = map(int, rotate_params)
return rotate_params
def parse_rotate_col(line):
rotate_params = line[16:]
rotate_params = rotate_params.split('by')
rotate_params = map(str.strip, rotate_params)
rotate_params = map(int, rotate_params)
return rotate_params
if __name__ == '__main__':
s = Screen()
with open('input.txt') as file:
for line in file:
if 'rect' == line[:4]:
width, height = parse_rect(line)
s.draw(width, height)
elif 'rotate row' == line[:10]:
row, shift = parse_rotate_row(line)
s.shift_row_right(row, shift)
elif 'rotate column' == line[:13]:
col, shift = parse_rotate_col(line)
s.shift_col_down(col, shift)
s.display()
print('There are {} pixels lit on the display.'.format(s.count_lit_pixels()))
| mit | -7,901,886,625,612,999,000 | 29.425532 | 81 | 0.552098 | false |
jweyrich/livemgr-webui | webui/livemgr/models/dummy.py | 1 | 1271 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Jardel Weyrich
#
# This file is part of livemgr-webui.
#
# livemgr-webui is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# livemgr-webui is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with livemgr-webui. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jardel Weyrich <[email protected]>
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Dashboard(models.Model):
class Meta:
app_label = 'livemgr'
db_table = ''
managed = False
verbose_name = _('dashboard')
permissions = (
("see_dashboard", "Can see dashboard"),
)
class Support(models.Model):
class Meta:
app_label = 'livemgr'
db_table = ''
managed = False
verbose_name = _('support')
permissions = (
("see_support", "Can see support"),
)
| gpl-3.0 | 47,185,329,086,548,020 | 27.886364 | 71 | 0.711251 | false |
zhangpf/image-tget | tget/filemanager.py | 1 | 7274 | #
# -*-encoding:utf-8-*-
import os
import hashlib
import logging
from twisted.python import log
from twisted.internet import reactor, defer
from bitfield import BitField
from utils import sleep
class BTFileError (Exception) :
pass
class BTHashTestError (Exception):
pass
class BTFile:
def __init__(self, metainfo, saveDir):
self.metainfo = metainfo
self.path = os.path.join(saveDir, metainfo.path)
self.length = metainfo.file_length
self.piece_len = metainfo.piece_length
self.hash_array = metainfo.pieces_hash
self.pieces_size = metainfo.pieces_size
self.fd = None
logging.info("the saved file path is %s" % self.path)
if os.path.exists(self.path):
self.fd = open(self.path, 'rb+')
else:
dirname = os.path.dirname(self.path)
if not os.path.exists(dirname):
os.makedirs(dirname)
self.fd = open(self.path, 'wb')
#print self.abs_pos0, self.abs_pos1, self.piece_len, self.idx0_piece, self.idx1_piece
h, t = os.path.split(self.path)
if not os.path.exists(h):
os.makedirs(h)
def write(self, begin, data):
if begin < 0:
raise BTFileError("Invalid write begin position.")
elif len(data) + begin > self.length:
raise BTFileError("Invalid write end position.")
self.fd.seek(begin)
self.fd.write(data)
def read(self, begin, length):
if length < 0:
raise BTFileError("Invalid read length.")
elif begin < 0:
raise BTFileError("Invalid read begin position.")
elif begin + length > self.length:
raise BTFileError("Invalid read end position.")
self.fd.seek(begin)
data = self.fd.read(length)
return data
def close(self):
if self.fd :
self.fd.close()
self.fd = None
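    # Scans the file on disk piece by piece, hash-testing each piece to build
    # the "have" and "need" bitfields used to resume an interrupted download.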
def get_bitfield(self):
bf_need = BitField(self.pieces_size)
bf_have = BitField(self.pieces_size)
for i in xrange(self.pieces_size):
try :
data = self[i]
if data and self.do_hash_test(i, data):
bf_have[i] = 1
bf_need[i] = 0
else:
bf_have[i] = 0
bf_need[i] = 1
except BTFileError as error :
pass
print bf_have
print bf_need
return bf_have, bf_need
def do_hash_test(self, idx, data):
return hashlib.sha1(data).digest() == self.hash_array[idx]
def __getitem__(self, idx):
end = min((idx + 1) * self.piece_len, self.length)
return self.read(idx * self.piece_len, end - idx * self.piece_len)
def __setitem__(self, idx, data):
self.write(idx * self.piece_len, data)
class BTFileManager :
def __init__(self, btm):
self.btm = btm
self.config = btm.config
metainfo = self.config.metainfo
self.metainfo = metainfo
self.piece_length = metainfo.piece_length
self.pieces_size = metainfo.pieces_size
self.btfile = BTFile(metainfo, self.btm.app.save_dir)
self.bitfield_have, self.bitfield_need = self.btfile.get_bitfield()
self.buffer_max_size = 100 * 2**20 / self.piece_length
self.buffer = {}
self.buffer_record = []
self.buffer_dirty = {}
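        # self.buffer is a read cache of pieces (roughly 100 MiB worth),
        # self.buffer_record keeps the access order used for eviction, and
        # self.buffer_dirty holds written pieces waiting to be flushed to disk.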
def start(self) :
self.status = 'started'
reactor.callLater(2, self.deamon_write)
reactor.callLater(2, self.deamon_read)
def stop(self) :
for idx, data in self.buffer_dirty.iteritems():
self.write(idx, data)
self.buffer_dirty.clear()
self.buffer.clear()
del self.buffer_record[:]
self.status = 'stopped'
@defer.inlineCallbacks
def deamon_write(self):
while self.status == 'started':
self.__thread_write()
yield sleep(2)
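    # Writes the dirty pieces to disk in a reactor worker thread; the callback
    # only removes an entry from buffer_dirty if it was not overwritten while
    # the thread was running (checked with "data is self.buffer_dirty[idx]").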
def __thread_write(self):
        # The attribute name must be mangled by hand in the hasattr() check,
        # otherwise the attribute is never found and the guard below is useless.
        if not hasattr(self, '_BTFileManager__thread_write_status'):
            self.__thread_write_status = 'stopped'
if self.__thread_write_status == 'running' :
return
if not self.buffer_dirty :
return
bfd = self.buffer_dirty.copy()
def call_in_thread():
# Writing to disk
for idx in sorted(bfd.keys()) :
data = bfd[idx]
self.write(idx, data)
reactor.callFromThread(call_from_thread)
def call_from_thread():
self.__thread_write_status = 'stopped'
for idx, data in bfd.iteritems() :
if data is self.buffer_dirty[idx] :
del self.buffer_dirty[idx]
if self.__thread_write_status == 'stopped' :
self.__thread_write_status = 'running'
reactor.callInThread(call_in_thread)
@defer.inlineCallbacks
def deamon_read(self):
while self.status == 'started':
size = len(self.buffer)
if size > self.buffer_max_size :
remove_count = size - self.buffer_max_size
remove_count += self.buffer_max_size / 5
for idx in self.buffer_record[:remove_count] :
del self.buffer[idx]
del self.buffer_record[:remove_count]
yield sleep(10)
############################################################
def read_piece(self, index) :
if not (0 <= index < self.pieces_size) :
raise BTFileError('index is out of range')
if not self.bitfield_have[index] :
raise BTFileError('index is not downloaded')
if index in self.buffer :
data = self.buffer[index]
self.buffer_record.remove(index)
self.buffer_record.append(index)
return data
else:
for idx in [index, index+1, index+2, index+3] :
if 0 <= idx < self.pieces_size and idx not in self.buffer :
data = self.read(idx)
assert data
self.buffer[idx] = data
self.buffer_record.append(idx)
data = self.read_piece(index)
return data
def write_piece(self, index, data) :
if not (0 <= index < self.pieces_size) :
raise BTFileError('index is out of range')
if not self.bitfield_need[index] :
raise BTFileError('index is not need')
if not self.btfile.do_hash_test(index, data):
raise BTHashTestError()
else:
self.bitfield_have[index] = 1
self.bitfield_need[index] = 0
if index in self.buffer :
self.buffer[index] = data
self.buffer_dirty[index] = data
if self.bitfield_have.allOne():
logging.info('almost done!')
return True
def read(self, index):
if index in self.buffer_dirty:
return self.buffer_dirty[index]
return self.btfile[index]
def write(self, index, data) :
self.btfile[index] = data
| apache-2.0 | -8,943,494,309,047,883,000 | 29.057851 | 93 | 0.537256 | false |
odoo-l10n-ar/l10n_ar_wsafip_fe | addons/l10n_ar_wsafip_fe/wizard/validate_invoices.py | 1 | 2508 | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
import logging
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
class validate_invoices(osv.osv_memory):
_name = 'l10n_ar_wsafip_fe.validate_invoices'
_description = 'Generate CAE from validated invoices'
_columns = {
'journal_id': fields.many2one(
'account.journal', 'Journal', required=True),
'first_invoice_number': fields.integer(
'First invoice number', required=True),
'last_invoice_number': fields.integer(
'Last invoice number', required=True),
}
_defaults = {
'first_invoice_number': 1,
'last_invoice_number': 1,
}
def onchange_journal_id(self, cr, uid, ids, first_invoice_number,
journal_id):
journal_obj = self.pool.get('account.journal')
res = {}
if journal_id:
num_items = journal_obj.browse(cr, uid, journal_id
).sequence_id.number_next - 1
res['value'] = {
'first_invoice_number': min(first_invoice_number, num_items),
'last_invoice_number': num_items,
}
return res
def execute(self, cr, uid, ids, context=None):
context = context or {}
invoice_obj = self.pool.get('account.invoice')
for qi in self.browse(cr, uid, ids):
journal_id = qi.journal_id.id
number_format = "%s%%0%sd%s" % (
qi.journal_id.sequence_id.prefix,
qi.journal_id.sequence_id.padding,
qi.journal_id.sequence_id.suffix)
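            # e.g. a sequence with prefix "0001-", padding 8 and no suffix
            # (hypothetical values) yields the format string "0001-%08d".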
            # Get the list of invoices that need a CAE and are already validated.
inv_ids = invoice_obj.search(cr, uid,
[('journal_id', '=', journal_id),
('state', '!=', 'draft'),
('afip_cae', '=', False),
('number', '>=', number_format %
qi.first_invoice_number),
('number', '<=', number_format %
qi.last_invoice_number)],
order='date_invoice')
invoice_obj.action_retrieve_cae(cr, uid, inv_ids)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,137,015,824,672,131,600 | 37.569231 | 78 | 0.493418 | false |
bcgov/gwells | app/backend/wells/migrations/0104_auto_20191121_2152.py | 1 | 21615 | # Generated by Django 2.2.7 on 2019-11-21 21:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wells', '0103_auto_20191016_2137'),
]
operations = [
migrations.AlterField(
model_name='activitysubmission',
name='aquifer_lithology',
field=models.ForeignKey(blank=True, db_column='aquifer_lithology_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.AquiferLithologyCode', verbose_name='Aquifer Lithology'),
),
migrations.AlterField(
model_name='activitysubmission',
name='boundary_effect',
field=models.ForeignKey(blank=True, db_column='boundary_effect_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.BoundaryEffectCode', verbose_name='Boundary Effect'),
),
migrations.AlterField(
model_name='activitysubmission',
name='filter_pack_material',
field=models.ForeignKey(blank=True, db_column='filter_pack_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.FilterPackMaterialCode', verbose_name='Filter Pack Material'),
),
migrations.AlterField(
model_name='activitysubmission',
name='filter_pack_material_size',
field=models.ForeignKey(blank=True, db_column='filter_pack_material_size_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.FilterPackMaterialSizeCode', verbose_name='Filter Pack Material Size'),
),
migrations.AlterField(
model_name='activitysubmission',
name='ground_elevation_method',
field=models.ForeignKey(blank=True, db_column='ground_elevation_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.GroundElevationMethodCode', verbose_name='Elevation Determined By'),
),
migrations.AlterField(
model_name='activitysubmission',
name='intended_water_use',
field=models.ForeignKey(blank=True, db_column='intended_water_use_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.IntendedWaterUseCode', verbose_name='Intended Water Use'),
),
migrations.AlterField(
model_name='activitysubmission',
name='land_district',
field=models.ForeignKey(blank=True, db_column='land_district_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.LandDistrictCode', verbose_name='Land District'),
),
migrations.AlterField(
model_name='activitysubmission',
name='liner_material',
field=models.ForeignKey(blank=True, db_column='liner_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.LinerMaterialCode', verbose_name='Liner Material'),
),
migrations.AlterField(
model_name='activitysubmission',
name='owner_province_state',
field=models.ForeignKey(blank=True, db_column='province_state_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ProvinceStateCode', verbose_name='Province'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_bottom',
field=models.ForeignKey(blank=True, db_column='screen_bottom_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenBottomCode', verbose_name='Bottom'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_intake_method',
field=models.ForeignKey(blank=True, db_column='screen_intake_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenIntakeMethodCode', verbose_name='Intake'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_material',
field=models.ForeignKey(blank=True, db_column='screen_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenMaterialCode', verbose_name='Material'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_opening',
field=models.ForeignKey(blank=True, db_column='screen_opening_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenOpeningCode', verbose_name='Opening'),
),
migrations.AlterField(
model_name='activitysubmission',
name='screen_type',
field=models.ForeignKey(blank=True, db_column='screen_type_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenTypeCode', verbose_name='Type'),
),
migrations.AlterField(
model_name='activitysubmission',
name='surface_seal_material',
field=models.ForeignKey(blank=True, db_column='surface_seal_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.SurfaceSealMaterialCode', verbose_name='Surface Seal Material'),
),
migrations.AlterField(
model_name='activitysubmission',
name='surface_seal_method',
field=models.ForeignKey(blank=True, db_column='surface_seal_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.SurfaceSealMethodCode', verbose_name='Surface Seal Installation Method'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well',
field=models.ForeignKey(blank=True, db_column='well_tag_number', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.Well'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_activity_type',
field=models.ForeignKey(db_column='well_activity_code', on_delete=django.db.models.deletion.PROTECT, to='submissions.WellActivityCode', verbose_name='Type of Work'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_class',
field=models.ForeignKey(blank=True, db_column='well_class_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellClassCode', verbose_name='Well Class'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_disinfected_status',
field=models.ForeignKey(blank=True, db_column='well_disinfected_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellDisinfectedCode', verbose_name='Well Disinfected Code'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_orientation_status',
field=models.ForeignKey(blank=True, db_column='well_orientation_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellOrientationCode', verbose_name='Well Orientation Code'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_publication_status',
field=models.ForeignKey(db_column='well_publication_status_code', default='Published', on_delete=django.db.models.deletion.PROTECT, to='wells.WellPublicationStatusCode', verbose_name='Well Publication Status'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_status',
field=models.ForeignKey(blank=True, db_column='well_status_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellStatusCode', verbose_name='Well Status'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_subclass',
field=models.ForeignKey(blank=True, db_column='well_subclass_guid', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellSubclassCode', verbose_name='Well Subclass'),
),
migrations.AlterField(
model_name='activitysubmission',
name='well_yield_unit',
field=models.ForeignKey(blank=True, db_column='well_yield_unit_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellYieldUnitCode'),
),
migrations.AlterField(
model_name='activitysubmission',
name='yield_estimation_method',
field=models.ForeignKey(blank=True, db_column='yield_estimation_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.YieldEstimationMethodCode', verbose_name='Estimation Method'),
),
migrations.AlterField(
model_name='casing',
name='casing_code',
field=models.ForeignKey(db_column='casing_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.CasingCode', verbose_name='Casing Type Code'),
),
migrations.AlterField(
model_name='casing',
name='casing_material',
field=models.ForeignKey(blank=True, db_column='casing_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.CasingMaterialCode', verbose_name='Casing Material Code'),
),
migrations.AlterField(
model_name='casing',
name='drive_shoe_status',
field=models.ForeignKey(blank=True, db_column='drive_shoe_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.DriveShoeCode', verbose_name='Drive Shoe Code'),
),
migrations.AlterField(
model_name='lithologydescription',
name='bedrock_material',
field=models.ForeignKey(blank=True, db_column='bedrock_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.BedrockMaterialCode', verbose_name='Bedrock Material'),
),
migrations.AlterField(
model_name='lithologydescription',
name='bedrock_material_descriptor',
field=models.ForeignKey(blank=True, db_column='bedrock_material_descriptor_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.BedrockMaterialDescriptorCode', verbose_name='Descriptor'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_colour',
field=models.ForeignKey(blank=True, db_column='lithology_colour_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyColourCode', verbose_name='Colour'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_description',
field=models.ForeignKey(blank=True, db_column='lithology_description_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyDescriptionCode', verbose_name='Description'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_hardness',
field=models.ForeignKey(blank=True, db_column='lithology_hardness_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyHardnessCode', verbose_name='Hardness'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_material',
field=models.ForeignKey(blank=True, db_column='lithology_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyMaterialCode', verbose_name='Material'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_moisture',
field=models.ForeignKey(blank=True, db_column='lithology_moisture_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyMoistureCode', verbose_name='Moisture'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_structure',
field=models.ForeignKey(blank=True, db_column='lithology_structure_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.LithologyStructureCode', verbose_name='Bedding'),
),
migrations.AlterField(
model_name='lithologydescription',
name='secondary_surficial_material',
field=models.ForeignKey(blank=True, db_column='secondary_surficial_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='secondary_surficial_material_set', to='gwells.SurficialMaterialCode', verbose_name='Secondary Surficial Material'),
),
migrations.AlterField(
model_name='lithologydescription',
name='surficial_material',
field=models.ForeignKey(blank=True, db_column='surficial_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='surficial_material_set', to='gwells.SurficialMaterialCode', verbose_name='Surficial Material'),
),
migrations.AlterField(
model_name='lithologydescription',
name='water_bearing_estimated_flow_units',
field=models.ForeignKey(blank=True, db_column='well_yield_unit_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellYieldUnitCode', verbose_name='Units'),
),
migrations.AlterField(
model_name='screen',
name='assembly_type',
field=models.ForeignKey(blank=True, db_column='screen_assembly_type_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenAssemblyTypeCode'),
),
migrations.AlterField(
model_name='well',
name='aquifer_lithology',
field=models.ForeignKey(blank=True, db_column='aquifer_lithology_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.AquiferLithologyCode', verbose_name='Aquifer Lithology'),
),
migrations.AlterField(
model_name='well',
name='boundary_effect',
field=models.ForeignKey(blank=True, db_column='boundary_effect_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.BoundaryEffectCode', verbose_name='Boundary Effect'),
),
migrations.AlterField(
model_name='well',
name='drilling_company',
field=models.ForeignKey(blank=True, db_column='drilling_company_guid', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.DrillingCompany', verbose_name='Drilling Company'),
),
migrations.AlterField(
model_name='well',
name='filter_pack_material',
field=models.ForeignKey(blank=True, db_column='filter_pack_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.FilterPackMaterialCode', verbose_name='Filter Pack Material'),
),
migrations.AlterField(
model_name='well',
name='filter_pack_material_size',
field=models.ForeignKey(blank=True, db_column='filter_pack_material_size_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.FilterPackMaterialSizeCode', verbose_name='Filter Pack Material Size'),
),
migrations.AlterField(
model_name='well',
name='ground_elevation_method',
field=models.ForeignKey(blank=True, db_column='ground_elevation_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.GroundElevationMethodCode', verbose_name='Elevation Determined By'),
),
migrations.AlterField(
model_name='well',
name='intended_water_use',
field=models.ForeignKey(blank=True, db_column='intended_water_use_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.IntendedWaterUseCode', verbose_name='Intended Water Use'),
),
migrations.AlterField(
model_name='well',
name='land_district',
field=models.ForeignKey(blank=True, db_column='land_district_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.LandDistrictCode', verbose_name='Land District'),
),
migrations.AlterField(
model_name='well',
name='liner_material',
field=models.ForeignKey(blank=True, db_column='liner_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.LinerMaterialCode', verbose_name='Liner Material'),
),
migrations.AlterField(
model_name='well',
name='owner_province_state',
field=models.ForeignKey(blank=True, db_column='province_state_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ProvinceStateCode', verbose_name='Province'),
),
migrations.AlterField(
model_name='well',
name='screen_bottom',
field=models.ForeignKey(blank=True, db_column='screen_bottom_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenBottomCode', verbose_name='Bottom'),
),
migrations.AlterField(
model_name='well',
name='screen_intake_method',
field=models.ForeignKey(blank=True, db_column='screen_intake_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenIntakeMethodCode', verbose_name='Intake Method'),
),
migrations.AlterField(
model_name='well',
name='screen_material',
field=models.ForeignKey(blank=True, db_column='screen_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenMaterialCode', verbose_name='Material'),
),
migrations.AlterField(
model_name='well',
name='screen_opening',
field=models.ForeignKey(blank=True, db_column='screen_opening_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenOpeningCode', verbose_name='Opening'),
),
migrations.AlterField(
model_name='well',
name='screen_type',
field=models.ForeignKey(blank=True, db_column='screen_type_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='gwells.ScreenTypeCode', verbose_name='Type'),
),
migrations.AlterField(
model_name='well',
name='surface_seal_material',
field=models.ForeignKey(blank=True, db_column='surface_seal_material_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.SurfaceSealMaterialCode', verbose_name='Surface Seal Material'),
),
migrations.AlterField(
model_name='well',
name='surface_seal_method',
field=models.ForeignKey(blank=True, db_column='surface_seal_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.SurfaceSealMethodCode', verbose_name='Surface Seal Installation Method'),
),
migrations.AlterField(
model_name='well',
name='well_class',
field=models.ForeignKey(db_column='well_class_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellClassCode', verbose_name='Well Class'),
),
migrations.AlterField(
model_name='well',
name='well_disinfected_status',
field=models.ForeignKey(blank=True, db_column='well_disinfected_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellDisinfectedCode', verbose_name='Well Disinfected Code'),
),
migrations.AlterField(
model_name='well',
name='well_orientation_status',
field=models.ForeignKey(blank=True, db_column='well_orientation_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellOrientationCode', verbose_name='Well Orientation Code'),
),
migrations.AlterField(
model_name='well',
name='well_publication_status',
field=models.ForeignKey(db_column='well_publication_status_code', default='Published', on_delete=django.db.models.deletion.PROTECT, to='wells.WellPublicationStatusCode', verbose_name='Well Publication Status'),
),
migrations.AlterField(
model_name='well',
name='well_status',
field=models.ForeignKey(blank=True, db_column='well_status_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellStatusCode', verbose_name='Well Status'),
),
migrations.AlterField(
model_name='well',
name='well_subclass',
field=models.ForeignKey(blank=True, db_column='well_subclass_guid', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellSubclassCode', verbose_name='Well Subclass'),
),
migrations.AlterField(
model_name='well',
name='well_yield_unit',
field=models.ForeignKey(blank=True, db_column='well_yield_unit_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.WellYieldUnitCode'),
),
migrations.AlterField(
model_name='well',
name='yield_estimation_method',
field=models.ForeignKey(blank=True, db_column='yield_estimation_method_code', null=True, on_delete=django.db.models.deletion.PROTECT, to='wells.YieldEstimationMethodCode', verbose_name='Estimation Method'),
),
]
| apache-2.0 | 4,704,822,000,817,943,000 | 61.834302 | 280 | 0.661161 | false |
CIRCL/AIL-framework | update/v1.5/Update-ARDB_Metadata.py | 1 | 6117 | #!/usr/bin/env python3
# -*-coding:UTF-8 -*
import os
import sys
import time
import redis
sys.path.append(os.path.join(os.environ['AIL_BIN'], 'lib/'))
import ConfigLoader
def update_tracked_terms(main_key, tracked_container_key):
for tracked_item in r_serv_term.smembers(main_key):
all_items = r_serv_term.smembers(tracked_container_key.format(tracked_item))
for item_path in all_items:
if PASTES_FOLDER in item_path:
new_item_path = item_path.replace(PASTES_FOLDER, '', 1)
r_serv_term.sadd(tracked_container_key.format(tracked_item), new_item_path)
r_serv_term.srem(tracked_container_key.format(tracked_item), item_path)
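# For every item tagged with the given hash type, drop the absolute
# PASTES_FOLDER prefix from its '<type>_paste:' and 'hash_paste:' metadata
# keys, merging the old set into the new one when both keys already exist.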
def update_hash_item(has_type):
#get all hash items:
all_hash_items = r_serv_tag.smembers('infoleak:automatic-detection=\"{}\"'.format(has_type))
for item_path in all_hash_items:
if PASTES_FOLDER in item_path:
base64_key = '{}_paste:{}'.format(has_type, item_path)
hash_key = 'hash_paste:{}'.format(item_path)
if r_serv_metadata.exists(base64_key):
new_base64_key = base64_key.replace(PASTES_FOLDER, '', 1)
res = r_serv_metadata.renamenx(base64_key, new_base64_key)
if res == 0:
print('same key, double name: {}'.format(item_path))
# fusion
all_key = r_serv_metadata.smembers(base64_key)
for elem in all_key:
r_serv_metadata.sadd(new_base64_key, elem)
r_serv_metadata.srem(base64_key, elem)
if r_serv_metadata.exists(hash_key):
new_hash_key = hash_key.replace(PASTES_FOLDER, '', 1)
res = r_serv_metadata.renamenx(hash_key, new_hash_key)
if res == 0:
print('same key, double name: {}'.format(item_path))
# fusion
all_key = r_serv_metadata.smembers(hash_key)
for elem in all_key:
r_serv_metadata.sadd(new_hash_key, elem)
r_serv_metadata.srem(hash_key, elem)
if __name__ == '__main__':
start_deb = time.time()
config_loader = ConfigLoader.ConfigLoader()
PASTES_FOLDER = os.path.join(os.environ['AIL_HOME'], config_loader.get_config_str("Directories", "pastes")) + '/'
r_serv = config_loader.get_redis_conn("ARDB_DB")
r_serv_metadata = config_loader.get_redis_conn("ARDB_Metadata")
r_serv_tag = config_loader.get_redis_conn("ARDB_Tags")
r_serv_term = config_loader.get_redis_conn("ARDB_TermFreq")
r_serv_onion = config_loader.get_redis_conn("ARDB_Onion")
config_loader = None
r_serv.set('ail:current_background_script', 'metadata')
## Update metadata ##
print('Updating ARDB_Metadata ...')
index = 0
start = time.time()
#update stats
r_serv.set('ail:current_background_script_stat', 0)
# Update base64
update_hash_item('base64')
#update stats
r_serv.set('ail:current_background_script_stat', 20)
# Update binary
update_hash_item('binary')
#update stats
r_serv.set('ail:current_background_script_stat', 40)
# Update binary
update_hash_item('hexadecimal')
#update stats
r_serv.set('ail:current_background_script_stat', 60)
total_onion = r_serv_tag.scard('infoleak:submission=\"crawler\"')
nb_updated = 0
last_progress = 0
# Update onion metadata
all_crawled_items = r_serv_tag.smembers('infoleak:submission=\"crawler\"')
for item_path in all_crawled_items:
domain = None
if PASTES_FOLDER in item_path:
old_item_metadata = 'paste_metadata:{}'.format(item_path)
item_path = item_path.replace(PASTES_FOLDER, '', 1)
new_item_metadata = 'paste_metadata:{}'.format(item_path)
res = r_serv_metadata.renamenx(old_item_metadata, new_item_metadata)
#key already exist
if res == 0:
r_serv_metadata.delete(old_item_metadata)
# update domain port
domain = r_serv_metadata.hget(new_item_metadata, 'domain')
if domain:
if domain[-3:] != ':80':
r_serv_metadata.hset(new_item_metadata, 'domain', '{}:80'.format(domain))
super_father = r_serv_metadata.hget(new_item_metadata, 'super_father')
if super_father:
if PASTES_FOLDER in super_father:
r_serv_metadata.hset(new_item_metadata, 'super_father', super_father.replace(PASTES_FOLDER, '', 1))
father = r_serv_metadata.hget(new_item_metadata, 'father')
if father:
if PASTES_FOLDER in father:
r_serv_metadata.hset(new_item_metadata, 'father', father.replace(PASTES_FOLDER, '', 1))
nb_updated += 1
progress = int((nb_updated * 30) /total_onion)
print('{}/{} updated {}%'.format(nb_updated, total_onion, progress + 60))
# update progress stats
if progress != last_progress:
r_serv.set('ail:current_background_script_stat', progress + 60)
last_progress = progress
#update stats
r_serv.set('ail:current_background_script_stat', 90)
## update tracked term/set/regex
# update tracked term
update_tracked_terms('TrackedSetTermSet', 'tracked_{}')
#update stats
r_serv.set('ail:current_background_script_stat', 93)
# update tracked set
update_tracked_terms('TrackedSetSet', 'set_{}')
#update stats
r_serv.set('ail:current_background_script_stat', 96)
# update tracked regex
update_tracked_terms('TrackedRegexSet', 'regex_{}')
#update stats
r_serv.set('ail:current_background_script_stat', 100)
##
end = time.time()
print('Updating ARDB_Metadata Done => {} paths: {} s'.format(index, end - start))
print()
r_serv.sadd('ail:update_v1.5', 'metadata')
##
#Key, Dynamic Update
##
#paste_children
#nb_seen_hash, base64_hash, binary_hash
#paste_onion_external_links
#misp_events, hive_cases
##
| agpl-3.0 | -8,689,213,696,531,513,000 | 35.849398 | 117 | 0.600294 | false |
zippynk/ripoffbot | tools/printDatabase.py | 1 | 2552 | #!/usr/bin/env python
# Tool for viewing ripoffbot databases.
# Created by Nathan Krantz-Fire (a.k.a zippynk).
# Ships with ripoffbot - http://github.com/zippynk/ripoffbot
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import pickle
from datetime import datetime
thisVersion = [0,4,0] # The version of ripoffbot, as a list of numbers (eg [0,1,0] means "v0.1.0"). A "d" at the end means that the current version is a development version and very well may break at some point.
# Begin dev edition code.
if "d" in thisVersion:
print "WARNING! This is a development version of ripoffbot. Proceeding may corrupt ripoffbot database files, crash, and/or have other consequences. Proceed at your own risk."
if not raw_input("Are you sure you want to proceed? (y/n) ").lower() in ["yes","y","true","continue","yea","yeah","yup","sure"]:
print "Aborting."
exit(0)
# End Dev Edition Code.
if os.path.isfile(os.path.expanduser("~") +'/.ripoffbot_database.p'):
dbLoad = pickle.load(open(os.path.expanduser("~") +'/.ripoffbot_database.p','rb'))
if dbLoad['version'] == [0,2,0]:
messages = dbLoad['messages']
elif dbLoad['version'] == [0,3,0]:
messages = dbLoad['messages']
elif dbLoad['version'] == [0,3,1]:
messages = dbLoad['messages']
elif dbLoad['version'] == [0,4,0]:
messages = dbLoad['messages']
else:
print "This database was created with an old or unknown version of ripoffbot. Please use the newest version (or correct fork) and try again. If this is not possible or does not work, move or delete the file '~/.ripoffbot_database.p' and re-run ripoffbot. A new database will be created automatically. You may also want to try running recoverDeveloperVersion.py to recover a script marked with a developer version tag."
exit(0)
else:
print "No database found."
exit(0)
def saveDb(): # not needed for current functionality, but keeping just in case
if USEDB == True:
pickle.dump({'messages':messages,'version':thisVersion}, open(os.path.expanduser("~") +'/.ripoffbot_database.p','wb'))
print "Created with ripoffbot version: " +str(dbLoad['version'])
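# Each stored message tuple appears to be laid out as:
# (sender, recipient, text, deliver_privately, sent_publicly, timestamp).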
for i in messages:
    print "{0} -> {1} - {2} ({3}, {4}): {5}".format(i[0],i[1],str(i[5]),"Sent publicly" if i[4] else "Sent privately","To be delivered privately" if i[3] else "To be delivered publicly",i[2])
| mpl-2.0 | 4,806,929,526,086,902,000 | 51.081633 | 426 | 0.680251 | false |
mongodb/motor | test/tornado_tests/test_motor_transaction.py | 1 | 7288 | # Copyright 2018-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import re
from bson import json_util
from bson.json_util import JSONOptions
from pymongo.read_concern import ReadConcern
from pymongo.results import (BulkWriteResult,
InsertManyResult,
InsertOneResult,
UpdateResult, DeleteResult)
from motor.motor_tornado import (MotorCommandCursor,
MotorCursor,
MotorLatentCommandCursor)
from test.utils import TestListener
from test.version import Version
"""Test Motor, an asynchronous driver for MongoDB and Tornado."""
import unittest
from pymongo import (ReadPreference,
WriteConcern)
from pymongo.errors import ConnectionFailure, OperationFailure
from tornado.testing import gen_test
from motor import core
from test.test_environment import env
from test.tornado_tests import MotorTest
class PatchSessionTimeout(object):
"""Patches the client_session's with_transaction timeout for testing."""
def __init__(self, mock_timeout):
self.real_timeout = core._WITH_TRANSACTION_RETRY_TIME_LIMIT
self.mock_timeout = mock_timeout
def __enter__(self):
core._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.mock_timeout
return self
def __exit__(self, exc_type, exc_val, exc_tb):
core._WITH_TRANSACTION_RETRY_TIME_LIMIT = self.real_timeout
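# The tests below use "with PatchSessionTimeout(0):" to make with_transaction
# give up retrying immediately, so the abort/commit retry paths can be
# asserted on deterministically.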
class TestTransactionsConvenientAPI(MotorTest):
@env.require_transactions
@gen_test
async def test_basic(self):
# Create the collection.
await self.collection.insert_one({})
async def coro(session):
await self.collection.insert_one({'_id': 1}, session=session)
async with await self.cx.start_session() as s:
await s.with_transaction(coro,
read_concern=ReadConcern('local'),
write_concern=WriteConcern('majority'),
read_preference=ReadPreference.PRIMARY,
max_commit_time_ms=30000)
doc = await self.collection.find_one({'_id': 1})
self.assertEqual(doc, {'_id': 1})
@env.require_transactions
@gen_test
async def test_callback_raises_custom_error(self):
class _MyException(Exception):
pass
async def coro_raise_error(_):
raise _MyException()
async with await self.cx.start_session() as s:
with self.assertRaises(_MyException):
await s.with_transaction(coro_raise_error)
@env.require_transactions
@gen_test
async def test_callback_returns_value(self):
async def callback(_):
return 'Foo'
async with await self.cx.start_session() as s:
self.assertEqual(await s.with_transaction(callback), 'Foo')
await self.db.test.insert_one({})
async def callback(session):
await self.db.test.insert_one({}, session=session)
return 'Foo'
async with await self.cx.start_session() as s:
self.assertEqual(await s.with_transaction(callback), 'Foo')
@env.require_transactions
@gen_test
async def test_callback_not_retried_after_timeout(self):
listener = TestListener()
client = self.motor_client(event_listeners=[listener])
coll = client[self.db.name].test
async def callback(session):
await coll.insert_one({}, session=session)
err = {
'ok': 0,
'errmsg': 'Transaction 7819 has been aborted.',
'code': 251,
'codeName': 'NoSuchTransaction',
'errorLabels': ['TransientTransactionError'],
}
raise OperationFailure(err['errmsg'], err['code'], err)
# Create the collection.
await coll.insert_one({})
listener.results.clear()
async with await client.start_session() as s:
with PatchSessionTimeout(0):
with self.assertRaises(OperationFailure):
await s.with_transaction(callback)
self.assertEqual(listener.started_command_names(),
['insert', 'abortTransaction'])
@env.require_transactions
@gen_test
async def test_callback_not_retried_after_commit_timeout(self):
listener = TestListener()
client = self.motor_client(event_listeners=[listener])
coll = client[self.db.name].test
async def callback(session):
await coll.insert_one({}, session=session)
# Create the collection.
await coll.insert_one({})
await self.set_fail_point(client, {
'configureFailPoint': 'failCommand', 'mode': {'times': 1},
'data': {
'failCommands': ['commitTransaction'],
'errorCode': 251, # NoSuchTransaction
}})
listener.results.clear()
async with await client.start_session() as s:
with PatchSessionTimeout(0):
with self.assertRaises(OperationFailure):
await s.with_transaction(callback)
self.assertEqual(listener.started_command_names(),
['insert', 'commitTransaction'])
await self.set_fail_point(client, {
'configureFailPoint': 'failCommand', 'mode': 'off'})
@env.require_transactions
@gen_test
async def test_commit_not_retried_after_timeout(self):
listener = TestListener()
client = self.motor_client(event_listeners=[listener])
coll = client[self.db.name].test
async def callback(session):
await coll.insert_one({}, session=session)
# Create the collection.
await coll.insert_one({})
await self.set_fail_point(client, {
'configureFailPoint': 'failCommand', 'mode': {'times': 2},
'data': {
'failCommands': ['commitTransaction'],
'closeConnection': True}})
listener.results.clear()
async with await client.start_session() as s:
with PatchSessionTimeout(0):
with self.assertRaises(ConnectionFailure):
await s.with_transaction(callback)
# One insert for the callback and two commits (includes the automatic
# retry).
self.assertEqual(listener.started_command_names(),
['insert', 'commitTransaction', 'commitTransaction'])
        await self.set_fail_point(client, {
'configureFailPoint': 'failCommand', 'mode': 'off'})
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,682,843,098,331,572,700 | 34.55122 | 78 | 0.609221 | false |
khangnguyen/LeapController | LeapController.py | 1 | 6250 | ################################################################################
# Author: Khang Nguyen #
# Written: September 2013 #
################################################################################
import Leap, sys
from Leap import CircleGesture, KeyTapGesture, ScreenTapGesture, SwipeGesture
from appscript import *
from osax import *
class ItunesListener(Leap.Listener):
def on_init(self, controller):
print "Initialized"
self.itunes = app('itunes')
self.osax = OSAX()
self.swipes = {}
def on_connect(self, controller):
print "Connected"
# Enable gestures
        controller.enable_gesture(Leap.Gesture.TYPE_CIRCLE)
        controller.enable_gesture(Leap.Gesture.TYPE_KEY_TAP)
        controller.enable_gesture(Leap.Gesture.TYPE_SCREEN_TAP)
        controller.enable_gesture(Leap.Gesture.TYPE_SWIPE)
def on_disconnect(self, controller):
# Note: not dispatched when running in a debugger.
print "Disconnected"
def on_exit(self, controller):
print "Exited"
def on_frame(self, controller):
# Get the most recent frame and report some basic information
frame = controller.frame()
if not frame.hands.is_empty:
# Gestures
for gesture in frame.gestures():
if gesture.type == Leap.Gesture.TYPE_CIRCLE:
circle = CircleGesture(gesture)
# Determine clock direction using the angle between the pointable and the circle normal
if circle.pointable.direction.angle_to(circle.normal) <= Leap.PI/4:
clockwiseness = "clockwise"
else:
clockwiseness = "counterclockwise"
# Calculate the angle swept since the last frame
swept_angle = 0
if circle.state != Leap.Gesture.STATE_START:
previous_update = CircleGesture(controller.frame(1).gesture(circle.id))
swept_angle = (circle.progress - previous_update.progress) * 2 * Leap.PI
print "Circle id: %d, %s, progress: %f, radius: %f, angle: %f degrees, %s" % (
gesture.id, self.state_string(gesture.state),
circle.progress, circle.radius, swept_angle * Leap.RAD_TO_DEG, clockwiseness)
volumeSettings = self.osax.get_volume_settings()
currentVolume = volumeSettings[k.output_volume]
# Max vlue volumeSettings returns is 100
# But max value set_volume takes is 7
currentVolume = currentVolume * 7.0 / 100
if clockwiseness == 'clockwise':
self.osax.set_volume(currentVolume + 0.1)
else:
self.osax.set_volume(currentVolume - 0.1)
if gesture.type == Leap.Gesture.TYPE_SWIPE:
swipe = SwipeGesture(gesture)
print "Swipe id: %d, state: %s, position: %s" % (
gesture.id, self.state_string(gesture.state), swipe.position)
if not self.swipes.get(gesture.id):
self.swipes[gesture.id] = {}
gestures = self.swipes.get(gesture.id)
if self.state_string(gesture.state) == "STATE_START":
gestures['STATE_START'] = gesture
if self.state_string(gesture.state) == "STATE_STOP":
gestures['STATE_STOP'] = gesture
if gestures.get('STATE_START') and gestures.get('STATE_STOP'):
startGesture = SwipeGesture(gestures['STATE_START'])
stopGesture = SwipeGesture(gestures['STATE_STOP'])
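                        # Leap positions are reported in millimetres, so a
                        # horizontal travel of more than 70 mm is treated as a
                        # deliberate swipe to the next/previous track.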
if startGesture.position[0] - stopGesture.position[0] > 70:
self.itunes.next_track()
elif startGesture.position[0] - stopGesture.position[0] < -70:
self.itunes.previous_track()
print "START x", startGesture.position[0]
print "STOP x", stopGesture.position[0]
if gesture.type == Leap.Gesture.TYPE_KEY_TAP:
keytap = KeyTapGesture(gesture)
print "Key Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_string(gesture.state),
keytap.position, keytap.direction )
if gesture.type == Leap.Gesture.TYPE_SCREEN_TAP:
screentap = ScreenTapGesture(gesture)
print "Screen Tap id: %d, %s, position: %s, direction: %s" % (
gesture.id, self.state_string(gesture.state),
screentap.position, screentap.direction )
playerState = self.itunes.player_state()
if playerState == k.playing:
self.itunes.pause()
if playerState == k.paused:
self.itunes.play()
def state_string(self, state):
if state == Leap.Gesture.STATE_START:
return "STATE_START"
if state == Leap.Gesture.STATE_UPDATE:
return "STATE_UPDATE"
if state == Leap.Gesture.STATE_STOP:
return "STATE_STOP"
if state == Leap.Gesture.STATE_INVALID:
return "STATE_INVALID"
def main():
# Create an itunes listener and controller
listener = ItunesListener()
controller = Leap.Controller()
# Have the sample listener receive events from the controller
controller.add_listener(listener)
# Keep this process running until Enter is pressed
print "Press Enter to quit..."
sys.stdin.readline()
# Remove the sample listener when done
controller.remove_listener(listener)
if __name__ == "__main__":
main()
| mit | -6,920,043,446,760,458,000 | 43.326241 | 107 | 0.51616 | false |
danielhkl/matplotlib2tikz | test/test_image_plot.py | 1 | 1702 | # -*- coding: utf-8 -*-
#
import helpers
import pytest
# the picture 'lena.png' with origin='lower' is flipped upside-down.
# So it has to be upside-down in the pdf-file as well.
# test for monochrome picture
def plot1():
from matplotlib import rcParams
import matplotlib.pyplot as plt
from PIL import Image
import os
this_dir = os.path.dirname(os.path.realpath(__file__))
lena = Image.open(os.path.join(this_dir, 'lena.png'))
lena = lena.convert('L')
dpi = rcParams['figure.dpi']
figsize = lena.size[0]/dpi, lena.size[1]/dpi
fig = plt.figure(figsize=figsize)
ax = plt.axes([0, 0, 1, 1], frameon=False)
ax.set_axis_off()
plt.imshow(lena, cmap='viridis', origin='lower')
# Set the current color map to HSV.
plt.hsv()
plt.colorbar()
return fig
# test for rgb picture
def plot2():
from matplotlib import rcParams
import matplotlib.pyplot as plt
from PIL import Image
import os
this_dir = os.path.dirname(os.path.realpath(__file__))
lena = Image.open(os.path.join(this_dir, 'lena.png'))
dpi = rcParams['figure.dpi']
figsize = lena.size[0] / dpi, lena.size[1] / dpi
fig = plt.figure(figsize=figsize)
ax = plt.axes([0, 0, 1, 1], frameon=False)
ax.set_axis_off()
plt.imshow(lena, cmap='viridis', origin='lower')
# Set the current color map to HSV.
plt.hsv()
plt.colorbar()
return fig
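# Each plot is rendered and compared against a stored perceptual hash (pHash)
# of the expected image; on mismatch, get_details() reports the differences.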
@pytest.mark.parametrize(
'plot, reference_phash', [
(plot1, '455361ec211d72fb'),
(plot2, '7558d3b30f634b06'),
]
)
def test(plot, reference_phash):
phash = helpers.Phash(plot())
assert phash.phash == reference_phash, phash.get_details()
return
| mit | -2,738,905,011,422,670,000 | 26.451613 | 68 | 0.640423 | false |
thurairaj92/pythonProjects | Treemap.py/node.py | 1 | 4358 | class Tree(object):
'''A tree that stores information of a directory, each node in the tree
contains the name and size of a file in the directory.'''
class Node(object):
'''A node in tree stores the size and name of a file.'''
def __init__(self, k):
'''(Node, tuple) -> Nonetype
Create a Node with key k, k is in the form of (filename, filesize).
and _range None, _range is the range that one file occupied
in the pygame screen.'''
self.key = k
self._range = None
def total(self):
'''Node -> int
Return the size of a file in the directory stored in Node.'''
return self.key[1]
def getting_range(self, x, y):
            '''(Node, int, int) -> tuple
            Get the range that this file occupies in the pygame window,
            using a helper function.'''
return _getting_range(self, x, y)
def __init__(self):
'''Tree -> NoneType
Create a Tree with root None, child as an empty list
and _range as None.'''
self.root = None
self.child = []
self._range = None
def __str__(self):
'''Tree -> str
Return the string representation of the root of a tree.'''
        return str(self.root.key)
def insert_directory(self, k):
        '''(Tree, tuple) -> NoneType
Insert a new directory at the end of Tree.
Tuple k is in the form of (directory, size)'''
if self.root:
new_tree = Tree()
new_tree.insert_directory(k)
self.child.append(new_tree)
else:
self.root = Tree.Node(k)
def insert_files(self, k):
        '''(Tree, tuple) -> NoneType
Insert a new file to a directory Tree.
Tuple k is in the form of (filename, size)'''
self.child.append(Tree.Node(k))
def search_tree(self, d):
'''(Tree, unicode) -> object
Search if the directory d is in the tree by a helper function.'''
return _search_tree(self, d)
def total(self):
        '''Tree -> int
Return the total size of a directory Tree by a helper function.'''
return _total(self)
def getting_range(self, x, y):
        '''(Tree, int, int) -> object
        Return the file entry located at (x, y) in the Tree, via a helper function.'''
return _getting_range(self, x, y)
def _total(tree):
    '''Tree -> int
    Return the total size of a directory stored in Tree.'''
if tree.child:
_sum = tree.root.key[1]
        for child in tree.child:
            _sum += child.total()
else:
return tree.root.key[1]
return _sum
def _getting_range(tree, x, y):
'''(Object, int, int) -> object
Return the file name and file size that (x, y) indicates in
pygame window.'''
if type(tree) == Tree:
if tree.child and tree._range:
if x in tree._range[0] and y in tree._range[1]:
for child in tree.child:
filename = _getting_range(child, x, y)
if filename:
return filename
return tree.root.key
else:
return None
elif tree._range and x in tree._range[0] and y in tree._range[1]:
return tree.root.key[0]
elif type(tree) == Tree.Node:
if tree._range and x in tree._range[0] and y in tree._range[1]:
return tree.key
return None
def _search_tree(tree, name):
'''(Tree, unicode) -> object
If name is in the tree, return the subtree start from where name is
located in the tree. Return True or False if name is leaf or not in
the tree.'''
if type(tree) == Tree:
if tree.root.key[0] == name:
return tree
else:
for child in tree.child:
contain_tree = _search_tree(child, name)
if type(contain_tree) == Tree:
return contain_tree
elif contain_tree == True:
return tree
return None
else:
if tree.key[0] == name:
return True
else:
return False
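# Minimal usage sketch (hypothetical file names and sizes, not part of the original module):
#     root = Tree()
#     root.insert_directory(('home', 0))
#     root.insert_files(('notes.txt', 120))
#     root.total()                 # 0 + 120 = 120
#     root.getting_range(10, 20)   # None until _range values are assigned by the renderer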
| mit | -6,524,453,870,722,641,000 | 27.860927 | 79 | 0.532354 | false |
johnwyles/PySpeak | pyspeak/commandline.py | 1 | 3124 | """
Commandline execute function
Example (already included in 'pyspeak' executable in this project):
#!/usr/bin/evn python
from pyspeak.commandline import execute
execute()
"""
import argparse
import logging
from pyspeak import __version__
from listener import Listener
from speaker import Speaker
def execute():
"""
Execute method called from the commandline executeable
"""
# Parse command line arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers(help='Subcommand for pyspeak to either listen for microphone input (speech-to-text) or text input (text-to-speech)')
# Setup Listener argument parser subcommand
parser_listener = subparsers.add_parser('listen', help='Listen for microphone input (speech-to-text)')
    parser_listener.add_argument('-f', '--filename-prefix', default='pyspeak_file', help='Default prefix location and filename for the temporary file %(prog)s uses to store data. This stores a .wav and .flac file of this prefix (e.g. "./pyspeak_file" => [./pyspeak_file.wav, ./pyspeak_file.flac])')
parser_listener.add_argument('-s', '--silence-time', type=int, default=2, help='Amount of silence time (in seconds) to listen for before dispatching voice data for recognition')
parser_listener.add_argument('-t', '--threshold', type=int, default=80, help='Threshold for detecting speech input; depending on your microphone settings you may need to experiment a bit with this value')
parser_listener.set_defaults(func=_listener)
# Setup Speaker argument parser subcommand
    parser_speaker = subparsers.add_parser('speak', help='Listen for text input (text-to-speech)')
parser_speaker.set_defaults(func=_speaker)
parser.add_argument('-l', '--loglevel', default='INFO', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Log level for console output')
parser.add_argument('-v', '--version', help='Get the version number and exit', action='store_true')
arguments = parser.parse_args()
# Print the version number and exit
if arguments.version:
print __name__ + ": " + __version__
exit(0)
# Setup logging
try:
number_level = getattr(logging, arguments.loglevel.upper(), None)
if not isinstance(number_level, int):
raise ValueError('Invalid log level: %s' % number_level)
    except ValueError:
        number_level = logging.INFO
    logging.basicConfig(format='[%(asctime)s] [%(name)s] [%(levelname)s]: %(message)s', level=number_level)
# Callback to argument parser subcommands
arguments.func(arguments)
def _listener(arguments):
"""
Listener subcommand callback
"""
logging.info('Starting Listener')
listener = Listener(silence_time=arguments.silence_time, threshold=arguments.threshold, filename_prefix=arguments.filename_prefix)
listener.get_utterance()
def _speaker(arguments):
"""
Speaker subcommand callback
"""
logging.info('Starting Speaker')
speaker = Speaker()
speaker.get_google_translation('en', 'Hello World!')
| gpl-3.0 | -7,192,280,384,282,254,000 | 39.571429 | 298 | 0.702945 | false |
Captricity/captricity-api-quickstarts | hackathon/fppc_data_munging_example.py | 1 | 6683 | import csv
import re
import time
from captools.api import Client
from cStringIO import StringIO
from pprint import PrettyPrinter
from collections import Counter
CAP_API_TOKEN = 'YOUR CAPTRICITY API TOKEN'
pp = PrettyPrinter(indent=4)
def create_fancy_csv_from_job(job_id, name):
# Initialize Captricity Python Client (installation instructions in README
# at https://github.com/Captricity/captools)
start = time.time()
client = Client(api_token=CAP_API_TOKEN)
# Read all the Instance Sets associated with this job
isets = client.read_instance_sets(job_id)
# For each Instance Set, we will pull down all the Shreds and record the
# transcribed value and generate a link to the Shred image.
all_iset_data = []
fields = {}
fields['0'] = 'Form Name'
fields['0.5'] = 'Form Name Image Link'
for iset in isets:
shreds = client.read_instance_set_shreds(iset['id'])
iset_data = {}
iset_data['0'] = iset['name']
for shred in shreds:
if '0.5' not in iset_data:
iset_data['0.5'] = 'https://shreddr.captricity.com/api/v1/instance/%s/aligned-image' % shred['instance_id']
# Key on Field id because Field name can be duplicated
field_id = shred['field']['id']
iset_data[str(field_id)] = shred['best_estimate'].encode('utf-8') if shred['best_estimate'] else None
iset_data[str(field_id + 0.5)] = 'https://shreddr.captricity.com/api/v1/shred/%s/image' % shred['id']
# We'll order export by Field ID, links will be (field_id + 0.5) so they will be next to the Field in CSV
fields[str(field_id)] = shred['field']['name']
fields[str(field_id + 0.5)] = shred['field']['name'] + ' Image Link'
all_iset_data.append(iset_data)
if len(all_iset_data) % 5 == 0:
print 'Done with %s Instance Sets from Job %s in %s sec, %s to go' % (len(all_iset_data), job_id, time.time() - start, len(isets) - len(all_iset_data))
# Export all the data as CSV
data_out = [fields] + all_iset_data
header = sorted(fields.keys())
if job_id in [3968, 4606]:
# No depivot for cover page or addenda
buffer = open('%s.csv' % name, 'w')
else:
buffer = StringIO()
csv_writer = csv.DictWriter(buffer, header, restval=u'--no data--')
csv_writer.writerows(data_out)
if job_id in [3968, 4606]:
buffer.close()
else:
buffer.seek(0)
depivot_data(csv.reader(buffer), '%s.csv' % name)
def depivot_data(reader, outfile_name):
"""
This takes in a csv and 'depivots' the data. This is useful when a single
row of the data actually includes multiple rows. This depivots the data
using the heuristic that when something can be depivoted, the field names
(column headers) are the same for each depivoted row (so that we have a 'name'
column for each depivoted row in the raw row).
"""
headers = reader.next()
header_counts = Counter()
# First count all the headers, to find candidates for depivoting
for header in headers:
header_counts[header] += 1
    # Separate out the singletons from the candidates for depivoting
singleton_headers = [k for k,v in header_counts.items() if v == 1]
# Figure out the column indices of each depivoted row group
singleton_column_index = {} # The column indices of the singletons
repeated_column_sets = [] # The column indices of each row group
leftmost = None
for i, header in enumerate(headers):
        # Separately track the singleton column indices
if header in singleton_headers:
singleton_column_index[header] = i
else:
# First, we want to find the left most column.
# This will be used to determine when we need to
# add another row group
if not leftmost:
leftmost = header
if leftmost == header:
repeated_column_sets.append({})
# Figure out the most likely row group this header belongs to
for x in repeated_column_sets:
if header not in x:
x[header] = i
break
# At this point we know how many row groups exist, and which headers
# correspond to each row group. We will use this information to construct
# the depivoted csv
# First, construct the new headers. This consists of all the singletons,
# and all the headers in one of the repeated column sets, with the
# repeated column headers coming before the singletons. Note that
# we will sort each header based on the original ordering.
new_headers = []
# Add the "depivoted" row headers
if len(repeated_column_sets) > 0:
tmp = repeated_column_sets[0]
tmp_new_headers = tmp.keys()
tmp_new_headers.sort(key=lambda x: tmp[x])
for t in tmp_new_headers:
new_headers.append(t)
# Add the singletons
new_singleton_headers = singleton_column_index.keys()
new_singleton_headers.sort(key=lambda x: singleton_column_index[x])
for h in new_singleton_headers[1:]:
new_headers.append(h)
# Keep the first column the same, since that includes the name of the row
new_headers.insert(0, new_singleton_headers[0])
# Construct the depivoted row
depivoted_csv_out = csv.DictWriter(open(outfile_name, 'w'), new_headers)
depivoted_csv_out.writeheader()
for row in reader:
# For each row, we want to extract the depivoted rows (if there are any that
# need depivoting). We will simply repeat the singletons in each depivoted row.
if len(repeated_column_sets) == 0:
depivoted_row = {k: row[v] for k,v in singleton_column_index.items()}
depivoted_csv_out.writerow(depivoted_row)
else:
for column_set in repeated_column_sets:
depivoted_row = {k: row[v] for k,v in singleton_column_index.items()}
depivoted_row.update({k : row[v] for k,v in column_set.items()})
depivoted_csv_out.writerow(depivoted_row)
if __name__ == '__main__':
for job_id, name in [
(3968, 'Cover Page'),
(3975, 'A-1 Form 700--Investment Disclosures'),
(3977, 'A-2 Form 700--Business Entity Ownership'),
(3978, 'B Form 700--Real Property Listings'),
(4036, 'C Form 700--Income Reporting'),
(3980, 'D Form 700--Gift Disclosures'),
(3981, 'E Form 700--Travel Payments'),
(4607, 'FPPC Judges Addenda')
]:
create_fancy_csv_from_job(job_id, name)
| mit | 4,663,777,163,566,287,000 | 43.258278 | 163 | 0.625617 | false |
nedbat/zellij | zellij/cmd.py | 1 | 6188 | """Command-line interface for Zellij."""
import math
import pprint
import click
from zellij.color import random_color, parse_color
from zellij.debug import debug_world, debug_click_options, should_debug
from zellij.design import get_design
from zellij.drawing import Drawing
from zellij.path import combine_paths, draw_paths, clip_paths, perturb_paths
from zellij.path_tiler import PathTiler
from zellij.strap import strapify
def size_type(s):
"""For specifying the size: either WxH, or W (square)"""
if 'x' in s:
width, height = s.split('x')
else:
width = height = s
return int(width.strip()), int(height.strip())
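# Example (added for clarity): size_type("800x600") returns (800, 600), while size_type("800") returns (800, 800).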
_common_options = {
'common':[
*debug_click_options,
],
'drawing': [
click.option('--output', help='File name to write to'),
click.option('--tiles', type=float, default=3, help='How many tiles to fit in the drawing'),
click.option('--size', type=size_type, default='800', help='Size of the output'),
click.option('--rotate', type=float, default=0, help='Angle to rotate the drawing'),
click.option('--background', type=parse_color, help='The color of the background'),
click.option('--format', help='The output format, png or svg'),
click.option('--perturb', type=float, default=0, help='A random amount to jostle points'),
click.argument('design'),
],
}
def common_options(category):
"""Provide a set of common options to a click command."""
def _wrapped(func):
# from: https://github.com/pallets/click/issues/108#issuecomment-194465429
for option in reversed(_common_options[category]):
func = option(func)
return func
return _wrapped
def start_drawing(opt, **drawing_args):
"""Make a Drawing based on the options passed."""
width, height = opt['size']
bg = opt['background']
def_bg = drawing_args.pop('bg', (1, 1, 1))
if bg is None:
bg = def_bg
name = opt['output']
def_name = drawing_args.pop('name', 'drawing')
format = opt['format']
dwg = Drawing(width, height, name=name or def_name, format=format, bg=bg, **drawing_args)
dwg.translate(width/2, height/2)
dwg.rotate(opt['rotate'])
dwg.translate(-width/2, -height/2)
return dwg
@click.group()
def clickmain():
"""Make Islamic-inspired geometric art."""
pass
@clickmain.command()
@common_options('common')
@common_options('drawing')
@click.option("--strap-width", type=float, default=6, help='Width of the straps, in tile-percent')
def straps(**opt):
"""Draw with over-under straps"""
dwg = start_drawing(opt, name="straps", bg=(.8, .8, .8))
tilew = int(dwg.width/opt['tiles'])
if opt['strap_width'] > 0:
strap_kwargs = dict(width=tilew * opt['strap_width'] / 100, random_factor=0)
else:
strap_kwargs = dict(width=tilew / 60, random_factor=4.9)
tiler = PathTiler(dwg)
design_class = get_design(opt['design'])
draw = design_class(tilew)
draw.draw(tiler)
paths_all = combine_paths(tiler.paths)
paths = clip_paths(paths_all, dwg.perimeter().bounds())
if opt['perturb']:
paths = perturb_paths(paths, opt['perturb'])
if should_debug('world'):
debug_world(dwg, paths_styles=[
(paths_all, dict(width=1, rgb=(.75, .75, .75))),
(paths, dict(width=1.5, rgb=(1, 0, 0))),
])
straps = strapify(paths, **strap_kwargs)
with dwg.style(rgb=(1, 1, 1)):
for strap in straps:
strap.sides[0].draw(dwg)
strap.sides[1].draw(dwg, append=True, reverse=True)
dwg.close_path()
dwg.fill()
with dwg.style(rgb=(0, 0, 0), width=2):
for strap in straps:
for side in strap.sides:
side.draw(dwg)
dwg.stroke()
dwg.finish()
@clickmain.command()
@common_options('common')
@common_options('drawing')
def candystripe(**opt):
"""Draw with crazy colors and a white stripe"""
dwg = start_drawing(opt, name="candy")
tilew = int(dwg.width/opt['tiles'])
tiler = PathTiler(dwg)
design_class = get_design(opt['design'])
draw = design_class(tilew)
draw.draw(tiler)
paths = combine_paths(tiler.paths)
LINE_WIDTH = tilew/4
dwg.multi_stroke(paths, [
#(LINE_WIDTH, (0, 0, 0)),
(LINE_WIDTH-2, random_color),
#(7, (0, 0, 0)),
(5, (1, 1, 1)),
])
dwg.finish()
@clickmain.command()
@common_options('common')
@common_options('drawing')
def diagram(**opt):
"""Draw the underlying structure of a design"""
width, height = opt['size']
tilew = int(width/opt['tiles'])
dwg = Drawing(width, height, name="diagram")
design_class = get_design(opt['design'])
draw = design_class(tilew)
# The full pattern.
tiler = PathTiler(dwg)
draw.draw(tiler)
with dwg.style(rgb=(.5, .5, .5)):
draw_paths(tiler.paths, dwg)
dwg.stroke()
# The symmetry.
tiler = PathTiler(dwg)
tiler.tile_p6m(draw.draw_tiler_unit, tilew)
with dwg.style(rgb=(1, .75, .75), width=1, dash=[5, 5]):
draw_paths(tiler.paths, dwg)
dwg.stroke()
def single_tiler():
"""Make a PathTiler right for drawing just one unit."""
tiler = PathTiler(dwg)
# TODO: make this work for other symmetries
tiler.pc.translate(2 * tilew * math.sqrt(3) / 2, tilew)
tiler.pc.reflect_xy(0, 0)
return tiler
# The tiler unit.
tiler = single_tiler()
draw.draw_tiler_unit(tiler.pc)
with dwg.style(rgb=(1, 0, 0), width=3):
draw_paths(tiler.paths, dwg)
dwg.stroke()
# The design.
tiler = single_tiler()
draw.draw_tile(tiler.pc)
with dwg.style(rgb=(0, 0, 0), width=6):
draw_paths(tiler.paths, dwg)
dwg.stroke()
dwg.finish()
@clickmain.command()
@common_options('common')
@common_options('drawing')
def show_opts(**opt):
"""Dump the provided options"""
pprint.pprint(opt)
def main():
"""The main Zellij entry point."""
try:
clickmain()
except:
#print("Whoops!")
raise
if __name__ == '__main__':
main()
| apache-2.0 | 2,686,110,141,197,884,400 | 27.385321 | 100 | 0.60488 | false |
adaptive-learning/matmat-web | matmat/management/commands/export2csv.py | 1 | 8381 | from collections import defaultdict
from contextlib import closing
import csv
import zipfile
import sys
from django.db import connection
import os
from django.core.management import BaseCommand, CommandError
import re
from proso_models.models import ItemRelation, Answer, AnswerMeta
from proso_tasks.models import Task, Context, TaskInstance, TaskAnswer, Skill
from matmat import settings
import pandas as pd
import json
class Command(BaseCommand):
args = 'table name'
help = "Export data"
BATCH_SIZE = 5 * 10**5
MODELS_TO_EXPORT = [Task, Context, TaskInstance, Skill, Answer, TaskAnswer, ItemRelation, AnswerMeta]
def __init__(self):
super(Command, self).__init__()
self.tables_to_export = []
for model in self.MODELS_TO_EXPORT:
self.tables_to_export.append(model._meta.db_table)
def handle(self, *args, **options):
if len(args) > 0 and len(args) != 1:
raise CommandError('''
            The command requires exactly one argument:
- table name
or no argument.
''')
if len(args) > 0:
table_name = args[0]
self.handle_one_table(table_name)
else:
self.handle_all_tables()
def handle_all_tables(self):
if not os.path.exists(os.path.join(settings.MEDIA_ROOT, "raw")):
os.makedirs(os.path.join(settings.MEDIA_ROOT, "raw"))
for table_name in self.tables_to_export:
self.handle_one_table(table_name)
prepare_data(input_dir=os.path.join(settings.MEDIA_ROOT, "raw"), output_dir=settings.MEDIA_ROOT)
filename_zip = os.path.join(settings.MEDIA_ROOT, "matmat_export_raw.zip")
files = [os.path.join(settings.MEDIA_ROOT, "raw", f + '.csv') for f in self.tables_to_export]
zip_files(filename_zip, files)
filename_zip = os.path.join(settings.MEDIA_ROOT, "matmat_export.zip")
files = [os.path.join(settings.MEDIA_ROOT, f + '.csv') for f in ["answers", "items", "skills"]]
zip_files(filename_zip, files)
def handle_one_table(self, table_name):
if table_name not in self.tables_to_export:
raise CommandError('table "%s" is not supported' % table_name)
count = 0
with closing(connection.cursor()) as cursor:
cursor.execute('SELECT COUNT(*) FROM ' + table_name)
count, = cursor.fetchone()
print('processing %s' % table_name, ',', count, 'items')
sql = 'SELECT * FROM ' + table_name
filename_csv = settings.MEDIA_ROOT + '/raw/' + table_name + '.csv'
for offset in range(0, count, self.BATCH_SIZE):
with closing(connection.cursor()) as cursor:
cursor.execute(sql + ' LIMIT ' + str(self.BATCH_SIZE) + ' OFFSET ' + str(offset))
self.dump_cursor(
cursor,
filename_csv,
append=(offset > 0))
def dump_cursor(self, cursor, filename, append=False):
headers = [re.sub(r'_id$', '', col[0]) for col in cursor.description]
with open(filename, 'a' if append else 'w', encoding='utf-8') as csvfile:
writer = csv.writer(csvfile)
if not append:
writer.writerow(headers)
for row in cursor:
writer.writerow(row)
def zip_files(filename_zip, files):
if os.path.exists(filename_zip):
os.remove(filename_zip)
zf = zipfile.ZipFile(filename_zip, 'w', zipfile.ZIP_DEFLATED)
for filename in files:
zf.write(filename, os.path.basename(filename))
# os.remove(filename)
zf.close()
def get_skill_parents(skills, relations):
map = {}
for id, skill in skills.iterrows():
map[id] = int(skill['parent']) if not pd.isnull(skill['parent']) else None
return map
def get_skill_parent_lists(skills, relations):
map = get_skill_parents(skills, relations)
lists = defaultdict(lambda: [])
for skill in map:
s = skill
while s:
lists[skill].append(s)
s = map[s]
return lists
def parse_question(item, data):
if item["visualization"] == "pairing":
return ""
question = data["question"] if "question" in data else data["text"]
if type(question) is list and len(question) == 3 and type(question[0]) is str:
question = question[0]
if type(question) is list and len(question) == 3 and type(question[0]) is int:
question = "".join(map(str, question))
if type(question) is list and len(question) == 1:
question = question[0]
question = str(question).replace("×", "x").replace("÷", "/").replace(" ", "")
return question
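# Example of the normalisation above (added for clarity): a task whose data holds
# {"question": [3, "×", 4]} yields "3x4", while any "pairing" visualization yields "".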
def prepare_data(input_dir="data/source", output_dir="data"):
csv.field_size_limit(sys.maxsize)
answers = pd.read_csv(os.path.join(input_dir, "proso_models_answer.csv"), engine='python', index_col=0)
answers = answers.join(pd.read_csv(os.path.join(input_dir, "proso_tasks_taskanswer.csv"), engine='python', index_col=0))
answers = answers.join(pd.read_csv(os.path.join(input_dir, "proso_models_answermeta.csv"), engine='python', index_col=0), on='metainfo')
tasks = pd.read_csv(os.path.join(input_dir, "proso_tasks_task.csv"), index_col=0)
task_instances = pd.read_csv(os.path.join(input_dir, "proso_tasks_taskinstance.csv"), index_col=0)
contexts = pd.read_csv(os.path.join(input_dir, "proso_tasks_context.csv"), index_col=0)
skills = pd.read_csv(os.path.join(input_dir, "proso_tasks_skill.csv"), index_col=0)
relations = pd.read_csv(os.path.join(input_dir, "proso_models_itemrelation.csv"), index_col=0)
skills = skills.join(relations.set_index('child').parent, on='item')
for id, skill in skills.iterrows():
skill_id = skills.loc[skills['item'] == skill['parent']].index[0] if not pd.isnull(skill["parent"]) else None
skills.loc[id, 'parent'] = skill_id
skill_parents = get_skill_parent_lists(skills, relations)
items = task_instances.join(tasks, on='task', rsuffix='_task')
items = items.join(contexts, on='context', rsuffix='_context')
items["answer"] = 0
items["question"] = ""
items["skill_lvl_1"], items["skill_lvl_2"], items["skill_lvl_3"] = None, None, None
for id, item in items.iterrows():
data = json.loads(item["content"])
items.loc[id, "content"] = item["content"].replace('"', "'")
items.loc[id, "answer"] = int(data["answer"]) if item["identifier_context"] != "pairing" else None
items.loc[id, "question"] = item['identifier_task']
skill_item = relations.loc[relations['child'] == item['item_task'], 'parent'].data[0]
skill = skills.loc[skills['item'] == skill_item].index.tolist()[0]
items.loc[id, "skill"] = skill
for i, skill in enumerate(skill_parents[skill][::-1][1:]):
items.loc[id, "skill_lvl_{}".format(i + 1)] = skill
items["skill"] = items["skill"].astype(int)
items.rename(inplace=True, columns={"identifier_context": "visualization", 'content': 'data'})
answers['correct'] = 1 * (answers['item_asked'] == answers['item_answered'])
answers = answers.join(pd.Series(data=items.index, index=items['item'], name='item_id'), on='item')
answers = answers.join(items[["answer"]], on="item_id", rsuffix="_expected")
del answers['item']
answers.rename(inplace=True, columns={"user": "student", 'content': 'log', 'item_id': 'item'})
answers = answers[["time", "item", "student", "session", "response_time", "correct", "answer", "answer_expected", "log"]]
answers['random'] = 1 * answers['log'].str.contains('random_without_options')
answers = answers.round({"response_time": 3})
skills.rename(inplace=True, columns={"note": "name_cz",})
skills = skills[['identifier', "name", "parent"]]
contexts.rename(inplace=True, columns={"note": "name_cz",})
answers.to_csv(os.path.join(output_dir, "answers.csv"), float_format="%.0f", encoding='utf-8')
items = items[["question", "answer", "visualization", "skill", "skill_lvl_1", "skill_lvl_2", "skill_lvl_3", "data"]]
items.to_csv(os.path.join(output_dir, "items.csv"), encoding='utf-8')
# contexts.to_csv(os.path.join(output_dir, "visualizations.csv"))
skills.to_csv(os.path.join(output_dir, "skills.csv"), float_format="%.0f", encoding='utf-8')
| mit | -7,513,176,340,553,469,000 | 43.110526 | 140 | 0.624031 | false |
joajfreitas/bookmarks | marcador/marcador_lib.py | 1 | 6761 | import os
import sqlite3
from subprocess import call
class Database:
def __init__(self, filename):
self.filename = filename
self.conn = self.open_database(self.filename)
self.cursor = self.conn.cursor()
def open_db(self, filename):
return sqlite3.connect(filename)
def set_default_db(self, filename):
conn = self.open_db(filename)
c = conn.cursor()
c.execute(
"""CREATE TABLE bookmarks (
identifier INTEGER PRIMARY KEY,
url TEXT,
description TEXT,
count INTEGER,
thumbnail TEXT,
score REAL)
"""
)
c.execute(
"""CREATE TABLE tags (
identifier INTEGER PRIMARY KEY,
tag TEXT)
"""
)
c.execute(
"""CREATE TABLE bookmarks_tags (
bookmark REFERENCES bookmarks(identifier),
tag REFERENCES tags(identifier))
"""
)
conn.commit()
return conn
def open_database(self, filename):
if not os.path.isfile(filename):
return self.set_default_db(filename)
return self.open_db(filename)
def get_bookmarks(self, sorted=False):
self.cursor.execute(
"""select identifier, url, description, thumbnail, count from bookmarks""" + (" order by score DESC" if sorted else "")
)
bookmarks = self.cursor.fetchall()
for id, url, desc, thumbnail, count in bookmarks:
tags = self.get_bookmark_tags(id)
tags = [tag for tag, id in tags]
yield id, url, thumbnail, tags
def open_bookmark(self, id):
self.cursor.execute(f"select url, count from bookmarks where identifier='{id}'")
url, count = self.cursor.fetchone()
self.hit_url(url)
import webbrowser
webbrowser.open(url)
def add_bookmark(self, url, tags):
self.cursor.execute(f'insert into bookmarks (url,count,score) values ("{url}",0,1)')
book_id = self.cursor.lastrowid
for tag in tags:
self.cursor.execute(f'insert into tags (tag) values ("{tag}")')
tag_id = self.cursor.lastrowid
self.cursor.execute(
f"insert into bookmarks_tags (bookmark, tag) values ({book_id}, {tag_id})"
)
self.conn.commit()
def rm_bookmark(self, id):
self.cursor.execute(
f"delete from bookmarks_tags as bt where bt.bookmark = {id}"
)
self.cursor.execute(f"delete from bookmarks where identifier = {id}")
self.conn.commit()
def get_url(self, id):
if id == 0:
return None
self.cursor.execute(f"select url from bookmarks where identifier={id}")
url = self.cursor.fetchone()
return url
def get_bookmark(self, id):
self.cursor.execute(
f"""select identifier, url, description, thumbnail, count
from bookmarks where identifier={id}"""
)
id, url, desc, thumbnail, count = self.cursor.fetchone()
return id, url, desc, thumbnail, count
def set_bookmark(self, id, url, tags):
self.cursor.execute(f"update bookmarks set url='{url}' where identifier={id}")
tag_set = self.bookmark_tag_list()
_tags = [tag for tag in tags if tag not in tag_set]
for tag in _tags:
self.cursor.execute(f"insert into tags (tag) values ('{tag}')")
self.cursor.execute(f"delete from bookmarks_tags as bt where bt.bookmark={id}")
for tag in tags:
tag_id = self.get_tag_id(tag)
self.cursor.execute(
f"insert into bookmarks_tags as bt values ({id},{tag_id})"
)
self.conn.commit()
def set_thumbnail(self, id, thumbnail):
self.cursor.execute(
f"update bookmarks set thumbnail='{thumbnail}' where identifier={id}"
)
self.conn.commit()
def edit_bookmark(self, id):
id, url, desc, thumbnail, count = self.get_bookmark(id)
tags = self.get_bookmark_tags(id)
tmp_file = "/tmp/bookmarks.tmp"
with open(tmp_file, "w") as tmp:
tmp.write(url + "\n")
for tag, tag_id in tags:
tmp.write(tag + "\n")
term = os.path.expandvars("$TERM")
editor = os.path.expandvars("$EDITOR")
call([term, "-e", editor, tmp_file])
with open(tmp_file, "r") as tmp:
lines = tmp.readlines()
lines = [l.strip("\n") for l in lines if l != ""]
url = lines[0]
tags = [tag for tag in lines[1:]]
self.set_bookmark(id, url, tags)
def get_bookmark_tags(self, id):
self.cursor.execute(
f"""select tags.tag, tags.identifier from
bookmarks_tags as bt, tags where bt.bookmark={id} and bt.tag = tags.identifier"""
)
return list(self.cursor.fetchall())
def bookmark_tag_search(self, tag):
self.cursor.execute(f"select identifier from tags where tag='{tag}'")
r = self.cursor.fetchone()
if r == None:
return []
id = r[0]
self.cursor.execute(
f"select bt.bookmark from bookmarks_tags as bt where bt.tag = {id}"
)
bookmarks = self.cursor.fetchall()
for _book in bookmarks:
book = _book[0]
self.cursor.execute(
f"""select identifier, url, description, count
from bookmarks where identifier = {book}""")
id, url, desc, count = self.cursor.fetchone()
yield id, url, desc, count
def bookmark_tag_list(self):
self.cursor.execute("select tag from tags")
tags = self.cursor.fetchall()
for tag in tags:
yield tag[0]
def get_tag_id(self, tag):
self.cursor.execute(f"select identifier from tags where tag='{tag}'")
r = self.cursor.fetchone()
return None if r == None else r[0]
def hit_url(self, url):
self.cursor.execute(f"select identifier, count, score from bookmarks where url='{url}'")
id, count, score = self.cursor.fetchone()
count = int(count)
count += 1
score += 1
self.cursor.execute(f"update bookmarks set score = score*0.95 where identifier<>'{id}'")
self.cursor.execute(
f"update bookmarks set count = {count}, score = {score} where identifier='{id}'")
self.conn.commit()
def bookmark_to_str(bookmark):
id, url, thumbnail, tags = bookmark
output = f"{id}, {url} "
for tag in tags:
output += f"{tag},"
output = output[:-1] + "\n"
return output
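# Minimal usage sketch (hypothetical database path and URL, not part of the original module):
#     db = Database('bookmarks.db')               # creates the schema on first use
#     db.add_bookmark('https://example.com', ['reading', 'misc'])
#     for bookmark in db.get_bookmarks(sorted=True):
#         print(bookmark_to_str(bookmark))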
| gpl-3.0 | -4,935,841,480,642,641,000 | 29.454955 | 131 | 0.560716 | false |
shawncaojob/LC | PY/85_maximal_rectangle.py | 1 | 6263 | # 85. Maximal Rectangle QuestionEditorial Solution My Submissions
# Total Accepted: 50686
# Total Submissions: 202962
# Difficulty: Hard
# Given a 2D binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area.
#
# For example, given the following matrix:
#
# 1 0 1 0 0
# 1 0 1 1 1
# 1 1 1 1 1
# 1 0 0 1 0
# Return 6.
# 2018.02.24 Used a list comprehension
from collections import deque
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
res = 0
heights = [0] * n
for i in xrange(m):
heights = [ x + int(y) if y != "0" else 0 for x,y in zip(heights, matrix[i])]
res = max(res, self.getMaxHeight(heights))
return res
def getMaxHeight(self, heights):
res = 0
heights = heights + [0]
d = deque()
for i in xrange(len(heights)):
while d and heights[i] < heights[d[-1]]:
h = heights[d.pop()]
left = d[-1] if d else -1
res = max(res, h * (i - left - 1))
d.append(i)
return res
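# Worked example for the histogram step above (added for clarity, not part of the original
# submission): after the first two rows of the sample matrix ["10100", "10111"], the running
# heights are [2, 0, 2, 1, 1]; the appended sentinel 0 flushes the stack, and the best
# rectangle for that prefix is height 1 * width 3 = 3 (the full answer, 6, appears after the third row).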
# 2017.03.18 One stack solution
from collections import deque
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
heights = [ 0 for y in xrange(n + 1) ]
res = 0
for i in xrange(m):
for j in xrange(n):
heights[j] = 0 if matrix[i][j] == "0" else heights[j] + int(matrix[i][j])
res = max(res, self.maxArea(heights))
return res
def maxArea(self, heights):
res = 0
d = deque()
for i in xrange(len(heights)):
while d and heights[d[-1]] >= heights[i]:
h = heights[d.pop()]
side = d[-1] if d else -1
res = max(res, h * (i - side - 1))
d.append(i)
return res
# 12.30.2016 rewrite
from collections import deque
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
res = 0
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
heights = [ [ 0 for j in xrange(n + 1) ] for i in xrange(m) ]
for i in xrange(m):
for j in xrange(n):
if i == 0 and matrix[i][j] == "1":
heights[i][j] = 1
elif matrix[i][j] == "1":
heights[i][j] += heights[i-1][j] + 1
else:
pass
for i in xrange(m):
d = deque()
for j in xrange(n + 1):
while d and heights[i][j] < heights[i][d[-1]]:
index = d.pop()
h = heights[i][index]
l = -1 if not d else d[-1]
side = j - l - 1
res = max(res, h * side)
d.append(j)
return res
# 11.29.2016 Rewrite
class Solution(object):
def maximalRectangle(self, matrix):
"""
:type matrix: List[List[str]]
:rtype: int
"""
if not matrix: return 0
m, n = len(matrix), len(matrix[0])
heights = []
res = 0
for i in xrange(m):
if i == 0:
heights = [ int(digit) for digit in matrix[0] ]
heights.append(0)
else:
for j in xrange(n):
if matrix[i][j] == "1":
heights[j] += int(matrix[i][j])
else:
heights[j] = 0
d = []
j, l = 0, -1
while j < len(heights):
while d and heights[d[-1]] >= heights[j]:
index = d.pop()
h = heights[index]
if d:
l = d[-1]
else:
l = -1
res = max(res, h * (j - 1 - l))
d.append(j)
j += 1
return res
if __name__ == "__main__":
A = ["10100","10111","11111","10010"]
print(Solution().maximalRectangle(A))
#
#
# class Solution2(object):
# def maximalRectangle(self, matrix):
# """
# :type matrix: List[List[str]]
# :rtype: int
# """
# if not matrix:
# return 0
# res, m, n = 0, len(matrix), len(matrix[0])
#
# # Initialize first height
# H = list(matrix[0]) # Convert string to list of int
# for j in xrange(n):
# H[j] = int(H[j])
#
# for i in xrange(m):
# #initiate L, R
# L = [0 for x in xrange(n)]
# R = [0 for x in xrange(n)]
#
# # Get the height and left
# for j in xrange(n):
# if i == 0:
# pass
# elif matrix[i][j] == "1":
# H[j] += 1
# else:
# H[j] = 0
#
# # Get the left
# k = j - 1
# while k >= 0 and H[k] >= H[j]:
# L[j] = L[j] + L[k] + 1
# k = k - L[k] - 1
#
# # Get the right
# for j in reversed(xrange(n)):
# k = j + 1
# while k < n and H[j] <= H[k]:
# R[j] = R[j] + R[k] + 1
# k = k + R[k] + 1
#
# # Calculate area for each and update res if bigger
# for j in xrange(n):
# if H[j] != 0:
# res = max(res, H[j] * (L[j] + R[j] + 1))
#
# return res
| gpl-3.0 | -2,447,503,830,545,572,400 | 28.266355 | 119 | 0.406355 | false |
wisechengyi/pants | tests/python/pants_test/backend/jvm/tasks/test_checkstyle.py | 1 | 5605 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from textwrap import dedent
from pants.backend.jvm.subsystems.checkstyle import Checkstyle as CheckstyleSubsystem
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.checkstyle import Checkstyle
from pants.base.exceptions import TaskError
from pants.build_graph.address import Address
from pants.testutil.jvm.nailgun_task_test_base import NailgunTaskTestBase
from pants.testutil.task_test_base import ensure_cached
class CheckstyleTest(NailgunTaskTestBase):
"""Tests for the class Checkstyle."""
_RULE_XML_FILE_TAB_CHECKER = dedent(
"""
<module name="FileTabCharacter"/>
"""
)
_RULE_XML_SUPPRESSION_FILTER = dedent(
"""
<module name="SuppressionFilter">
<property name="file" value="${checkstyle.suppression.file}"/>
</module>
"""
)
_TEST_JAVA_SOURCE_WITH_NO_TAB = dedent(
"""
public class HelloMain {
public static void main(String[] args) throws IOException {
System.out.println("A test.");
}
}
"""
)
_TEST_JAVA_SOURCE_WITH_TAB = dedent(
"""
public class HelloMain {
public static void main(String[] args) throws IOException {
\tSystem.out.println("A test.");
}
}
"""
)
@classmethod
def task_type(cls):
return Checkstyle
def _create_context(self, rules_xml=(), properties=None, target_roots=None):
return self.context(
options={
self.options_scope: {
"bootstrap_tools": ["//:checkstyle"],
"properties": properties or {},
},
CheckstyleSubsystem.options_scope: {"config": self._create_config_file(rules_xml),},
},
target_roots=target_roots,
)
def _create_config_file(self, rules_xml=()):
return self.create_file(
relpath="coding_style.xml",
contents=dedent(
"""<?xml version="1.0"?>
<!DOCTYPE module PUBLIC
"-//Puppy Crawl//DTD Check Configuration 1.3//EN"
"http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<module name="Checker">
{rules_xml}
</module>""".format(
rules_xml="\n".join(rules_xml)
)
),
)
def _create_suppression_file(self, suppresses_xml=()):
return self.create_file(
relpath="suppression.xml",
contents=dedent(
"""<?xml version="1.0"?>
<!DOCTYPE suppressions PUBLIC
"-//Puppy Crawl//DTD Suppressions 1.1//EN"
"http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
<suppressions>
{suppresses_xml}
</suppressions>
""".format(
suppresses_xml="\n".join(suppresses_xml)
)
),
)
def _create_target(self, name, test_java_source):
rel_dir = os.path.join("src/java", name)
self.create_file(relpath=os.path.join(rel_dir, f"{name}.java"), contents=test_java_source)
return self.make_target(
Address(spec_path=rel_dir, target_name=name).spec, JavaLibrary, sources=[f"{name}.java"]
)
#
# Test section
#
@ensure_cached(Checkstyle, expected_num_artifacts=1)
def test_single_rule_pass(self):
no_tab = self._create_target("no_tab", self._TEST_JAVA_SOURCE_WITH_NO_TAB)
context = self._create_context(
rules_xml=[self._RULE_XML_FILE_TAB_CHECKER], target_roots=[no_tab]
)
self.populate_runtime_classpath(context=context)
self.execute(context)
@ensure_cached(Checkstyle, expected_num_artifacts=0)
def test_single_rule_fail(self):
with_tab = self._create_target("with_tab", self._TEST_JAVA_SOURCE_WITH_TAB)
context = self._create_context(
rules_xml=[self._RULE_XML_FILE_TAB_CHECKER], target_roots=[with_tab]
)
# add a tab in the source to trigger the tab check rule to fail.
self.populate_runtime_classpath(context=context)
with self.assertRaises(TaskError):
self.execute(context)
def test_suppressions(self):
# For this test, we:
# - add 3 java files, 2 with tabs, 1 without.
# - add 2 suppression rules against those 2 java files with tabs,
# so we can test the logic of suppression.
suppression_file = self._create_suppression_file(
[
'<suppress files=".*with_tab_1\\.java" checks=".*" />',
'<suppress files=".*with_tab_2\\.java" checks=".*" />',
]
)
no_tab = self._create_target("no_tab", self._TEST_JAVA_SOURCE_WITH_NO_TAB)
with_tab_1 = self._create_target("with_tab_1", self._TEST_JAVA_SOURCE_WITH_TAB)
with_tab_2 = self._create_target("with_tab_2", self._TEST_JAVA_SOURCE_WITH_TAB)
context = self._create_context(
rules_xml=[self._RULE_XML_SUPPRESSION_FILTER, self._RULE_XML_FILE_TAB_CHECKER],
properties={"checkstyle.suppression.file": suppression_file,},
target_roots=[no_tab, with_tab_1, with_tab_2],
)
self.populate_runtime_classpath(context=context)
self.execute(context)
| apache-2.0 | 5,961,465,327,693,609,000 | 34.474684 | 100 | 0.575022 | false |
lanmaster53/honeybadger | server/honeybadger/__init__.py | 1 | 2391 | from flask import Flask
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
import logging
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-gk", "--googlekey", dest="googlekey", type=str, default='',
help="Google API Key")
parser.add_argument("-ik", "--ipstackkey", dest="ipstackkey", type=str, default='',
help="IPStack API Key")
opts = parser.parse_args()
basedir = os.path.abspath(os.path.dirname(__file__))
# configuration
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'data.db')
DEBUG = True
SECRET_KEY = 'development key'
SQLALCHEMY_TRACK_MODIFICATIONS = False
GOOGLE_API_KEY = opts.googlekey # Provide your google api key via command-line argument
IPSTACK_API_KEY = opts.ipstackkey
app = Flask(__name__)
app.config.from_object(__name__)
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
# Logger cannot be imported until the db is initialized
from honeybadger.utils import Logger
logger = Logger()
if __name__ != '__main__':
gunicorn_logger = logging.getLogger('gunicorn.error')
# only use handler if gunicorn detected, otherwise default
if gunicorn_logger.handlers:
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
from honeybadger import models
from honeybadger import views
def initdb(username, password):
db.create_all()
import binascii
u = models.User(email=username, password_hash=bcrypt.generate_password_hash(binascii.hexlify(password.encode())), role=0, status=1)
db.session.add(u)
db.session.commit()
print('Database initialized.')
# remove below for production
t = models.Target(name='demo', guid='aedc4c63-8d13-4a22-81c5-d52d32293867')
db.session.add(t)
db.session.commit()
b = models.Beacon(target_guid='aedc4c63-8d13-4a22-81c5-d52d32293867', agent='HTML', ip='1.2.3.4', port='80', useragent='Mac OS X', comment='this is a comment.', lat='38.2531419', lng='-85.7564855', acc='5')
db.session.add(b)
db.session.commit()
b = models.Beacon(target_guid='aedc4c63-8d13-4a22-81c5-d52d32293867', agent='HTML', ip='5.6.7.8', port='80', useragent='Mac OS X', comment='this is a comment.', lat='34.855117', lng='-82.114192', acc='1')
db.session.add(b)
db.session.commit()
def dropdb():
db.drop_all()
print('Database dropped.')
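# Usage sketch (credentials are placeholders): initdb() is meant to be run once, from a
# Python shell, to create the schema, a first user and the demo records above.
#     from honeybadger import initdb
#     initdb('admin@example.com', 'changeme')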
| gpl-3.0 | -8,402,595,862,418,852,000 | 36.359375 | 210 | 0.695943 | false |
mdaal/rap | rap/sweeps/data_management/load_hf5_2.py | 1 | 1682 | from .utils import _define_sweep_data_columns
import tables
import os
import logging
import numpy as np
def load_hf5_2(metadata, hf5_database_path, tablepath):
	''' This function is for loading data taken with KIDs_DAQ_75uW. It uses the columns defined in that hf5 file to
	define the columns in sweep_data_columns.
	tablepath is the path to the table to be loaded, starting from root, e.g. load_hf5_2('/Run44b/T201312102229')
	hf5_database_path is the name of the hf5 database to be accessed for the table information'''
if not os.path.isfile(hf5_database_path):
		logging.error('Specified h5 database does not exist. Aborting...')
return
wmode = 'a'
# use "with" context manage to ensure file is always closed. no need for fileh.close()
with tables.open_file(hf5_database_path, mode = wmode) as fileh:
table = fileh.get_node(tablepath)
Sweep_Array = table.read()
for key in table.attrs.keys:
exec('metadata.{0} = table.attrs.{0}'.format(key))
imported_sweep_data_columns = Sweep_Array.dtype
fsteps = imported_sweep_data_columns['Frequencies'].shape[0]
tpoints = imported_sweep_data_columns['Temperature_Readings'].shape[0]
sweep_data_columns_list, sweep_data_columns = _define_sweep_data_columns(fsteps, tpoints)
for name in imported_sweep_data_columns.names:
if name not in sweep_data_columns.names:
sweep_data_columns_list.append((name,imported_sweep_data_columns[name] ))
sweep_data_columns = np.dtype(sweep_data_columns_list)
Sweep_Array = np.array(Sweep_Array, dtype = sweep_data_columns)
return Sweep_Array, sweep_data_columns, sweep_data_columns_list
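# Usage sketch (file name and table path are illustrative):
#     Sweep_Array, sweep_data_columns, sweep_data_columns_list = load_hf5_2(metadata, 'run44.h5', '/Run44b/T201312102229')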
| mit | 3,572,133,839,036,354,000 | 41.05 | 114 | 0.70214 | false |
ecederstrand/exchangelib | tests/test_items/test_bulk.py | 1 | 7409 | import datetime
from exchangelib.errors import ErrorItemNotFound, ErrorInvalidChangeKey, ErrorInvalidIdMalformed
from exchangelib.fields import FieldPath
from exchangelib.folders import Inbox, Folder, Calendar
from exchangelib.items import Item, Message, SAVE_ONLY, SEND_ONLY, SEND_AND_SAVE_COPY, CalendarItem
from .test_basics import BaseItemTest
class BulkMethodTest(BaseItemTest):
TEST_FOLDER = 'inbox'
FOLDER_CLASS = Inbox
ITEM_CLASS = Message
def test_fetch(self):
item = self.get_test_item()
self.test_folder.bulk_create(items=[item, item])
ids = self.test_folder.filter(categories__contains=item.categories)
items = list(self.account.fetch(ids=ids))
for item in items:
self.assertIsInstance(item, self.ITEM_CLASS)
self.assertEqual(len(items), 2)
items = list(self.account.fetch(ids=ids, only_fields=['subject']))
self.assertEqual(len(items), 2)
items = list(self.account.fetch(ids=ids, only_fields=[FieldPath.from_string('subject', self.test_folder)]))
self.assertEqual(len(items), 2)
items = list(self.account.fetch(ids=ids, only_fields=['id', 'changekey']))
self.assertEqual(len(items), 2)
def test_no_account(self):
# Test bulk operations on items with no self.account
item = self.get_test_item()
item.account = None
res = self.test_folder.bulk_create(items=[item])[0]
item.id, item.changekey = res.id, res.changekey
item.account = None
self.assertEqual(list(self.account.fetch(ids=[item]))[0].id, item.id)
item.account = None
res = self.account.bulk_update(items=[(item, ('subject',))])[0]
item.id, item.changekey = res
item.account = None
res = self.account.bulk_copy(ids=[item], to_folder=self.account.trash)[0]
item.id, item.changekey = res
item.account = None
res = self.account.bulk_move(ids=[item], to_folder=self.test_folder)[0]
item.id, item.changekey = res
item.account = None
self.assertEqual(self.account.bulk_delete(ids=[item]), [True])
item = self.get_test_item().save()
item.account = None
self.assertEqual(self.account.bulk_send(ids=[item]), [True])
def test_empty_args(self):
# We allow empty sequences for these methods
self.assertEqual(self.test_folder.bulk_create(items=[]), [])
self.assertEqual(list(self.account.fetch(ids=[])), [])
self.assertEqual(self.account.bulk_create(folder=self.test_folder, items=[]), [])
self.assertEqual(self.account.bulk_update(items=[]), [])
self.assertEqual(self.account.bulk_delete(ids=[]), [])
self.assertEqual(self.account.bulk_send(ids=[]), [])
self.assertEqual(self.account.bulk_copy(ids=[], to_folder=self.account.trash), [])
self.assertEqual(self.account.bulk_move(ids=[], to_folder=self.account.trash), [])
self.assertEqual(self.account.upload(data=[]), [])
self.assertEqual(self.account.export(items=[]), [])
def test_qs_args(self):
# We allow querysets for these methods
qs = self.test_folder.none()
self.assertEqual(list(self.account.fetch(ids=qs)), [])
with self.assertRaises(ValueError):
# bulk_create() does not allow queryset input
self.account.bulk_create(folder=self.test_folder, items=qs)
with self.assertRaises(ValueError):
# bulk_update() does not allow queryset input
self.account.bulk_update(items=qs)
self.assertEqual(self.account.bulk_delete(ids=qs), [])
self.assertEqual(self.account.bulk_send(ids=qs), [])
self.assertEqual(self.account.bulk_copy(ids=qs, to_folder=self.account.trash), [])
self.assertEqual(self.account.bulk_move(ids=qs, to_folder=self.account.trash), [])
self.assertEqual(self.account.upload(data=qs), [])
self.assertEqual(self.account.export(items=qs), [])
def test_no_kwargs(self):
self.assertEqual(self.test_folder.bulk_create([]), [])
self.assertEqual(list(self.account.fetch([])), [])
self.assertEqual(self.account.bulk_create(self.test_folder, []), [])
self.assertEqual(self.account.bulk_update([]), [])
self.assertEqual(self.account.bulk_delete([]), [])
self.assertEqual(self.account.bulk_send([]), [])
self.assertEqual(self.account.bulk_copy([], to_folder=self.account.trash), [])
self.assertEqual(self.account.bulk_move([], to_folder=self.account.trash), [])
self.assertEqual(self.account.upload([]), [])
self.assertEqual(self.account.export([]), [])
def test_invalid_bulk_args(self):
# Test bulk_create
with self.assertRaises(ValueError):
# Folder must belong to account
self.account.bulk_create(folder=Folder(root=None), items=[1])
with self.assertRaises(AttributeError):
# Must have folder on save
self.account.bulk_create(folder=None, items=[1], message_disposition=SAVE_ONLY)
# Test that we can send_and_save with a default folder
self.account.bulk_create(folder=None, items=[], message_disposition=SEND_AND_SAVE_COPY)
with self.assertRaises(AttributeError):
# Must not have folder on send-only
self.account.bulk_create(folder=self.test_folder, items=[1], message_disposition=SEND_ONLY)
# Test bulk_update
with self.assertRaises(ValueError):
# Cannot update in send-only mode
self.account.bulk_update(items=[1], message_disposition=SEND_ONLY)
def test_bulk_failure(self):
# Test that bulk_* can handle EWS errors and return the errors in order without losing non-failure results
items1 = [self.get_test_item().save() for _ in range(3)]
items1[1].changekey = 'XXX'
for i, res in enumerate(self.account.bulk_delete(items1)):
if i == 1:
self.assertIsInstance(res, ErrorInvalidChangeKey)
else:
self.assertEqual(res, True)
items2 = [self.get_test_item().save() for _ in range(3)]
items2[1].id = 'AAAA=='
for i, res in enumerate(self.account.bulk_delete(items2)):
if i == 1:
self.assertIsInstance(res, ErrorInvalidIdMalformed)
else:
self.assertEqual(res, True)
items3 = [self.get_test_item().save() for _ in range(3)]
items3[1].id = items1[0].id
for i, res in enumerate(self.account.fetch(items3)):
if i == 1:
self.assertIsInstance(res, ErrorItemNotFound)
else:
self.assertIsInstance(res, Item)
class CalendarBulkMethodTest(BaseItemTest):
TEST_FOLDER = 'calendar'
FOLDER_CLASS = Calendar
ITEM_CLASS = CalendarItem
def test_no_account(self):
# Test corner cases with bulk operations on items with no self.account
item = self.get_test_item()
item.recurrence = None
item.is_all_day = True
item.start, item.end = datetime.date(2020, 1, 1), datetime.date(2020, 1, 2)
item.account = None
res = self.test_folder.bulk_create(items=[item])[0]
item.id, item.changekey = res.id, res.changekey
item.account = None
self.account.bulk_update(items=[(item, ('start',))])
| bsd-2-clause | -3,528,518,229,622,913,500 | 46.191083 | 115 | 0.634364 | false |
luzhuomi/collamine-client-python | webapp/webapp/settings.py | 1 | 2832 | """
Django settings for webapp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Django settings for moodtweet project.
try:
import pymysql
pymysql.install_as_MySQLdb()
except ImportError:
pass
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vw_+8()m^o3mxkqxcu%n#$^gjqx8_qn$p&#krg3(+a8cq^1ty&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crawler'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'webapp.urls'
WSGI_APPLICATION = 'webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.sqlite3',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': 'djangocrawler',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost', # Or an IP Address that your DB is hosted on
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'webapp/static/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
| apache-2.0 | 8,016,252,388,269,246,000 | 23.413793 | 75 | 0.707627 | false |
pu6ki/elsyser | materials/tests.py | 1 | 16162 | from django.contrib.auth.models import User
from rest_framework.test import APITestCase, APIClient
from rest_framework.reverse import reverse
from rest_framework import status
from students.models import Class, Subject, Student, Teacher
from .serializers import MaterialSerializer
from .models import Material
class MaterialsViewSetTestCase(APITestCase):
def setUp(self):
self.client = APIClient()
self.list_view_name = 'materials:nested_materials-list'
self.detail_view_name = 'materials:nested_materials-detail'
self.serializer_class = MaterialSerializer
self.clazz = Class.objects.create(number=10, letter='A')
self.subject = Subject.objects.create(title='test_subject')
self.student_user = User.objects.create(username='test', password='pass')
self.student = Student.objects.create(user=self.student_user, clazz=self.clazz)
self.teacher_user = User.objects.create(username='author', password='pass123')
self.teacher = Teacher.objects.create(user=self.teacher_user, subject=self.subject)
self.material = Material.objects.create(
title='test material',
section='test material section',
content='test material content',
class_number=self.clazz.number,
subject=self.subject,
author=self.teacher
)
def test_materials_list_with_anonymous_user(self):
response = self.client.get(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id})
)
self.assertEqual(response.data['detail'], 'Authentication credentials were not provided.')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_materials_detail_with_anonymous_user(self):
response = self.client.get(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
)
)
self.assertEqual(
response.data['detail'],
'Authentication credentials were not provided.'
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_materials_list_with_authenticated_user(self):
self.client.force_authenticate(user=self.student_user)
response = self.client.get(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id})
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_materials_detail_with_authenticated_user(self):
self.client.force_authenticate(user=self.student_user)
response = self.client.get(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_materials_creation_with_student_account(self):
self.client.force_authenticate(user=self.student_user)
self.material.title = 'test title'
post_data = self.serializer_class(self.material).data
response = self.client.post(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id}),
post_data,
format='json'
)
self.assertEqual(
response.data['detail'],
'Only teachers are allowed to view and modify this content.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_materials_creation_with_too_short_title(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.title = '.'
post_data = self.serializer_class(self.material).data
response = self.client.post(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id}),
post_data,
format='json'
)
self.assertEqual(response.data['title'], ['Ensure this field has at least 3 characters.'])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_creation_with_too_long_title(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.title = 'test title' * 150
post_data = self.serializer_class(self.material).data
response = self.client.post(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id}),
post_data,
format='json'
)
self.assertEqual(
response.data['title'], ['Ensure this field has no more than 150 characters.']
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_creation_with_too_short_section(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.section = '.'
post_data = self.serializer_class(self.material).data
response = self.client.post(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id}),
post_data,
format='json'
)
self.assertEqual(
response.data['section'],
['Ensure this field has at least 3 characters.']
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_creation_with_too_long_section(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.section = 'test title' * 150
post_data = self.serializer_class(self.material).data
response = self.client.post(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id}),
post_data,
format='json'
)
self.assertEqual(
response.data['section'], ['Ensure this field has no more than 150 characters.']
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_creation_with_blank_content(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.content = ''
post_data = self.serializer_class(self.material).data
response = self.client.post(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id}),
post_data,
format='json'
)
self.assertEqual(response.data['content'], ['This field may not be blank.'])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_creation_with_valid_data(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.title = 'test title'
self.material.section = 'test section'
self.material.content = 'test content'
post_data = self.serializer_class(self.material).data
response = self.client.post(
reverse(self.list_view_name, kwargs={'subject_pk': self.material.subject.id}),
post_data,
format='json'
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_materials_update_with_student_account(self):
self.client.force_authenticate(user=self.student_user)
self.material.title = 'test title'
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
put_data,
format='json'
)
self.assertEqual(
response.data['detail'], 'Only teachers are allowed to view and modify this content.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_materials_update_with_invalid_subject_id(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.title = 'test title'
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id + 1, 'pk': self.material.id}
),
put_data,
format='json'
)
self.assertEqual(response.data['detail'], 'Not found.')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_materials_update_with_invalid_id(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.title = 'test title'
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id + 1}
),
put_data,
format='json'
)
self.assertEqual(response.data['detail'], 'Not found.')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_materials_update_with_too_short_title(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.title = '.'
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
put_data,
format='json'
)
self.assertEqual(response.data['title'], ['Ensure this field has at least 3 characters.'])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_update_with_too_long_title(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.title = 'test title' * 150
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
put_data,
format='json'
)
self.assertEqual(
response.data['title'], ['Ensure this field has no more than 150 characters.']
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_update_with_too_short_section(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.section = '.'
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
put_data,
format='json'
)
self.assertEqual(response.data['section'], ['Ensure this field has at least 3 characters.'])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_update_with_too_long_section(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.section = 'test title' * 150
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
put_data,
format='json'
)
self.assertEqual(
response.data['section'], ['Ensure this field has no more than 150 characters.']
)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_update_with_blank_content(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.content = ''
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
put_data,
format='json'
)
self.assertEqual(response.data['content'], ['This field may not be blank.'])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_materials_update_with_valid_data(self):
self.client.force_authenticate(user=self.teacher_user)
self.material.title = 'test title'
self.material.section = 'test section'
self.material.content = 'test content'
put_data = self.serializer_class(self.material).data
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
put_data,
format='json'
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_materials_update_of_another_user(self):
self.client.force_authenticate(user=self.teacher_user)
new_user = User.objects.create(username='test2', password='pass')
new_teacher = Teacher.objects.create(user=new_user, subject=self.subject)
self.material.author = new_teacher
self.material.save()
response = self.client.put(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
{'topic': 'test topic'},
format='json'
)
self.assertEqual(
response.data['detail'],
'You should be the author of this content in order to modify it.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_materials_deletion_with_invalid_subject_id(self):
self.client.force_authenticate(user=self.teacher_user)
response = self.client.delete(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id + 1, 'pk': self.material.id}
)
)
self.assertEqual(response.data['detail'], 'Not found.')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_materials_deletion_with_invalid_id(self):
self.client.force_authenticate(user=self.teacher_user)
response = self.client.delete(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id + 1}
)
)
self.assertEqual(response.data['detail'], 'Not found.')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_materials_deletion_of_another_user(self):
self.client.force_authenticate(user=self.teacher_user)
new_user = User.objects.create(username='test2', password='pass')
new_teacher = Teacher.objects.create(user=new_user, subject=self.subject)
self.material.author = new_teacher
self.material.save()
response = self.client.delete(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
)
self.assertEqual(
response.data['detail'],
'You should be the author of this content in order to modify it.'
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_materials_deletion(self):
self.client.force_authenticate(user=self.teacher_user)
response = self.client.delete(
reverse(
self.detail_view_name,
kwargs={'subject_pk': self.material.subject.id, 'pk': self.material.id}
),
)
self.assertEqual(Material.objects.count(), 0)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
| mit | 5,740,695,154,396,887,000 | 36.49884 | 100 | 0.6136 | false |
SamHames/scikit-image | skimage/viewer/tests/test_widgets.py | 1 | 2740 |
import os
from skimage import data, img_as_float, io
from skimage.viewer import ImageViewer, viewer_available
from skimage.viewer.widgets import (
Slider, OKCancelButtons, SaveButtons, ComboBox, Text)
from skimage.viewer.plugins.base import Plugin
from skimage.viewer.qt import QtGui, QtCore
from numpy.testing import assert_almost_equal, assert_equal
from numpy.testing.decorators import skipif
def get_image_viewer():
image = data.coins()
viewer = ImageViewer(img_as_float(image))
viewer += Plugin()
return viewer
@skipif(not viewer_available)
def test_combo_box():
viewer = get_image_viewer()
cb = ComboBox('hello', ('a', 'b', 'c'))
viewer.plugins[0] += cb
assert_equal(str(cb.val), 'a')
assert_equal(cb.index, 0)
cb.index = 2
    assert_equal(str(cb.val), 'c')
assert_equal(cb.index, 2)
@skipif(not viewer_available)
def test_text_widget():
viewer = get_image_viewer()
txt = Text('hello', 'hello, world!')
viewer.plugins[0] += txt
assert_equal(str(txt.text), 'hello, world!')
txt.text = 'goodbye, world!'
assert_equal(str(txt.text), 'goodbye, world!')
@skipif(not viewer_available)
def test_slider_int():
viewer = get_image_viewer()
sld = Slider('radius', 2, 10, value_type='int')
viewer.plugins[0] += sld
assert_equal(sld.val, 4)
sld.val = 6
assert_equal(sld.val, 6)
sld.editbox.setText('5')
sld._on_editbox_changed()
assert_equal(sld.val, 5)
@skipif(not viewer_available)
def test_slider_float():
viewer = get_image_viewer()
sld = Slider('alpha', 2.1, 3.1, value=2.1, value_type='float',
orientation='vertical', update_on='move')
viewer.plugins[0] += sld
assert_equal(sld.val, 2.1)
sld.val = 2.5
assert_almost_equal(sld.val, 2.5, 2)
sld.editbox.setText('0.1')
sld._on_editbox_changed()
assert_almost_equal(sld.val, 2.5, 2)
@skipif(not viewer_available)
def test_save_buttons():
viewer = get_image_viewer()
sv = SaveButtons()
viewer.plugins[0] += sv
import tempfile
_, filename = tempfile.mkstemp(suffix='.png')
os.remove(filename)
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QtGui.QApplication.quit())
sv.save_to_stack()
sv.save_to_file(filename)
img = img_as_float(data.imread(filename))
assert_almost_equal(img, viewer.image)
img = io.pop()
assert_almost_equal(img, viewer.image)
@skipif(not viewer_available)
def test_ok_buttons():
viewer = get_image_viewer()
ok = OKCancelButtons()
viewer.plugins[0] += ok
    ok.update_original_image()
ok.close_plugin()
| bsd-3-clause | -3,345,162,663,781,999,600 | 24.346154 | 66 | 0.629197 | false |
hrahadiant/mini_py_project | basic_number_game_v0.py | 1 | 1543 | # basic number game
import random
# rules
# you must enter an integer
# to win this game, you must hit the secret number within 3 chances
print("Basic rules:")
print("You only have 3 chances to guess the number")
print("You can type the integer between 1-10 only")
print("You can choose play again when you lose")
print("Enjoy this game!")
guess_limit = 3
def play_again():
play_more = input("Do you want to play again? y/n ")
if play_more.lower() == "n":
print("Bye!")
exit()
elif play_more.lower() == "y":
main()
def check_number(number, hit):
global guess_limit
guess_limit -= 1
number = int(number)
if number == hit:
print("You hit the number!")
play_again()
elif number > hit:
print("Your guess is too high.")
print("Try another number. Remaining number of guesses is {}".format(guess_limit))
elif number < hit:
print("Your guess is too low.")
print("Try another number. Remaining number of guesses is {}". format(guess_limit))
if guess_limit == 0:
print("Sorry, you lose this game. My secret number is {}".format(hit))
play_again()
def check_hit(number):
try:
int(number)
except ValueError:
print("Please input the integer between 1 - 10")
main()
def main():
    # reset the shared guess counter so that "play again" starts a fresh round
    global guess_limit
    guess_limit = 3
    hit_number = random.randint(1, 10)
while True:
guess_number = input("Guess the number (1 - 10)> ")
check_hit(guess_number)
check_number(guess_number, hit_number)
main()
| apache-2.0 | -2,485,010,881,475,866,600 | 23.887097 | 91 | 0.616332 | false |
huntzhan/magic-constraints | magic_constraints/argument.py | 1 | 2768 | # -*- coding: utf-8 -*-
from __future__ import (
division, absolute_import, print_function, unicode_literals,
)
from builtins import * # noqa
from future.builtins.disabled import * # noqa
from magic_constraints.exception import MagicSyntaxError, MagicTypeError
def transform_to_slots(constraints_package, *args, **kwargs):
class UnFill(object):
pass
plen = len(constraints_package.parameters)
if len(args) > plen:
raise MagicSyntaxError(
'argument length unmatched.',
parameters=constraints_package.parameters,
args=args,
)
slots = [UnFill] * plen
unfill_count = plen
# 1. fill args.
for i, val in enumerate(args):
slots[i] = val
unfill_count -= len(args)
# 2. fill kwargs.
for key, val in kwargs.items():
if key not in constraints_package.name_hash:
raise MagicSyntaxError(
'invalid keyword argument',
parameters=constraints_package.parameters,
key=key,
)
i = constraints_package.name_hash[key]
if slots[i] is not UnFill:
raise MagicSyntaxError(
'key reassignment error.',
parameters=constraints_package.parameters,
key=key,
)
slots[i] = val
unfill_count -= 1
# 3. fill defaults if not set.
    # 3.1. deal with the case where no default values exist.
default_begin = constraints_package.start_of_defaults
if default_begin < 0:
default_begin = plen
# 3.2 fill defaults.
for i in range(default_begin, plen):
parameter = constraints_package.parameters[i]
j = constraints_package.name_hash[parameter.name]
if slots[j] is UnFill:
slots[j] = parameter.default
unfill_count -= 1
# 4. test if slots contains UnFill.
if unfill_count != 0:
raise MagicSyntaxError(
'slots contains unfilled argument(s).',
parameters=constraints_package.parameters,
slots=slots,
)
return slots
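# Illustrative sketch (not from the original docs): assuming a hypothetical
# constraints_package describing a signature like f(a, b=2) -- that is, two
# parameters, name_hash == {'a': 0, 'b': 1} and start_of_defaults == 1 -- the
# slots would be filled as follows:
#
#     transform_to_slots(constraints_package, 1)         # -> [1, 2]
#     transform_to_slots(constraints_package, 1, b=3)    # -> [1, 3]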
def check_and_bind_arguments(parameters, slots, bind_callback):
plen = len(parameters)
for i in range(plen):
arg = slots[i]
parameter = parameters[i]
wrapper = parameter.wrapper_for_deferred_checking()
# defer checking by wrapping the element of slot.
if wrapper:
slots[i] = wrapper(arg)
# check now.
elif not parameter.check_instance(arg):
raise MagicTypeError(
'argument unmatched.',
parameter=parameter,
argument=arg,
)
# bind.
bind_callback(parameter.name, arg)
| mit | 3,002,848,033,951,175,000 | 26.405941 | 72 | 0.58237 | false |
mfnch/pyrtist | pyrtist/lib2d/core_types.py | 1 | 16345 | # Copyright (C) 2017 Matteo Franchin
#
# This file is part of Pyrtist.
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
'''Fundamental types for the pyrtist 2D graphic library.
This module defines Point, Matrix, and other types commonly used in the
library.
'''
__all__ = ('create_enum', 'alias', 'Scalar', 'Point', 'Px', 'Py', 'Pang',
'PointTaker', 'GenericMatrix', 'Matrix', 'Close', 'Container',
'Offset', 'Scale', 'Center', 'AngleDeg', 'Radii', 'Through', 'Tri',
'View', 'Taker', 'combination', 'RejectError', 'Args')
import math
import numbers
import copy
from .base import Taker, combination, RejectError, Args
class Enum(object):
def __init__(self, name, value=None):
self.name = name
self.value = value
def __str__(self):
return '{}.{}'.format(get_class_name(self), self.name)
def __repr__(self):
args = ((self.name,) if self.value is None
else (self.name, self.value))
return '{}({})'.format(get_class_name(self),
', '.join(map(repr, args)))
def create_enum(name, doc, *enums):
d = ({'__doc__': doc} if doc is not None else {})
new_class = type(name, (Enum,), d)
for name in enums:
setattr(new_class, name, new_class(name))
return new_class
def alias(name, target, **attrs):
return type(name, (target,), attrs)
class Scalar(float):
'''Used to identify scalars that need to be transformed (such as line
widths) in a CmdStream.
'''
class Point(object):
'''Point with 2 components.
A Point() can be created in one of the following ways:
- Point(), Point(x) or Point(x, y). Use the provided argument to set the
x and y components. Missing components are set to zero.
- Point(Point(...)) to copy the point given as first argument.
- Point(tuple) to set from a tuple.
'''
@staticmethod
def vx(delta_x=1.0):
'''Return a vector with the given x component and zero y component.'''
return Point(delta_x)
@staticmethod
def vy(delta_y=1.0):
'''Return a vector with the given y component and zero x component.'''
return Point(0.0, delta_y)
@staticmethod
def vangle(angle):
'''Return a unit vector forming the specified angle with the x axis.'''
return Point(math.cos(angle), math.sin(angle))
@staticmethod
def sum(points, default=None):
return sum(points, default or Point())
@staticmethod
def interpolate(point_list, index):
'''Interpolate a point according to the given index.
Given a list of points `point_list` return an interpolated point,
according to the given index `index`. In particular, if `index` is:
- an integer, then this function simply returns `point_list[index]`
- a floating point number, then this function returns an interpolation
          of `point_list[floor(index)]` and `point_list[ceil(index)]`.
- a Point, then the result is similar to just giving a single float
          `index.x`, with the addition of a vector `index.y * ort`
where `ort` is the vector orthogonal to the segment selected by
`index.x`.
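        Example (illustrative, not from the original documentation):
          pts = [Point(0.0, 0.0), Point(1.0, 0.0), Point(1.0, 1.0)]
          Point.interpolate(pts, 1)      # plain indexing -> Point(1.0, 0.0)
          Point.interpolate(pts, 0.5)    # halfway along the first segment
                                         # -> Point(0.5, 0.0)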
'''
if isinstance(index, int):
return point_list[index]
elif isinstance(index, float):
index = Point(index, 0.0)
else:
index = Point(index)
n = len(point_list)
if n < 2:
if n == 0:
raise ValueError('Attempt to index empty point container')
return point_list[0]
prev_idx = math.floor(index.x)
x = index.x - prev_idx
prev = point_list[int(prev_idx) % n]
succ = point_list[(int(prev_idx) + 1) % n]
ret = prev*(1.0 - x) + succ*x
if index.y == 0.0:
return ret
ort = (succ - prev).ort()
return ret + ort * index.y
def __init__(self, *args, **kwargs):
self.x = self.y = 0.0
self.set(*args, **kwargs)
def set(self, *args, **kwargs):
if len(args) > 0:
arg0 = args[0]
if isinstance(arg0, numbers.Number):
xy = args
elif isinstance(arg0, (Point, tuple)):
xy = tuple(arg0) + args[1:]
else:
raise TypeError('Cannot handle first argument of {}()'
.format(self.__class__.__name__))
if len(xy) == 2:
self.x = float(xy[0])
self.y = float(xy[1])
elif len(xy) > 2:
raise TypeError('Too many arguments to {}()'
.format(self.__class__.__name__))
else:
assert len(xy) == 1
self.x = xy[0]
# The code below is there for compatibility reasons, but we should get
# rid of it eventually.
if 'x' in kwargs:
self.x = kwargs['x']
if 'y' in kwargs:
self.y = kwargs['y']
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.x, self.y)
def __iter__(self):
return iter((self.x, self.y))
def __neg__(self):
return type(self)(*tuple(-x for x in self))
def __pos__(self):
return self.copy()
def __add__(self, value):
if value == 0.0:
# This allows using `sum' without having to provide a `start'
# value. For example: sum([Point(...), Point(...), ...])
return self.copy()
return Point(x=self.x + value.x, y=self.y + value.y)
def __sub__(self, value):
return Point(x=self.x - value.x, y=self.y - value.y)
def __mul__(self, value):
if isinstance(value, numbers.Number):
return Point(x=self.x*value, y=self.y*value)
else:
return float(self.x*value.x + self.y*value.y)
def __rmul__(self, value):
return self.__mul__(value)
def __div__(self, value):
return Point(x=self.x/value, y=self.y/value)
__truediv__ = __div__
def copy(self):
return Point(x=self.x, y=self.y)
def dot(self, p):
'''Return the scalar product with p.'''
return self.x*p.x + self.y*p.y
def norm2(self):
'''Return the square of the norm for this vector.'''
return self.x*self.x + self.y*self.y
def norm(self):
'''Return the vector's norm.'''
return math.sqrt(self.norm2())
def angle(self):
'''Return the angle between the vector and the x axis.'''
return math.atan2(self.y, self.x)
def normalize(self):
        '''Normalize this vector in place.'''
n = self.norm()
if n != 0.0:
self.x /= n
self.y /= n
def normalized(self):
'''Return a normalized copy of this vector.'''
p = self.copy()
p.normalize()
return p
def ort(self):
        '''Return the orthogonal vector, rotated by 90 degrees anticlockwise.'''
return Point(-self.y, self.x)
def mirror(self, axes):
'''Mirror the point with respect to the x axis of the given Axes()
object.
'''
d = self - axes.origin
u10 = (axes.one_zero - axes.origin).normalized()
d_ort = u10*u10.dot(d)
return axes.origin - d + d_ort*2.0
def mirror_x(self, p):
'''Mirror the point with respect to an horizontal axis passing
through `p`.
'''
return Point(self.x, 2.0*p.y - self.y)
def mirror_y(self, p):
'''Mirror the point with respect to a vertical axis passing
through `p`.
'''
return Point(2.0*p.x - self.x, self.y)
def mirror_xy(self, p):
'''Mirror this point with respect to the specified point.'''
return 2.0*p - self
def Px(value):
return Point.vx(value)
def Py(value):
return Point.vy(value)
def Pang(angle):
'''Return a Point of unit norm forming the specified angle with the x axis.
'''
return Point(math.cos(angle), math.sin(angle))
class PointTaker(Taker):
def __init__(self, *args):
self.points = []
super(PointTaker, self).__init__(*args)
def __iter__(self):
return iter(self.points)
def __len__(self):
return len(self.points)
def __getitem__(self, index):
return self.points[index]
@combination(Point, PointTaker)
def fn(point, point_taker):
point_taker.points.append(point)
@combination(tuple, PointTaker)
def fn(tp, point_taker):
if len(tp) != 2:
raise RejectError()
point_taker.take(Point(tp))
class GenericMatrix(object):
@classmethod
def diag(cls, *entries):
'''Construct a diagonal matrix with the given diagonal entries.'''
m, n = cls.size
num_args = min(m, n)
if len(entries) < num_args:
raise TypeError('diag takes exactly {} arguments ({} given)'
.format(num_args, len(entries)))
mx = [[(entries[i] if i == j else 0.0) for j in range(n)]
for i in range(m)]
return cls(mx)
class Matrix(GenericMatrix):
size = (2, 3)
identity = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0]]
@classmethod
def rotation(cls, angle):
rcos = math.cos(angle)
rsin = math.sin(angle)
return cls([[rcos, -rsin, 0.0],
[rsin, rcos, 0.0]])
@classmethod
def translation(cls, t):
return cls([[1.0, 0.0, t.x],
[0.0, 1.0, t.y]])
def __init__(self, value=None):
super(Matrix, self).__init__()
self.set(value)
def set(self, value):
'''Set the matrix to the given value.'''
if value is None:
value = Matrix.identity
elif isinstance(value, Matrix):
value = value.value
self.value = [list(value[0]), list(value[1])]
def __repr__(self):
return 'Matrix({})'.format(repr(self.value))
def __mul__(self, b):
if isinstance(b, Point):
return self.apply(b)
if isinstance(b, tuple) and len(b) == 2:
return self.apply(Point(b))
ret = self.copy()
if isinstance(b, numbers.Number):
ret.scale(b)
else:
ret.multiply(b)
return ret
def get_entries(self):
'''Get the matrix entries as a tuple of 6 scalars.'''
return tuple(self.value[0] + self.value[1])
def multiply(self, b):
(a11, a12, a13), (a21, a22, a23) = ab = self.value
(b11, b12, b13), (b21, b22, b23) = b.value
ab[0][0] = a11*b11 + a12*b21; ab[0][1] = a11*b12 + a12*b22
ab[1][0] = a21*b11 + a22*b21; ab[1][1] = a21*b12 + a22*b22
ab[0][2] = a13 + a11*b13 + a12*b23
ab[1][2] = a23 + a21*b13 + a22*b23
def __rmul__(self, b):
if isinstance(b, numbers.Number):
return self.__mul__(b)
raise NotImplementedError()
def copy(self):
'''Return a copy of the matrix.'''
return Matrix(value=self.value)
def scale(self, s):
'''Scale the matrix by the given factor (in-place).'''
v = self.value
v[0][0] *= s; v[0][1] *= s; v[0][2] *= s
v[1][0] *= s; v[1][1] *= s; v[1][2] *= s
def translate(self, p):
'''Translate the matrix by the given Point value (in-place).'''
self.value[0][2] += p.x
self.value[1][2] += p.y
def apply(self, p):
'''Apply the matrix to a Point.'''
if not isinstance(p, Point):
p = Point(p)
(a11, a12, a13), (a21, a22, a23) = self.value
return Point(a11*p.x + a12*p.y + a13,
a21*p.x + a22*p.y + a23)
def det(self):
'''Return the determinant of the matrix.'''
m = self.value
return m[0][0]*m[1][1] - m[0][1]*m[1][0]
def invert(self):
'''Invert the matrix in place.'''
(m11, m12, m13), (m21, m22, m23) = m = self.value
det = m11*m22 - m12*m21
if det == 0.0:
raise ValueError('The matrix is singular: cannot invert it')
m[0][0] = new11 = m22/det; m[0][1] = new12 = -m12/det
m[1][0] = new21 = -m21/det; m[1][1] = new22 = m11/det
m[0][2] = -new11*m13 - new12*m23
m[1][2] = -new21*m13 - new22*m23
def get_inverse(self):
'''Return the inverse of the matrix.'''
ret = self.copy()
ret.invert()
return ret
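# Illustrative sketch (not part of the original documentation) of composing
# transforms and applying them to a Point; the result is approximate because
# of floating-point rounding in the trigonometric terms:
#
#     m = Matrix.translation(Point(1.0, 0.0)) * Matrix.rotation(math.pi / 2.0)
#     m.apply(Point(1.0, 0.0))    # ~ Point(1.0, 1.0)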
Close = create_enum('Close', 'Whether to close a path',
'no', 'yes')
class Container(object):
def __init__(self, *args):
self.args = args
def __repr__(self):
return '{name}({args})'.format(name=self.__class__.__name__,
args=', '.join(map(repr, self.args)))
def __iter__(self):
return iter(self.args)
def copy(self):
return self.__class__(*self.args)
def check(self, min_args, max_args):
if len(self.args) < min_args:
raise TypeError('{} object needs at least {} arguments'
.format(self.__class__.__name__, min_args))
if len(self.args) > max_args:
raise TypeError('{} object takes at most {} arguments'
.format(self.__class__.__name__, max_args))
class Offset(Point):
'''Alias for Point used to pass unitless offsets.'''
class Scale(Point):
'''Alias for Point used to pass scale factors.'''
def __init__(self, *args):
super(Scale, self).__init__()
self.y = None
self.set(*args)
if self.y is None:
self.y = self.x
class Center(Point):
'''Alias for Point used to pass the center for a rotation.'''
class Radii(Container):
'''Container which groups one or more radii (e.g. the x, y radii of
    an ellipse).
'''
class Through(list):
'''List of points that a geometric shape (e.g. a Circle) passes
through.'''
def __init__(self, *args):
super(Through, self).__init__(args)
class AngleDeg(float):
'''Floating point number representing an angle in degrees.'''
class Tri(Container):
'''Container which groups up to 3 points used to define a Bezier curve.'''
def __init__(self, *args):
n = len(args)
if n == 1:
self.args = (None, args[0], None)
elif n == 2:
self.args = (args[0], args[1], None)
elif n == 3:
self.args = args
else:
raise TypeError('Tri takes at most 3 points')
def copy(self, p):
return type(self)(*p.args)
@property
def p(self):
return self.args[1]
@property
def ip(self):
p_in = self.args[0]
if p_in is not None:
return p_in
p_out = self.args[2]
if p_out is not None:
return 2.0*self.args[1] - p_out
return self.args[1]
@property
def op(self):
p_out = self.args[2]
if p_out is not None:
return p_out
p_in = self.args[0]
if p_in is not None:
return 2.0*self.args[1] - p_in
return self.args[1]
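# Illustrative sketch (not from the original docs): when only the incoming and
# main points are given, the outgoing control point is obtained by mirroring
# the incoming one through the main point:
#
#     t = Tri(Point(0.0, 0.0), Point(1.0, 1.0))
#     t.p     # -> Point(1.0, 1.0)
#     t.ip    # -> Point(0.0, 0.0)
#     t.op    # -> Point(2.0, 2.0)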
class View(object):
'''Object used to pass information to the GUI.'''
def __init__(self, bbox, origin, size):
self.bbox = bbox
self.origin = origin
self.size = size
def __repr__(self):
b1 = self.bbox.min_point
b2 = self.bbox.max_point
bbox_args = ', '.join(map(str, (b1.x, b1.y, b2.x, b2.y)))
other_args = ', '.join(map(str, (self.origin.x, self.origin.y,
self.size.x, self.size.y)))
return '{}\n{}\n'.format(bbox_args, other_args)
| lgpl-2.1 | 7,144,760,757,763,698,000 | 29.212569 | 79 | 0.542918 | false |
yeephycho/densenet-tensorflow | data_provider/data_provider.py | 1 | 5117 | # Brief: Data provdier for image classification using tfrecord
# Data: 28/Aug./2017
# E-mail: [email protected]
# License: Apache 2.0
# By: Yeephycho @ Hong Kong
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import sys
import config as config
FLAGS = tf.app.flags.FLAGS
DATA_DIR = FLAGS.train_data_path
TRAINING_SET_SIZE = FLAGS.TRAINING_SET_SIZE
BATCH_SIZE = FLAGS.BATCH_SIZE
IMAGE_SIZE = FLAGS.IMAGE_SIZE
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# image object from tfrecord
class _image_object:
def __init__(self):
self.image = tf.Variable([], dtype = tf.string, trainable=False)
self.height = tf.Variable([], dtype = tf.int64, trainable=False)
self.width = tf.Variable([], dtype = tf.int64, trainable=False)
self.filename = tf.Variable([], dtype = tf.string, trainable=False)
self.label = tf.Variable([], dtype = tf.int32, trainable=False)
def read_and_decode(filename_queue):
with tf.name_scope('data_provider'):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features = {
"image/encoded": tf.FixedLenFeature([], tf.string),
"image/height": tf.FixedLenFeature([], tf.int64),
"image/width": tf.FixedLenFeature([], tf.int64),
"image/filename": tf.FixedLenFeature([], tf.string),
"image/class/label": tf.FixedLenFeature([], tf.int64),})
image_encoded = features["image/encoded"]
image_raw = tf.image.decode_jpeg(image_encoded, channels=3)
image_object = _image_object()
# image_object.image = tf.image.resize_image_with_crop_or_pad(image_raw, IMAGE_SIZE, IMAGE_SIZE)
image_object.image = tf.image.resize_images(image_raw, [IMAGE_SIZE, IMAGE_SIZE], method=0, align_corners=True)
image_object.height = features["image/height"]
image_object.width = features["image/width"]
image_object.filename = features["image/filename"]
image_object.label = tf.cast(features["image/class/label"], tf.int64)
return image_object
def feed_data(if_random = True, if_training = True):
with tf.name_scope('image_reader_and_preprocessor') as scope:
if(if_training):
filenames = [os.path.join(DATA_DIR, "train.tfrecord")]
else:
filenames = [os.path.join(DATA_DIR, "test.tfrecord")]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError("Failed to find file: " + f)
filename_queue = tf.train.string_input_producer(filenames)
image_object = read_and_decode(filename_queue)
if(if_training):
image = tf.cast(tf.image.random_flip_left_right(image_object.image), tf.float32)
# image = tf.image.adjust_gamma(tf.cast(image_object.image, tf.float32), gamma=1, gain=1) # Scale image to (0, 1)
# image = tf.image.per_image_standardization(image)
else:
image = tf.cast(image_object.image, tf.float32)
# image = tf.image.per_image_standardization(image_object.image)
label = image_object.label
filename = image_object.filename
if(if_training):
num_preprocess_threads = 2
else:
num_preprocess_threads = 1
if(if_random):
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(TRAINING_SET_SIZE * min_fraction_of_examples_in_queue)
print("Filling queue with %d images before starting to train. " "This will take some time." % min_queue_examples)
image_batch, label_batch, filename_batch = tf.train.shuffle_batch(
[image, label, filename],
batch_size = BATCH_SIZE,
num_threads = num_preprocess_threads,
capacity = min_queue_examples + 3 * BATCH_SIZE,
min_after_dequeue = min_queue_examples)
image_batch = tf.reshape(image_batch, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3))
label_offset = -tf.ones([BATCH_SIZE], dtype=tf.int64, name="label_batch_offset")
label_batch = tf.one_hot(tf.add(label_batch, label_offset), depth=5, on_value=1.0, off_value=0.0)
else:
image_batch, label_batch, filename_batch = tf.train.batch(
[image, label, filename],
batch_size = BATCH_SIZE,
num_threads = num_preprocess_threads)
image_batch = tf.reshape(image_batch, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3))
label_offset = -tf.ones([BATCH_SIZE], dtype=tf.int64, name="label_batch_offset")
label_batch = tf.one_hot(tf.add(label_batch, label_offset), depth=5, on_value=1.0, off_value=0.0)
return image_batch, label_batch, filename_batch
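# Minimal usage sketch (illustrative, not from the original repo); it assumes
# the TF1-style queue runners that the input pipeline above relies on:
#
#     image_batch, label_batch, filename_batch = feed_data(if_random=True, if_training=True)
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         coord = tf.train.Coordinator()
#         threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#         images, labels = sess.run([image_batch, label_batch])
#         coord.request_stop()
#         coord.join(threads)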
| apache-2.0 | -6,233,930,229,090,380,000 | 42.364407 | 125 | 0.632402 | false |
fooker/pyco | renderers/smoothgallery.py | 1 | 3383 | # This file is part of PyCo.
#
# PyCo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyCo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyCo. If not, see <http://www.gnu.org/licenses/>.
# Renders the pictures in the given folder into a picture gallery using
# smoothgallery. The configuration of the gallery has the following format:
# {
# "id" : "$ID$",
# "path" : "$PATH$"
# }
#
# The $ID$ must be a unique id for the gallery. The path contains the pictures.
# In addition to the pictures, the given path must contain the two folders
# "smalls" and "thumbs".
#
# The folder "smalls" must contain a smaller variant of the pictures with the
# same filename and a size of 800x600 pixels.
#
# The "thumbs" fulder must contain a smaller variant of the pictures with the
# same filename and a sizeo of 100x75 pixels.
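#
# Example configuration (illustrative values only):
# {
#     "id" : "holiday_gallery",
#     "path" : "/galleries/holiday"
# }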
import os
import simplejson as json
def renderSmoothgallery(content):
# Parse module configuration
config = json.loads(content)
# Find list of images
files = os.listdir(os.path.normpath(settings['base_path'] + '/' + config['path']))
# Build galary page
output = ''
output += '<script src="/smoothgallery/scripts/mootools.v1.11.js" type="text/javascript"></script>'
output += '<script src="/smoothgallery/scripts/jd.gallery.js" type="text/javascript"></script>'
output += '<link rel="stylesheet" href="/smoothgallery/css/jd.gallery.css" type="text/css" media="screen" />'
output += '<div id="' + config['id'] + '">'
for filename in files:
if not os.path.isdir(os.path.normpath(settings['base_path'] + '/' + config['path'] + '/' + filename)):
path_real = os.path.normpath(settings['base_path'] + '/' + config['path'] + '/' + filename)
path_image = os.path.normpath(config['path'] + '/' + filename)
path_thumb_real = os.path.normpath(settings['base_path'] + '/' + config['path'] + '/thumbs/' + filename)
path_thumb_image = os.path.normpath(config['path'] + '/thumbs/' + filename)
path_small_real = os.path.normpath(settings['base_path'] + '/' + config['path'] + '/smalls/' + filename)
path_small_image = os.path.normpath(config['path'] + '/smalls/' + filename)
output += '<div class="imageElement">'
output += '<h3>' + filename + '</h3>'
output += '<p></p>'
output += '<a href="' + path_image + '" title="open image" class="open"></a>'
output += '<img src="' + path_small_image + '" class="full" />'
output += '<img src="' + path_thumb_image + '" class="thumbnail" />'
output += '</div>'
output += '</div>'
output += '<script type="text/javascript">'
output += 'function startGallery() {'
output += 'var ' + config['id'] + ' = new gallery($(\'' + config['id'] + '\'), {'
output += 'timed: false'
output += '});'
output += '}'
output += 'window.addEvent(\'domready\', startGallery);'
output += '</script>'
return output
renderers['smoothgallery'] = renderSmoothgallery
| gpl-3.0 | -3,893,615,336,382,999,600 | 41.2875 | 111 | 0.650015 | false |
jmrbcu/foundation | foundation/log_utils.py | 1 | 1148 | # python imports
import os
import sys
import logging
import logging.handlers
import platform
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
def setup_root_logger(level=logging.DEBUG, formatter=None, log_file=None,
log_size=5242880, log_count=5):
logger = logging.getLogger()
logger.setLevel(level)
if formatter is None:
formatter = '"%(asctime)s - %(levelname)s - %(name)s - %(message)s"'
formatter = logging.Formatter(formatter)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.addHandler(NullHandler())
if log_file:
log_dir = os.path.dirname(log_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
rotating_handler = logging.handlers.RotatingFileHandler(
filename=log_file,
maxBytes=log_size,
backupCount=log_count
)
rotating_handler.setFormatter(formatter)
logger.addHandler(rotating_handler)
return logger
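# Minimal usage sketch (illustrative; the level and log file path below are
# placeholders, not project defaults):
#
#     logger = setup_root_logger(level=logging.INFO,
#                                log_file='/var/log/myapp/app.log')
#     logger.info('root logger configured')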
| gpl-2.0 | 2,679,257,480,493,880,300 | 27 | 76 | 0.658537 | false |
jfterpstra/bluebottle | setup.py | 1 | 4157 | #!/usr/bin/env python
import os
import bluebottle
from setuptools import setup, find_packages
def read_file(name):
return open(os.path.join(os.path.dirname(__file__), name)).read()
readme = read_file('README.rst')
changes = ''
dependency_links = [
'git+https://github.com/onepercentclub/django-taggit-autocomplete-modified.git@8e7fbc2deae2f1fbb31b574bc8819d9ae7c644d6#egg=django-taggit-autocomplete-modified-0.1.1b1',
'git+https://github.com/onepercentclub/[email protected]#egg=django-bb-salesforce-1.2.0',
'git+https://github.com/onepercentclub/[email protected]#egg=django-tenant-extras-2.0.6',
'git+https://github.com/onepercentclub/[email protected]#egg=django-token-auth-0.3.0',
'hg+https://bitbucket.org/jdiascarvalho/django-filetransfers@89c8381764da217d72f1fa396ce3929f0762b8f9#egg=django-filetransfers-0.1.1'
]
install_requires = [
'Babel==2.3.4',
'BeautifulSoup==3.2.1',
'Django==1.9.6',
'Pillow==3.2.0',
'South==1.0.2',
'Sphinx==1.4.1',
'bunch==1.0.1',
'celery==3.1.23',
'django-celery==3.1.17',
'django-choices==1.4.2',
'django-extensions==1.6.7',
'django-exportdb==0.4.6',
'django-filter==0.13.0',
'django-geoposition==0.2.2',
'django-localflavor==1.2',
'django-modeltranslation==0.11',
'django-taggit==0.18.3',
'django-tinymce==2.3.0',
'django-uuidfield==0.5.0',
'django-wysiwyg==0.7.1',
'django-dynamic-fixture==1.8.5',
'django-fluent-dashboard==0.6.1',
'djangorestframework==3.3.3',
'dkimpy==0.5.6',
'micawber==0.3.3',
'requests==2.5.1',
'sorl-thumbnail==12.3',
'transifex-client==0.11',
'django-tools==0.30.0',
'django-loginas==0.1.9',
'beautifulsoup4==4.4.1',
'psycopg2==2.6.1',
'django-fsm==2.4.0',
'suds-jurko==0.6',
'django-ipware==1.1.5',
'pygeoip==0.3.2',
'python-social-auth==0.2.19',
'python-memcached==1.57',
'lxml==3.6.0',
'unicodecsv==0.14.1',
'python-dateutil==2.5.3',
'gunicorn==19.5.0',
'surlex==0.2.0',
'django_polymorphic==0.9.2',
'fabric',
'django-tenant-schemas==1.6.2',
'raven==5.16.0',
'regex==2016.4.25',
'djangorestframework-jwt==1.8.0',
'django-filetransfers==0.1.1',
'django-admin-tools==0.7.2',
'django-rest-swagger==0.3.6',
'django-lockdown==1.2',
'mixpanel==4.3.0',
'wheel==0.29.0',
# Github requirements
'django-taggit-autocomplete-modified==0.1.1b1',
'django-fluent-contents==1.1.4',
'django-bb-salesforce==1.2.0',
    'django-tenant-extras==2.0.6',
'django-token-auth==0.3.0',
]
tests_requires = [
'httmock==1.2.5',
'coverage==4.0.3',
'django-nose==1.4.3',
'django-setuptest==0.2.1',
'factory-boy==2.7.0',
'mock==2.0.0',
'nose==1.3.7',
'pylint==1.5.5',
'tdaemon==0.1.1',
'WebTest==2.0.21',
'django-webtest==1.7.9',
'pyquery==1.2.13'
]
dev_requires = [
'ipdb'
]
setup(
name='bluebottle',
version=bluebottle.__version__,
license='BSD',
# Packaging.
packages=find_packages(exclude=('tests', 'tests.*')),
install_requires=install_requires,
dependency_links=dependency_links,
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': dev_requires,
'test': tests_requires,
},
include_package_data=True,
zip_safe=False,
# Metadata for PyPI.
description='Bluebottle, the crowdsourcing framework initiated by the 1%Club.',
long_description='\n\n'.join([readme, changes]),
author='1%Club',
author_email='[email protected]',
platforms=['any'],
url='https://github.com/onepercentclub/bluebottle',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Application Frameworks'
]
)
| bsd-3-clause | -624,714,853,652,360,200 | 28.48227 | 173 | 0.615588 | false |
eranroz/revscoring | revscoring/languages/french.py | 1 | 1882 | import sys
import enchant
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from .language import Language, LanguageUtility
STEMMER = SnowballStemmer("french")
STOPWORDS = set(stopwords.words('french') + ['a'])
BADWORDS = set([
'anus',
'con', 'cul',
'fesse', 'Foutre',
'gay',
'herpes', 'hiv', 'homosexuel',
'idiot',
'lesbien',
'merde', 'merdique',
'penis', 'prostituee', 'Putain', 'putes',
'Salop', 'stupide',
])
STEMMED_BADWORDS = set(STEMMER.stem(w) for w in BADWORDS)
try:
DICTIONARY = enchant.Dict("fr")
except enchant.errors.DictNotFoundError:
raise ImportError("No enchant-compatible dictionary found for 'fr'. " +
"Consider installing 'myspell-fr'.")
def stem_word_process():
def stem_word(word):
return STEMMER.stem(word.lower())
return stem_word
stem_word = LanguageUtility("stem_word", stem_word_process, depends_on=[])
def is_badword_process(stem_word):
def is_badword(word):
return stem_word(word) in STEMMED_BADWORDS
return is_badword
is_badword = LanguageUtility("is_badword", is_badword_process,
depends_on=[stem_word])
def is_misspelled_process():
def is_misspelled(word):
return not DICTIONARY.check(word)
return is_misspelled
is_misspelled = LanguageUtility("is_misspelled", is_misspelled_process,
depends_on=[])
def is_stopword_process():
def is_stopword(word):
return word.lower() in STOPWORDS
return is_stopword
is_stopword = LanguageUtility("is_stopword", is_stopword_process, depends_on=[])
sys.modules[__name__] = Language(
__name__,
[stem_word, is_badword, is_misspelled, is_stopword]
)
"""
Implements :class:`~revscoring.languages.language.Language` for French. Comes
complete with all language utilities.
"""
| mit | -3,909,994,930,598,729,700 | 27.953846 | 80 | 0.659405 | false |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/openmdao.lib-0.13.0-py2.7.egg/openmdao/lib/components/test/test_broadcaster.py | 1 | 2483 | import unittest
from openmdao.main.api import Assembly, Component
from openmdao.lib.components.broadcaster import Broadcaster
from openmdao.main.datatypes.api import Float
class Dummy(Component):
x = Float(iotype="out")
y = Float(iotype="in")
class testBroadcaster(unittest.TestCase):
def test_create(self):
b1 = Broadcaster(['x', 'y', 'z'])
self.assertTrue(hasattr(b1, "x_in"))
self.assertTrue(hasattr(b1, "x"))
self.assertTrue(hasattr(b1, "y_in"))
self.assertTrue(hasattr(b1, "y"))
self.assertTrue(hasattr(b1, "z_in"))
self.assertTrue(hasattr(b1, "z"))
b1.names = ['a', 'b']
self.assertFalse(hasattr(b1, "x_in"))
self.assertFalse(hasattr(b1, "x"))
self.assertFalse(hasattr(b1, "y_in"))
self.assertFalse(hasattr(b1, "y"))
self.assertFalse(hasattr(b1, "z_in"))
self.assertFalse(hasattr(b1, "z"))
self.assertTrue(hasattr(b1, 'a_in'))
self.assertTrue(hasattr(b1, 'a'))
self.assertTrue(hasattr(b1, 'b_in'))
self.assertTrue(hasattr(b1, 'b'))
b1.types = {'a': Float, 'default': Float}
self.assertTrue(hasattr(b1, 'a_in'))
self.assertTrue(hasattr(b1, 'a'))
self.assertTrue(hasattr(b1, 'b_in'))
self.assertTrue(hasattr(b1, 'b'))
def test_execute(self):
b1 = Broadcaster(['x', 'y'])
b1.x_in = 2
b1.y_in = 1
b1.run()
self.assertEqual(b1.x, 2)
self.assertEqual(b1.y, 1)
def test_connections(self):
asm = Assembly()
asm.add('dummy1', Dummy())
asm.add('dummy2', Dummy())
asm.add('bcast', Broadcaster(['x']))
asm.connect('dummy1.x', 'bcast.x_in')
asm.connect('bcast.x', 'dummy2.y')
self.assertEqual(set(asm.list_connections()), set([('dummy1.x', 'bcast.x_in'), ('bcast.x', 'dummy2.y')]))
asm.bcast.names = ['z']
self.assertEqual(asm.list_connections(), [])
def test_error(self):
try:
b = Broadcaster(['x'], {'y': Float})
except ValueError, err:
self.assertEqual(str(err), ': No type was provided for "x" and no "default" type was provided. '
'Specify at least one of these.')
else:
self.fail('ValueError Expected')
| gpl-2.0 | -3,170,840,660,115,608,000 | 30.833333 | 113 | 0.533629 | false |
nyu-mhealth/project-smsurvey | main/smsurvey/core/services/enrollment_service.py | 1 | 2019 | import time
import pytz
from datetime import datetime
from smsurvey.core.model.model import Model
from smsurvey.core.model.query.where import Where
class EnrollmentService:
@staticmethod
def get(enrollment_id):
enrollments = Model.repository.enrollments
return enrollments.select(Where(enrollments.id, Where.EQUAL, enrollment_id))
@staticmethod
def get_by_owner(owner_id):
enrollments = Model.repository.enrollments
return enrollments.select(Where(enrollments.owner_id, Where.EQUAL, owner_id), force_list=True)
@staticmethod
def add_enrollment(name, owner_id, open_date, close_date, expiry_date):
enrollments = Model.repository.enrollments
enrollment = enrollments.create()
enrollment.name = name
enrollment.owner_id = owner_id
enrollment.open_date = open_date
enrollment.close_date = close_date
enrollment.expiry_date = expiry_date
return enrollment.save()
@staticmethod
def delete_enrollment(enrollment_id):
enrollments = Model.repository.enrollments
enrollments.delete(Where(enrollments.id, Where.E, enrollment_id))
@staticmethod
def is_enrollment_open(enrollment_id):
enrollment = EnrollmentService.get(enrollment_id)
now = datetime.now()
return enrollment.open_date <= now < enrollment.close_date
@staticmethod
def enrollment_accessible(enrollment_id):
enrollment = EnrollmentService.get(enrollment_id)
return enrollment is not None and enrollment.expiry_date > datetime.now(tz=pytz.utc)
@staticmethod
def is_owned_by(enrollment_id, owner_id):
enrollment = EnrollmentService.get(enrollment_id)
return enrollment.owner_id == owner_id
@staticmethod
def participant_count(enrollment_id):
participants = Model.repository.participants
p = participants.select(Where(participants.enrollment_id, Where.E, enrollment_id), force_list=True)
return len(p)
| gpl-3.0 | 6,002,717,555,732,236,000 | 32.65 | 107 | 0.702823 | false |
gem/oq-engine | openquake/sep/tests/liquefaction/test_liquefaction.py | 1 | 4901 | import os
import unittest
import numpy as np
from openquake.sep.liquefaction import (
zhu_magnitude_correction_factor,
zhu_liquefaction_probability_general,
hazus_magnitude_correction_factor,
hazus_groundwater_correction_factor,
hazus_conditional_liquefaction_probability,
hazus_liquefaction_probability,
)
class test_zhu_functions(unittest.TestCase):
def test_zhu_magnitude_correction_factor(self):
mags = np.array([6.0, 7.0, 8.0])
test_res = np.array([0.5650244, 0.83839945, 1.18007706])
np.testing.assert_array_almost_equal(
zhu_magnitude_correction_factor(mags), test_res
)
def test_zhu_liquefaction_probability_general(self):
pass
class test_hazus_liquefaction_functions(unittest.TestCase):
def test_hazus_magnitude_correction_factor(self):
# magnitudes selected to roughly replicate Fig. 4.7 in the Hazus manual
mags = np.array([5.1, 6.1, 6.8, 7.6, 8.4])
Km = hazus_magnitude_correction_factor(mags)
test_res = np.array(
[1.5344407, 1.2845917, 1.1357584, 1.0000432, 0.9089488]
)
np.testing.assert_array_almost_equal(Km, test_res)
def test_hazus_gw_correction_factor_ft(self):
# replicates Fig. 4.8 in the Hazus manual
depth_ft = np.arange(4, 36, 4)
Kw = hazus_groundwater_correction_factor(depth_ft)
test_res = np.array(
[1.018, 1.106, 1.194, 1.282, 1.37, 1.458, 1.546, 1.634]
)
np.testing.assert_array_almost_equal(Kw, test_res)
def test_hazus_conditional_liquefaction_probability_vl(self):
# replicates Fig. 4.6 in the Hazus manual
pga_vl = np.linspace(0.2, 0.6, num=10)
cond_vl = hazus_conditional_liquefaction_probability(pga_vl, "vl")
test_res = np.array(
[
0.0,
0.0,
0.12177778,
0.30666667,
0.49155556,
0.67644444,
0.86133333,
1.0,
1.0,
1.0,
]
)
np.testing.assert_array_almost_equal(cond_vl, test_res)
def test_hazus_conditional_liquefaction_probability_l(self):
# Replicates Fig. 4.6 in the Hazus manual
# However values do not match figure exactly, though
# the formula and coefficients are double-checked...
pga_l = np.linspace(0.2, 0.6, num=10)
cond_l = hazus_conditional_liquefaction_probability(pga_l, "l")
test_res = np.array(
[
0.0,
0.18155556,
0.42911111,
0.67666667,
0.92422222,
1.0,
1.0,
1.0,
1.0,
1.0,
]
)
np.testing.assert_array_almost_equal(cond_l, test_res)
def test_hazus_conditional_liquefaction_probability_m(self):
# Replicates Fig. 4.6 in the Hazus manual
# However values do not match figure exactly, though
# the formula and coefficients are double-checked...
pga_m = np.linspace(0.1, 0.4, num=10)
cond_m = hazus_conditional_liquefaction_probability(pga_m, "m")
test_res = np.array(
[
0.0,
0.0,
0.11166667,
0.334,
0.55633333,
0.77866667,
1.0,
1.0,
1.0,
1.0,
]
)
np.testing.assert_array_almost_equal(cond_m, test_res)
def test_hazus_conditional_liquefaction_probability_h(self):
# Replicates Fig. 4.6 in the Hazus manual
# However values do not match figure exactly, though
# the formula and coefficients are double-checked...
pga_h = np.linspace(0.1, 0.3, num=10)
cond_h = hazus_conditional_liquefaction_probability(pga_h, "h")
test_res = np.array(
[
0.0,
0.01744444,
0.18788889,
0.35833333,
0.52877778,
0.69922222,
0.86966667,
1.0,
1.0,
1.0,
]
)
np.testing.assert_array_almost_equal(cond_h, test_res)
def test_hazus_conditional_liquefaction_probability_vh(self):
# Replicates Fig. 4.6 in the Hazus manual
# However values do not match figure exactly, though
# the formula and coefficients are double-checked...
pga_vh = np.linspace(0.05, 0.25, num=10)
cond_vh = hazus_conditional_liquefaction_probability(pga_vh, "vh")
test_res = np.array(
[0.0, 0.0, 0.0385, 0.2405, 0.4425, 0.6445, 0.8465, 1.0, 1.0, 1.0]
)
np.testing.assert_array_almost_equal(cond_vh, test_res)
| agpl-3.0 | 2,078,291,846,046,131,700 | 32.340136 | 79 | 0.544379 | false |
MazamaScience/ispaq | ispaq/utils.py | 1 | 7864 | """
Utility functions for ISPAQ.
:copyright:
Mazama Science
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import pandas as pd
from obspy import UTCDateTime
# Utility functions ------------------------------------------------------------
def write_simple_df(df, filepath, sigfigs=6):
"""
Write a pretty dataframe with appropriate significant figures to a .csv file.
:param df: Dataframe of simpleMetrics.
:param filepath: File to be created.
:param sigfigs: Number of significant figures to use.
:return: status
"""
if df is None:
raise("Dataframe of simple metrics does not exist.")
# Sometimes 'starttime' and 'endtime' get converted from UTCDateTime to float and need to be
# converted back. Nothing happens if this column is already of type UTCDateTime.
df.starttime = df.starttime.apply(UTCDateTime, precision=0) # no milliseconds
df.endtime = df.endtime.apply(UTCDateTime, precision=0) # no milliseconds
# Get pretty values
pretty_df = format_simple_df(df, sigfigs=sigfigs)
# Reorder columns, putting non-standard columns at the end and omitting 'qualityFlag'
columns = ['snclq','starttime','endtime','metricName','value']
original_columns = pretty_df.columns
extra_columns = list( set(original_columns).difference(set(columns)) )
extra_columns.remove('qualityFlag')
columns.extend(extra_columns)
# Write out .csv file
pretty_df[columns].to_csv(filepath, index=False)
# No return value
def format_simple_df(df, sigfigs=6):
"""
Create a pretty dataframe with appropriate significant figures.
:param df: Dataframe of simpleMetrics.
:param sigfigs: Number of significant figures to use.
:return: Dataframe of simpleMetrics.
The following conversions take place:
* Round the 'value' column to the specified number of significant figures.
* Convert 'starttime' and 'endtime' to python 'date' objects.
"""
# TODO: Why is type(df.value[0]) = 'str' at this point? Because metrics are always character strings?
# First convert 'N' to missing value
N_mask = df.value.str.contains('^N$')
df.loc[N_mask,'value'] = np.nan
    # Then convert the rest of the values to float
df.value = df.value.astype(float)
format_string = "." + str(sigfigs) + "g"
df.value = df.value.apply(lambda x: format(x, format_string))
if 'starttime' in df.columns:
df.starttime = df.starttime.apply(UTCDateTime, precision=0) # no milliseconds
df.starttime = df.starttime.apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S"))
if 'endtime' in df.columns:
df.endtime = df.endtime.apply(UTCDateTime, precision=0) # no milliseconds
df.endtime = df.endtime.apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S"))
# NOTE: df.time from SNR metric is already a string, otherwise it is NA
#if 'time' in df.columns:
#df.time = df.time.apply(lambda x: x.format_iris_web_service())
if 'qualityFlag' in df.columns:
df.qualityFlag = df.qualityFlag.astype(int)
return df
def write_numeric_df(df, filepath, sigfigs=6):
"""
Write a pretty dataframe with appropriate significant figures to a .csv file.
:param df: PSD dataframe.
:param filepath: File to be created.
:param sigfigs: Number of significant figures to use.
:return: status
"""
# Get pretty values
pretty_df = format_numeric_df(df, sigfigs=sigfigs)
# Write out .csv file
pretty_df.to_csv(filepath, index=False)
# No return value
def format_numeric_df(df, sigfigs=6):
"""
Create a pretty dataframe with appropriate significant figures.
:param df: Dataframe with only UTCDateTimes or numeric.
:param sigfigs: Number of significant figures to use.
:return: Dataframe of simpleMetrics.
The following conversions take place:
* Round the 'value' column to the specified number of significant figures.
* Convert 'starttime' and 'endtime' to python 'date' objects.
"""
format_string = "." + str(sigfigs) + "g"
for column in df.columns:
if column == 'starttime':
df.starttime = df.starttime.apply(UTCDateTime, precision=0) # no milliseconds
df.starttime = df.starttime.apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S"))
elif column == 'endtime':
df.endtime = df.endtime.apply(UTCDateTime, precision=0) # no milliseconds
df.endtime = df.endtime.apply(lambda x: x.strftime("%Y-%m-%dT%H:%M:%S"))
elif column == 'target':
pass # 'target' is the SNCL Id
else:
df[column] = df[column].astype(float)
df[column] = df[column].apply(lambda x: format(x, format_string))
return df
def get_slot(r_object, prop):
"""
Return a property from the R_Stream.
:param r_object: IRISSeismic Stream, Trace or TraceHeader object
:param prop: Name of slot in the R object or any child object
:return: python version value contained in the named property (aka 'slot')
This convenience function allows business logic code to easily extract
any property that is an atomic value in one of the R objects defined in
the IRISSeismic R package.
IRISSeismic slots as of 2016-04-07
stream_slots = r_stream.slotnames()
* url
* requestedStarttime
* requestedEndtime
* act_flags
* io_flags
* dq_flags
* timing_qual
* traces
trace_slots = r_stream.do_slot('traces')[0].slotnames()
* stats
* Sensor
* InstrumentSensitivity
* InputUnits
* data
stats_slots = r_stream.do_slot('traces')[0].do_slot('stats').slotnames()
* sampling_rate
* delta
* calib
* npts
* network
* location
* station
* channel
* quality
* starttime
* endtime
* processing
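    Example (illustrative, not from the original documentation):
        sampling_rate = get_slot(r_stream, 'sampling_rate')
        starttime = get_slot(r_stream, 'starttime')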
"""
slotnames = list(r_object.slotnames())
# R Stream object
if 'traces' in slotnames:
if prop in ['traces']:
# return traces as R objects
return r_object.do_slot(prop)
elif prop in ['requestedStarttime','requestedEndtime']:
# return times as UTCDateTime
return UTCDateTime(r_object.do_slot(prop)[0])
elif prop in slotnames:
            # return atomic types as is
return r_object.do_slot(prop)[0]
else:
            # looking for a property from lower down the hierarchy
r_object = r_object.do_slot('traces')[0]
slotnames = list(r_object.slotnames())
# R Trace object
if 'stats' in slotnames:
if prop in ['stats']:
# return stats as an R object
return r_object.do_slot(prop)
elif prop in ['data']:
# return data as an array
return list(r_object.do_slot(prop))
elif prop in slotnames:
            # return atomic types as is
return r_object.do_slot(prop)[0]
else:
            # looking for a property from lower down the hierarchy
r_object = r_object.do_slot('stats')
slotnames = list(r_object.slotnames())
# R TraceHeader object
if 'processing' in slotnames:
if prop in ['starttime','endtime']:
# return times as UTCDateTime
return UTCDateTime(r_object.do_slot(prop)[0])
else:
            # return atomic types as is
return r_object.do_slot(prop)[0]
# Should never get here
raise('"%s" is not a recognized slot name' % (prop))
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| gpl-3.0 | -5,262,944,125,174,644,000 | 34.107143 | 106 | 0.626907 | false |
geodynamics/pylith | examples/2d/subduction/viz/plot_shearratio.py | 1 | 4434 | #!/usr/bin/env pvpython
# -*- Python -*- (syntax highlighting)
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
# Plot the undeformed domain as a gray wireframe and then the fault
# surfaces, colored by the magnitude of fault slip.
#
# This Python script runs using pvpython or within the ParaView Python
# shell.
# User-specified parameters.
#
# Default values for parameters. To use different values, overwrite
# them in the ParaView Python shell or on the command line. For
# example, set OUTPUT_DIR to the absolute path if not starting
# ParaView from the terminal shell where you ran PyLith:
#
# import os
# OUTPUT_DIR = os.path.join(os.environ["HOME"], "src", "pylith", "examples", "2d", "subduction", "output")
DEFAULTS = {
"OUTPUT_DIR": "output",
"SIM": "step05",
"FAULTS": ["fault-slabtop"],
"TIMESTEP": 0, # Use 0 for first, -1 for last.
}
# ----------------------------------------------------------------------
from paraview.simple import *
import os
import numpy
def visualize(parameters):
# Disable automatic camera reset on "Show"
paraview.simple._DisableFirstRenderCameraReset()
# Read fault data
dataFaults = []
for fault in parameters.faults:
filename = os.path.join(parameters.output_dir, "%s-%s.xmf" % (parameters.sim, fault))
if not os.path.isfile(filename):
raise IOError("File '%s' does not exist." % filename)
data = XDMFReader(FileNames=[filename])
RenameSource("%s-%s" % (parameters.sim, fault), data)
dataFaults.append(data)
groupFaults = GroupDatasets(Input=dataFaults)
scene = GetAnimationScene()
scene.UpdateAnimationUsingDataTimeSteps()
view = GetActiveViewOrCreate('RenderView')
# Ratio of shear to normal traction
calculatorRatio = Calculator(Input=groupFaults)
calculatorRatio.Function = '-abs(traction_X)/traction_Y'
calculatorRatio.ResultArrayName = 'shearRatio'
ratioDisplay = Show(calculatorRatio, view)
ColorBy(ratioDisplay, ('POINTS', 'shearRatio'))
ratioDisplay.RescaleTransferFunctionToDataRange(True)
ratioDisplay.SetScalarBarVisibility(view, True)
ratioDisplay.SetRepresentationType('Wireframe')
ratioDisplay.LineWidth = 8.0
# Rescale color and/or opacity maps used to exactly fit the current data range
    ratioLUT = GetColorTransferFunction('shearRatio')
ratioDisplay.RescaleTransferFunctionToDataRange(False, False)
# Update a scalar bar component title.
UpdateScalarBarsComponentTitle(ratioLUT, ratioDisplay)
# Annotate time
tstamp = AnnotateTimeFilter(groupFaults)
tstamp.Format = 'Time: %2.0f yr'
tstamp.Scale = 3.168808781402895e-08 # seconds to years
tstampDisplay = Show(tstamp, view)
tstampDisplay.FontFamily = "Courier"
tstampDisplay.FontSize = 14
view.ResetCamera()
view.Update()
Render()
class Parameters(object):
keys = ("OUTPUT_DIR", "SIM", "FAULTS")
def __init__(self):
globalVars = globals()
for key in Parameters.keys:
if key in globalVars.keys():
setattr(self, key.lower(), globalVars[key])
else:
setattr(self, key.lower(), DEFAULTS[key])
return
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Running from outside the ParaView GUI via pvpython
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--sim", action="store", dest="sim", default=DEFAULTS["SIM"])
parser.add_argument("--faults", action="store", dest="faults")
args = parser.parse_args()
if args.faults:
args.faults = args.faults.split(",")
else:
        args.faults = DEFAULTS["FAULTS"]
    args.output_dir = DEFAULTS["OUTPUT_DIR"]
    visualize(args)
view = GetRenderView()
view.ViewSize = [960, 540]
view.Update()
Interact()
else:
# Running inside the ParaView GUI
visualize(Parameters())
# End of file
| mit | -3,442,991,986,056,270,300 | 30.899281 | 106 | 0.632837 | false |
anshulc95/exch | exch/cli.py | 1 | 2590 | #!/usr/bin/env python3
""""
exch
=====
A CLI application built using python to see currency exchange rates.
:copyright: (c) 2017 by Anshul Chauhan
"""
import json
import click
import pkg_resources
from exch.helpers import fixer, fixer_sync
from exch.file_handling import get_default_base, get_default_target,\
set_default_base, set_default_target
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
RATES_FIXER_JSON_FILE = pkg_resources.resource_filename('exch', 'data/fixer_rates.json')
DEFAULT_JSON_FILE = pkg_resources.resource_filename('exch', 'data/defaults.json')
@click.group(invoke_without_command=True)
@click.pass_context
@click.option('--base', '-b', default=get_default_base(DEFAULT_JSON_FILE),
type=str, show_default=True,
help='Currency you are converting from.')
@click.option('--target', '-t', default=get_default_target(DEFAULT_JSON_FILE),
type=str, show_default=True, help='Currency you\'re converting to.')
@click.option('--amount', '-a', default=1.0, type=float, show_default=True,
help='Amount to convert.')
@click.option('--set_base', '-sb', is_flag=True, default=False,
help='Set new default base.')
@click.option('--set_target', '-st', is_flag=True, default=False,
help='Set new default target.')
def cli(ctx, base, target, amount, set_base, set_target):
"""
    Get the latest currency exchange rates from:
\b
- fixer.io
"""
if ctx.invoked_subcommand is None:
output = fixer(base, target, amount, RATES_FIXER_JSON_FILE)
if isinstance(output, float):
# 2:.2f for two decimal values, manually specified
output = "{0} {1} = {2:.2f} {3}".format(amount, base, output, target)
if set_base:
set_default_base(base, DEFAULT_JSON_FILE)
if set_target:
set_default_target(target, DEFAULT_JSON_FILE)
click.echo(output)
# subcommands
@cli.command()
def currencies():
""" prints the list of currencies available """
with open(RATES_FIXER_JSON_FILE) as rates_json_file:
json_rates = json.load(rates_json_file)
list_of_currencies = []
list_of_currencies.append(json_rates['base'])
for key in json_rates['rates']:
list_of_currencies.append(key)
list_of_currencies.sort()
click.echo(', '.join(list_of_currencies))
@cli.command()
def sync():
""" download the latest rates """
if fixer_sync(RATES_FIXER_JSON_FILE) in range(200, 300):
click.echo("New rates have been saved.")
| mit | 850,260,063,519,048,700 | 32.636364 | 88 | 0.642085 | false |
devpixelwolf/Pixelboard | src/money/service.py | 1 | 1135 | # -*- coding: utf-8 -*-
from datetime import datetime
from urllib2 import urlopen
import json
from django.http.response import JsonResponse
from django.shortcuts import render_to_response
def get_embed():
info = get_info()
return render_to_response('money/widget.html', {'info': info}).content
def get_info():
url = 'http://api.promasters.net.br/cotacao/v1/valores'
resposta = urlopen(url).read()
data = json.loads(resposta.decode('utf-8'))
# Acessando os valores/chaves do dicionario/Json.
ARS = (data['valores']['ARS']['valor'])
BTC = (data['valores']['BTC']['valor'])
EUR = (data['valores']['EUR']['valor'])
GBP = (data['valores']['GBP']['valor'])
USD = (data['valores']['USD']['valor'])
return {'ars': {'value': ('%.2f' % ARS).replace('.', ','), 'char': '$'},
            'btc': {'value': ('%.2f' % BTC).replace('.', ','), 'char': '฿'},
            'eur': {'value': ('%.2f' % EUR).replace('.', ','), 'char': '€'},
            'gbp': {'value': ('%.2f' % GBP).replace('.', ','), 'char': '£'},
'usd': {'value': ('%.4f' % USD).replace('.', ','), 'char': 'US$'}}
| gpl-3.0 | 7,660,595,988,941,571,000 | 34.3125 | 78 | 0.537168 | false |
willm/DDEXUI | ddex/tests/test_resource_manager.py | 1 | 3903 | import unittest
from shutil import rmtree
from os import path
from tempfile import gettempdir
import uuid
from DDEXUI.file_parser import FileParser
from DDEXUI.ddex.resource import SoundRecording, Image
from DDEXUI.resource_manager import ResourceManager
class ResourceManagerSoundRecordingTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.upc = "49024343245"
self.isrc = "FR343245"
rmtree(self.upc, ignore_errors=True)
self.root_folder = gettempdir()
self.batch_id = str(uuid.uuid4())
self.title = "the title"
file_path = path.join('ddex', 'tests', 'resources', 'test.mp3')
self.resource_reference = "A1"
self.technical_resource_details_reference = "T1"
self.expected = SoundRecording(self.resource_reference, self.isrc, self.title, FileParser().parse(file_path), self.technical_resource_details_reference)
self.subject = ResourceManager(FileParser(), self.batch_id, self.root_folder)
self.resource = self.subject.add_sound_recording(self.upc, file_path, self.isrc, self.title, self.resource_reference, self.technical_resource_details_reference)
def test_should_copy_file_to_product_resources_folder(self):
expected_path = path.join(self.root_folder, self.batch_id, self.upc, 'resources', "{}_{}.mp3".format(self.isrc, self.technical_resource_details_reference))
self.assertTrue(path.isfile(expected_path), "expected {} to exist".format(expected_path))
def test_should_create_resource_with_isrc(self):
self.assertEqual(self.resource.isrc, self.expected.isrc)
def test_should_create_resource_with_title(self):
self.assertEqual(self.resource.title, self.expected.title)
def test_should_create_resource_with_resource_reference(self):
self.assertEqual(self.resource.resource_reference(), self.resource_reference)
def test_should_create_resource_with_technical_resource_details_reference(self):
self.assertEqual(self.resource.technical_resource_details_reference, self.technical_resource_details_reference)
def test_should_create_resource_with_file(self):
self.assertEqual(self.resource.file_metadata.md5, self.expected.file_metadata.md5)
class ResourceManagerImageTests(unittest.TestCase):
@classmethod
def setUpClass(self):
self.upc = "49024343245"
self.isrc = "FR343245"
rmtree(self.upc, ignore_errors=True)
self.root_folder = gettempdir()
self.batch_id = str(uuid.uuid4())
self.title = "the title"
file_path = path.join('ddex', 'tests', 'resources', 'test.jpg')
self.resource_reference = "A2"
self.technical_resource_details_reference = "T4"
self.expected = Image(self.resource_reference, self.upc, FileParser().parse(file_path), '')
self.subject = ResourceManager(FileParser(), self.batch_id, self.root_folder)
self.resource = self.subject.add_image(self.upc, file_path, self.resource_reference, self.technical_resource_details_reference)
def test_should_copy_file_to_product_resources_folder(self):
expected_path = path.join(self.root_folder, self.batch_id, self.upc, 'resources', self.upc+'.jpg')
self.assertTrue(path.isfile(expected_path))
def test_should_create_resource_with_upc(self):
self.assertEqual(self.resource.id_value(), self.upc)
def test_should_create_resource_with_file(self):
self.assertEqual(self.resource.file_metadata.md5, self.expected.file_metadata.md5)
def test_should_create_resource_with_resource_reference(self):
self.assertEqual(self.resource.resource_reference(), self.resource_reference)
def test_should_create_resource_with_technical_resource_details_reference(self):
self.assertEqual(self.resource.technical_resource_details_reference, self.technical_resource_details_reference)
| gpl-2.0 | 2,645,101,115,123,224,000 | 46.597561 | 168 | 0.71919 | false |
wangjiezhe/FetchNovels | novel/base.py | 1 | 3729 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from urllib.error import HTTPError
from urllib.parse import urlparse
import pypinyin
from lxml.etree import XMLSyntaxError
from pyquery import PyQuery
from requests import ConnectionError
from novel.config import get_headers, update_and_save_novel_list
from novel.decorators import retry
from novel.utils import Tool
class BaseNovel(object):
def __init__(self, url,
headers=None, proxies=None,
encoding='UTF-8', tool=None,
tid=None, cache=False):
self.url = url
self._headers = headers or get_headers()
self._proxies = proxies
self.encoding = encoding
self.tool = tool or Tool
self._tid = tid
self.cache = cache
self.running = False
self.overwrite = True
self.refine = self.doc = None
self.title = self.author = ''
@property
def tid(self):
if self._tid is not None:
return str(self._tid)
else:
tp = pypinyin.slug(self.title, errors='ignore', separator='_')
ap = pypinyin.slug(self.author, errors='ignore', separator='_')
tid = '{} {}'.format(tp, ap)
return tid
@classmethod
def get_source_from_class(cls):
return cls.__name__.lower()
def get_source_from_url(self):
source = urlparse(self.url).netloc
        if source.startswith('www.'):
            source = source[4:]
        source = source.replace('.', '_')
return source
@property
def source(self):
return self.get_source_from_class()
def run(self, refresh=False):
if self.running and not refresh:
return
self.refine = self.tool().refine
self.doc = self.get_doc()
self.running = True
def close(self):
return
def update_novel_list(self):
update_and_save_novel_list(self.source, self.tid)
@retry((HTTPError, XMLSyntaxError, ConnectionError))
def get_doc(self):
return PyQuery(url=self.url, headers=self.headers,
proxies=self.proxies, encoding=self.encoding)
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, value):
self._headers = value or {}
@property
def proxies(self):
return self._proxies
@proxies.setter
def proxies(self, value):
self._proxies = value or {}
def dump(self):
raise NotImplementedError('dump')
def dump_and_close(self):
self.run()
self.dump()
self.close()
class SinglePage(BaseNovel):
def __init__(self, url, selector,
headers=None, proxies=None,
encoding='UTF-8', tool=None,
tid=None, cache=False):
super().__init__(url, headers, proxies, encoding, tool, tid, cache)
self.selector = selector
self.content = ''
def run(self, refresh=False):
super().run(refresh=refresh)
if not self.title:
self.title = self.get_title()
if not self.cache:
self.content = self.get_content()
def get_content(self):
if not self.selector:
return ''
content = self.doc(self.selector).html() or ''
content = self.refine(content)
return content
def get_title(self):
if self.title:
return self.title
else:
raise NotImplementedError('get_title')
def dump(self):
filename = '{self.title}.txt'.format(self=self)
print(self.title)
with open(filename, 'w') as fp:
fp.write(self.title)
fp.write('\n\n\n\n')
fp.write(self.content)
fp.write('\n')
| gpl-3.0 | -2,477,648,472,728,676,000 | 25.635714 | 75 | 0.574685 | false |
pkgw/pwkit | pwkit/lsqmdl.py | 1 | 35401 | # -*- mode: python; coding: utf-8 -*-
# Copyright 2012-2018 Peter Williams <[email protected]> and collaborators.
# Licensed under the MIT License.
"""Model data with least-squares fitting
This module provides tools for fitting models to data using least-squares
optimization.
"""
from __future__ import absolute_import, division, print_function
__all__ = 'ModelBase Model ComposedModel PolynomialModel ScaleModel'.split()
import numpy as np
try:
# numpy 1.7
import numpy.polynomial.polynomial as npoly
except ImportError:
import numpy.polynomial as npoly
from six import get_function_code
from six.moves import range, reduce
from . import binary_type, text_type
class Parameter(object):
"""Information about a parameter in a least-squares model.
These data may only be obtained after solving least-squares problem. These
objects reference information from their parent objects, so changing the
parent will alter the apparent contents of these objects.
"""
def __init__(self, owner, index):
self._owner = owner
self._index = index
def __repr__(self):
return '<Parameter "%s" (#%d) of %s>' % (self.name, self._index, self._owner)
@property
def index(self): # make this read-only
"The parameter's index in the Model's arrays."
return self._index
@property
def name(self):
"The parameter's name."
return self._owner.pnames[self._index]
@property
def value(self):
"The parameter's value."
return self._owner.params[self._index]
@property
def uncert(self):
"The uncertainty in :attr:`value`."
return self._owner.puncerts[self._index]
@property
def uval(self):
"Accesses :attr:`value` and :attr:`uncert` as a :class:`pwkit.msmt.Uval`."
from .msmt import Uval
return Uval.from_norm(self.value, self.uncert)
class ModelBase(object):
"""An abstract base class holding data and a model for least-squares fitting.
The models implemented in this module all derive from this class and so
inherit the attributes and methods described below.
A :class:`Parameter` data structure may be obtained by indexing this
object with either the parameter's numerical index or its name. I.e.::
m = Model(...).solve(...)
p = m['slope']
print(p.name, p.value, p.uncert, p.uval)
"""
data = None
"The data to be modeled; an *n*-dimensional Numpy array."
invsigma = None
"Data weights: 1/Ο for each data point."
params = None
"After fitting, a Numpy ndarray of solved model parameters."
puncerts = None
"After fitting, a Numpy ndarray of 1Ο uncertainties on the model parameters."
pnames = None
"A list of textual names for the parameters."
covar = None
"""After fitting, the variance-covariance matrix representing the parameter
uncertainties.
"""
mfunc = None
"""After fitting, a callable function evaluating the model fixed at best params.
The resulting function may or may not take arguments depending on the particular
kind of model being evaluated.
"""
mdata = None
"After fitting, the modeled data at the best parameters."
chisq = None
"After fitting, the ΟΒ² of the fit."
rchisq = None
"After fitting, the reduced ΟΒ² of the fit, or None if there are no degrees of freedom."
resids = None
"After fitting, the residuals: ``resids = data - mdata``."
def __init__(self, data, invsigma=None):
self.set_data(data, invsigma)
def set_data(self, data, invsigma=None):
"""Set the data to be modeled.
Returns *self*.
"""
self.data = np.array(data, dtype=np.float, ndmin=1)
if invsigma is None:
self.invsigma = np.ones(self.data.shape)
else:
i = np.array(invsigma, dtype=np.float)
self.invsigma = np.broadcast_arrays(self.data, i)[1] # allow scalar invsigma
if self.invsigma.shape != self.data.shape:
raise ValueError('data values and inverse-sigma values must have same shape')
return self
def print_soln(self):
"""Print information about the model solution."""
lmax = reduce(max,(len(x) for x in self.pnames), len('r chi sq'))
if self.puncerts is None:
for pn, val in zip(self.pnames, self.params):
print('%s: %14g' % (pn.rjust(lmax), val))
else:
for pn, val, err in zip(self.pnames, self.params, self.puncerts):
frac = abs(100. * err / val)
print('%s: %14g +/- %14g (%.2f%%)' % (pn.rjust(lmax), val, err, frac))
if self.rchisq is not None:
print('%s: %14g' % ('r chi sq'.rjust(lmax), self.rchisq))
elif self.chisq is not None:
print('%s: %14g' % ('chi sq'.rjust(lmax), self.chisq))
else:
print('%s: unknown/undefined' % ('r chi sq'.rjust(lmax)))
return self
def make_frozen_func(self, params):
"""Return a data-generating model function frozen at the specified parameters.
As with the :attr:`mfunc` attribute, the resulting function may or may
not take arguments depending on the particular kind of model being
evaluated.
"""
raise NotImplementedError()
def __getitem__(self, key):
if isinstance(key, binary_type):
# If you're not using the unicode_literals __future__, things get
# annoying really quickly without this.
key = text_type(key)
if isinstance(key, int):
idx = key
if idx < 0 or idx >= len(self.pnames):
raise ValueError('illegal parameter number %d' % key)
elif isinstance(key, text_type):
try:
idx = self.pnames.index(key)
except ValueError:
raise ValueError('no such parameter named "%s"' % key)
else:
raise ValueError('illegal parameter key %r' % key)
return Parameter(self, idx)
def plot(self, modelx, dlines=False, xmin=None, xmax=None,
ymin=None, ymax=None, **kwargs):
"""Plot the data and model (requires `omega`).
This assumes that `data` is 1D and that `mfunc` takes one argument
that should be treated as the X variable.
"""
import omega as om
modelx = np.asarray(modelx)
if modelx.shape != self.data.shape:
raise ValueError('modelx and data arrays must have same shape')
modely = self.mfunc(modelx)
sigmas = self.invsigma**-1 # TODO: handle invsigma = 0
vb = om.layout.VBox(2)
vb.pData = om.quickXYErr(modelx, self.data, sigmas,
'Data', lines=dlines, **kwargs)
vb[0] = vb.pData
vb[0].addXY(modelx, modely, 'Model')
vb[0].setYLabel('Y')
vb[0].rebound(False, True)
vb[0].setBounds(xmin, xmax, ymin, ymax)
vb[1] = vb.pResid = om.RectPlot()
        vb[1].defaultField.xaxis = vb[0].defaultField.xaxis
vb[1].addXYErr(modelx, self.resids, sigmas, None, lines=False)
vb[1].setLabels('X', 'Residuals')
vb[1].rebound(False, True)
# ignore Y values since residuals are on different scale:
vb[1].setBounds(xmin, xmax)
vb.setWeight(0, 3)
return vb
def show_cov(self):
"Show the parameter covariance matrix with `pwkit.ndshow_gtk3`."
# would be nice: labels with parameter names (hard because this is
# ndshow, not omegaplot)
from .ndshow_gtk3 import view
view(self.covar, title='Covariance Matrix')
def show_corr(self):
"Show the parameter correlation matrix with `pwkit.ndshow_gtk3`."
from .ndshow_gtk3 import view
d = np.diag(self.covar) ** -0.5
corr = self.covar * d[np.newaxis,:] * d[:,np.newaxis]
view(corr, title='Correlation Matrix')
class Model(ModelBase):
"""Models data with a generic nonlinear optimizer
Basic usage is::
def func(p1, p2, x):
simulated_data = p1 * x + p2
return simulated_data
x = [1, 2, 3]
data = [10, 14, 15.8]
mdl = Model(func, data, args=(x,)).solve(guess).print_soln()
The :class:`Model` constructor can take an optional argument ``invsigma``
after ``data``; it specifies *inverse sigmas*, **not** inverse *variances*
(the usual statistical weights), for the data points. Since most
applications deal in sigmas, take care to write::
m = Model(func, data, 1. / uncerts) # right!
not::
m = Model(func, data, uncerts) # WRONG
    If you have zero uncertainty on a measurement, you must find a way to
express that constraint without including that measurement as part of the
``data`` vector.
"""
lm_prob = None
"""A :class:`pwkit.lmmin.Problem` instance describing the problem to be solved.
After setting up the data-generating function, you can access this item to
tune the solver.
"""
def __init__(self, simple_func, data, invsigma=None, args=()):
if simple_func is not None:
self.set_simple_func(simple_func, args)
if data is not None:
self.set_data(data, invsigma)
def set_func(self, func, pnames, args=()):
"""Set the model function to use an efficient but tedious calling convention.
The function should obey the following convention::
def func(param_vec, *args):
modeled_data = { do something using param_vec }
return modeled_data
This function creates the :class:`pwkit.lmmin.Problem` so that the
caller can futz with it before calling :meth:`solve`, if so desired.
Returns *self*.
"""
from .lmmin import Problem
self.func = func
self._args = args
self.pnames = list(pnames)
self.lm_prob = Problem(len(self.pnames))
return self
def set_simple_func(self, func, args=()):
"""Set the model function to use a simple but somewhat inefficient calling
convention.
The function should obey the following convention::
def func(param0, param1, ..., paramN, *args):
modeled_data = { do something using the parameters }
return modeled_data
Returns *self*.
"""
code = get_function_code(func)
npar = code.co_argcount - len(args)
pnames = code.co_varnames[:npar]
def wrapper(params, *args):
return func(*(tuple(params) + args))
return self.set_func(wrapper, pnames, args)
def make_frozen_func(self, params):
"""Returns a model function frozen to the specified parameter values.
Any remaining arguments are left free and must be provided when the
function is called.
For this model, the returned function is the application of
:func:`functools.partial` to the :attr:`func` property of this object.
"""
params = np.array(params, dtype=np.float, ndmin=1)
from functools import partial
return partial(self.func, params)
def solve(self, guess):
"""Solve for the parameters, using an initial guess.
This uses the Levenberg-Marquardt optimizer described in
:mod:`pwkit.lmmin`.
Returns *self*.
"""
guess = np.array(guess, dtype=np.float, ndmin=1)
f = self.func
args = self._args
def lmfunc(params, vec):
vec[:] = f(params, *args).flatten()
self.lm_prob.set_residual_func(self.data.flatten(),
self.invsigma.flatten(),
lmfunc, None)
self.lm_soln = soln = self.lm_prob.solve(guess)
self.params = soln.params
self.puncerts = soln.perror
self.covar = soln.covar
self.mfunc = self.make_frozen_func(soln.params)
# fvec = resids * invsigma = (data - mdata) * invsigma
self.resids = soln.fvec.reshape(self.data.shape) / self.invsigma
self.mdata = self.data - self.resids
# lm_soln.fnorm can be unreliable ("max(fnorm, fnorm1)" branch)
self.chisq = (self.lm_soln.fvec**2).sum()
if soln.ndof > 0:
self.rchisq = self.chisq / soln.ndof
return self
class PolynomialModel(ModelBase):
"""Least-squares polynomial fit.
Because this is a very specialized kind of problem, we don't need an
initial guess to solve, and we can use fast built-in numerical routines.
The output parameters are named "a0", "a1", ... and are stored in that
order in PolynomialModel.params[]. We have ``y = sum(x**i * a[i])``, so
"a2" = "params[2]" is the quadratic term, etc.
This model does *not* give uncertainties on the derived coefficients. The
as_nonlinear() method can be use to get a `Model` instance with
uncertainties.
Methods:
as_nonlinear - Return a (lmmin-based) `Model` equivalent to self.
"""
def __init__(self, maxexponent, x, data, invsigma=None):
self.maxexponent = maxexponent
self.x = np.array(x, dtype=np.float, ndmin=1, copy=False, subok=True)
self.set_data(data, invsigma)
def make_frozen_func(self, params):
return lambda x: npoly.polyval(x, params)
def solve(self):
self.pnames = ['a%d' % i for i in range(self.maxexponent + 1)]
self.params = npoly.polyfit(self.x, self.data, self.maxexponent,
w=self.invsigma)
self.puncerts = None # does anything provide this? could farm out to lmmin ...
self.covar = None
self.mfunc = self.make_frozen_func(self.params)
self.mdata = self.mfunc(self.x)
self.resids = self.data - self.mdata
self.chisq = ((self.resids * self.invsigma)**2).sum()
if self.x.size > self.maxexponent + 1:
self.rchisq = self.chisq / (self.x.size - (self.maxexponent + 1))
return self
def as_nonlinear(self, params=None):
"""Return a `Model` equivalent to this object. The nonlinear solver is less
efficient, but lets you freeze parameters, compute uncertainties, etc.
If the `params` argument is provided, solve() will be called on the
returned object with those parameters. If it is `None` and this object
        has parameters in `self.params`, those will be used. Otherwise, solve()
will not be called on the returned object.
"""
if params is None:
params = self.params
nlm = Model(None, self.data, self.invsigma)
nlm.set_func(lambda p, x: npoly.polyval(x, p),
self.pnames,
args=(self.x,))
if params is not None:
nlm.solve(params)
return nlm
class ScaleModel(ModelBase):
"""Solve `data = m * x` for `m`."""
def __init__(self, x, data, invsigma=None):
self.x = np.array(x, dtype=np.float, ndmin=1, copy=False, subok=True)
self.set_data(data, invsigma)
def make_frozen_func(self, params):
return lambda x: params[0] * x
def solve(self):
w2 = self.invsigma**2
sxx = np.dot(self.x**2, w2)
sxy = np.dot(self.x * self.data, w2)
m = sxy / sxx
uc_m = 1. / np.sqrt(sxx)
self.pnames = ['m']
self.params = np.asarray([m])
self.puncerts = np.asarray([uc_m])
self.covar = self.puncerts.reshape((1, 1))
self.mfunc = lambda x: m * x
self.mdata = m * self.x
self.resids = self.data - self.mdata
self.chisq = ((self.resids * self.invsigma)**2).sum()
self.rchisq = self.chisq / (self.x.size - 1)
return self
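# Minimal usage sketch for ScaleModel; the data values below are made up:
#
#   x = np.asarray([1., 2., 3.])
#   data = np.asarray([2.1, 3.9, 6.2])
#   m = ScaleModel(x, data).solve()
#   print(m['m'].value, m['m'].uncert)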
# lmmin-based model-fitting when the model is broken down into composable
# components.
class ModelComponent(object):
npar = 0
name = None
pnames = ()
nmodelargs = 0
setguess = None
setvalue = None
setlimit = None
_accum_mfunc = None
def __init__(self, name=None):
self.name = name
def _param_names(self):
"""Overridable in case the list of parameter names needs to be
generated on the fly."""
return self.pnames
def finalize_setup(self):
"""If the component has subcomponents, this should set their `name`,
`setguess`, `setvalue`, and `setlimit` properties. It should also
set `npar` (on self) to the final value."""
pass
def prep_params(self):
"""This should make any necessary calls to `setvalue` or `setlimit`,
though in straightforward cases it should just be up to the user to
do this. If the component has subcomponents, their `prep_params`
functions should be called."""
pass
def model(self, pars, mdata):
"""Modify `mdata` based on `pars`."""
pass
def deriv(self, pars, jac):
"""Compute the Jacobian. `jac[i]` is d`mdata`/d`pars[i]`."""
pass
def extract(self, pars, perr, cov):
"""Extract fit results into the object for ease of inspection."""
self.covar = cov
def _outputshape(self, *args):
"""This is a helper for evaluating the model function at fixed parameters. To
work in the ComposedModel paradigm, we have to allocate an empty array
to hold the model output before we can fill it via the _accum_mfunc
functions. We can't do that without knowing what size it will be. That
size has to be a function of the "free" parameters to the model
function that are implicit/fixed during the fitting process. Given these "free"
parameters, _outputshape returns the shape that the output will have."""
raise NotImplementedError()
def mfunc(self, *args):
if len(args) != self.nmodelargs:
raise TypeError('model function expected %d arguments, got %d' %
(self.nmodelargs, len(args)))
result = np.zeros(self._outputshape(*args))
self._accum_mfunc(result, *args)
return result
class ComposedModel(ModelBase):
def __init__(self, component, data, invsigma=None):
if component is not None:
self.set_component(component)
if data is not None:
self.set_data(data, invsigma)
def _component_setguess(self, vals, ofs=0):
vals = np.asarray(vals)
if ofs < 0 or ofs + vals.size > self.component.npar:
raise ValueError('ofs %d, vals.size %d, npar %d' %
(ofs, vals.size, self.component.npar))
self.force_guess[ofs:ofs+vals.size] = vals
def _component_setvalue(self, cidx, val, fixed=False):
if cidx < 0 or cidx >= self.component.npar:
raise ValueError('cidx %d, npar %d' % (cidx, self.component.npar))
self.lm_prob.p_value(cidx, val, fixed=fixed)
self.force_guess[cidx] = val
def _component_setlimit(self, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= self.component.npar:
raise ValueError('cidx %d, npar %d' % (cidx, self.component.npar))
self.lm_prob.p_limit(cidx, lower, upper)
def set_component(self, component):
self.component = component
component.setguess = self._component_setguess
component.setvalue = self._component_setvalue
component.setlimit = self._component_setlimit
component.finalize_setup()
from .lmmin import Problem
self.lm_prob = Problem(component.npar)
self.force_guess = np.empty(component.npar)
self.force_guess.fill(np.nan)
self.pnames = list(component._param_names())
component.prep_params()
def solve(self, guess=None):
if guess is None:
guess = self.force_guess
else:
guess = np.array(guess, dtype=np.float, ndmin=1, copy=True)
for i in range(self.force_guess.size):
if np.isfinite(self.force_guess[i]):
guess[i] = self.force_guess[i]
def model(pars, outputs):
outputs.fill(0)
self.component.model(pars, outputs)
self.lm_model = model
self.lm_deriv = self.component.deriv
self.lm_prob.set_residual_func(self.data, self.invsigma, model,
self.component.deriv)
self.lm_soln = soln = self.lm_prob.solve(guess)
self.params = soln.params
self.puncerts = soln.perror
self.covar = soln.covar
# fvec = resids * invsigma = (data - mdata) * invsigma
self.resids = self.lm_soln.fvec.reshape(self.data.shape) / self.invsigma
self.mdata = self.data - self.resids
# lm_soln.fnorm can be unreliable ("max(fnorm, fnorm1)" branch)
self.chisq = (self.lm_soln.fvec**2).sum()
if soln.ndof > 0:
self.rchisq = self.chisq / soln.ndof
self.component.extract(soln.params, soln.perror, soln.covar)
return self
def make_frozen_func(self):
return self.component.mfunc
def mfunc(self, *args):
return self.component.mfunc(*args)
def debug_derivative(self, guess):
"""returns (explicit, auto)"""
from .lmmin import check_derivative
return check_derivative(self.component.npar, self.data.size,
self.lm_model, self.lm_deriv, guess)
# Now specific components useful in the above framework. The general strategy
# is to err on the side of having additional parameters in the individual
# classes, and the user can call setvalue() to fix them if they're not needed.
class AddConstantComponent(ModelComponent):
npar = 1
pnames = ('value', )
nmodelargs = 0
def model(self, pars, mdata):
mdata += pars[0]
def deriv(self, pars, jac):
jac[0] = 1.
def _outputshape(self):
return()
def extract(self, pars, perr, cov):
def _accum_mfunc(res):
res += pars[0]
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_value = pars[0]
self.u_value = perr[0]
class AddValuesComponent(ModelComponent):
"""XXX terminology between this and AddConstant is mushy."""
nmodelargs = 0
def __init__(self, nvals, name=None):
super(AddValuesComponent, self).__init__(name)
self.npar = nvals
def _param_names(self):
for i in range(self.npar):
yield 'v%d' % i
def model(self, pars, mdata):
mdata += pars
def deriv(self, pars, jac):
jac[:,:] = np.eye(self.npar)
def _outputshape(self):
return(self.npar,)
def extract(self, pars, perr, cov):
def _accum_mfunc(res):
res += pars
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_vals = pars
self.u_vals = perr
class AddPolynomialComponent(ModelComponent):
nmodelargs = 1
def __init__(self, maxexponent, x, name=None):
super(AddPolynomialComponent, self).__init__(name)
self.npar = maxexponent + 1
self.x = np.array(x, dtype=np.float, ndmin=1, copy=False, subok=True)
def _param_names(self):
for i in range(self.npar):
yield 'c%d' % i
def model(self, pars, mdata):
mdata += npoly.polyval(self.x, pars)
def deriv(self, pars, jac):
w = np.ones_like(self.x)
for i in range(self.npar):
jac[i] = w
w *= self.x
def _outputshape(self, x):
return x.shape
def extract(self, pars, perr, cov):
def _accum_mfunc(res, x):
res += npoly.polyval(x, pars)
self._accum_mfunc = _accum_mfunc
self.covar = cov
self.f_coeffs = pars
self.u_coeffs = perr
def _broadcast_shapes(s1, s2):
"""Given array shapes `s1` and `s2`, compute the shape of the array that would
result from broadcasting them together."""
n1 = len(s1)
n2 = len(s2)
n = max(n1, n2)
res = [1] * n
for i in range(n):
if i >= n1:
c1 = 1
else:
c1 = s1[n1-1-i]
if i >= n2:
c2 = 1
else:
c2 = s2[n2-1-i]
if c1 == 1:
rc = c2
elif c2 == 1 or c1 == c2:
rc = c1
else:
raise ValueError('array shapes %r and %r are not compatible' % (s1, s2))
res[n-1-i] = rc
return tuple(res)
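# For example (illustrative): _broadcast_shapes((3, 1), (4,)) returns (3, 4),
# mirroring numpy's broadcasting rules, while incompatible shapes such as
# (3,) and (4,) raise ValueError.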
class SeriesComponent(ModelComponent):
"""Apply a set of subcomponents in series, isolating each from the other. This
is only valid if every subcomponent except the first is additive --
otherwise, the Jacobian won't be right."""
def __init__(self, components=(), name=None):
super(SeriesComponent, self).__init__(name)
self.components = list(components)
def add(self, component):
"""This helps, but direct manipulation of self.components should be
supported."""
self.components.append(component)
return self
def _param_names(self):
for c in self.components:
pfx = c.name + '.' if c.name is not None else ''
for p in c._param_names():
yield pfx + p
def _offset_setguess(self, ofs, npar, vals, subofs=0):
vals = np.asarray(vals)
if subofs < 0 or subofs + vals.size > npar:
raise ValueError('subofs %d, vals.size %d, npar %d' %
(subofs, vals.size, npar))
return self.setguess(vals, ofs + subofs)
def _offset_setvalue(self, ofs, npar, cidx, value, fixed=False):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setvalue(ofs + cidx, value, fixed)
def _offset_setlimit(self, ofs, npar, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setlimit(ofs + cidx, lower, upper)
def finalize_setup(self):
from functools import partial
ofs = 0
self.nmodelargs = 0
for i, c in enumerate(self.components):
if c.name is None:
c.name = 'c%d' % i
c.setguess = partial(self._offset_setguess, ofs, c.npar)
c.setvalue = partial(self._offset_setvalue, ofs, c.npar)
c.setlimit = partial(self._offset_setlimit, ofs, c.npar)
c.finalize_setup()
ofs += c.npar
self.nmodelargs += c.nmodelargs
self.npar = ofs
def prep_params(self):
for c in self.components:
c.prep_params()
def model(self, pars, mdata):
ofs = 0
for c in self.components:
p = pars[ofs:ofs+c.npar]
c.model(p, mdata)
ofs += c.npar
def deriv(self, pars, jac):
ofs = 0
for c in self.components:
p = pars[ofs:ofs+c.npar]
j = jac[ofs:ofs+c.npar]
c.deriv(p, j)
ofs += c.npar
def extract(self, pars, perr, cov):
ofs = 0
for c in self.components:
n = c.npar
spar = pars[ofs:ofs+n]
serr = perr[ofs:ofs+n]
scov = cov[ofs:ofs+n,ofs:ofs+n]
c.extract(spar, serr, scov)
ofs += n
def _outputshape(self, *args):
s = ()
ofs = 0
for c in self.components:
cargs = args[ofs:ofs+c.nmodelargs]
s = _broadcast_shapes(s, c._outputshape(*cargs))
ofs += c.nmodelargs
return s
def _accum_mfunc(self, res, *args):
ofs = 0
for c in self.components:
cargs = args[ofs:ofs+c.nmodelargs]
c._accum_mfunc(res, *cargs)
ofs += c.nmodelargs
class MatMultComponent(ModelComponent):
"""Given a component yielding k**2 data points and k additional components,
    each yielding n data points. The result is [A]×[B], where A is the square
matrix formed from the first component's output, and B is the (k, n)
matrix of stacked output from the final k components.
    Parameters are ordered in the same way as the components named above.
"""
def __init__(self, k, name=None):
super(MatMultComponent, self).__init__(name)
self.k = k
self.acomponent = None
self.bcomponents = [None] * k
def _param_names(self):
pfx = self.acomponent.name + '.' if self.acomponent.name is not None else ''
for p in self.acomponent._param_names():
yield pfx + p
for c in self.bcomponents:
pfx = c.name + '.' if c.name is not None else ''
for p in c._param_names():
yield pfx + p
def _offset_setguess(self, ofs, npar, vals, subofs=0):
vals = np.asarray(vals)
if subofs < 0 or subofs + vals.size > npar:
raise ValueError('subofs %d, vals.size %d, npar %d' %
(subofs, vals.size, npar))
return self.setguess(vals, ofs + subofs)
def _offset_setvalue(self, ofs, npar, cidx, value, fixed=False):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setvalue(ofs + cidx, value, fixed)
def _offset_setlimit(self, ofs, npar, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setlimit(ofs + cidx, lower, upper)
def finalize_setup(self):
from functools import partial
c = self.acomponent
if c.name is None:
c.name = 'a'
c.setguess = partial(self._offset_setguess, 0, c.npar)
c.setvalue = partial(self._offset_setvalue, 0, c.npar)
c.setlimit = partial(self._offset_setlimit, 0, c.npar)
c.finalize_setup()
ofs = c.npar
self.nmodelargs = c.nmodelargs
for i, c in enumerate(self.bcomponents):
if c.name is None:
c.name = 'b%d' % i
c.setguess = partial(self._offset_setguess, ofs, c.npar)
c.setvalue = partial(self._offset_setvalue, ofs, c.npar)
c.setlimit = partial(self._offset_setlimit, ofs, c.npar)
c.finalize_setup()
ofs += c.npar
self.nmodelargs += c.nmodelargs
self.npar = ofs
def prep_params(self):
self.acomponent.prep_params()
for c in self.bcomponents:
c.prep_params()
def _sep_model(self, pars, nd):
k = self.k
ma = np.zeros((k, k))
mb = np.zeros((k, nd))
c = self.acomponent
c.model(pars[:c.npar], ma.reshape(k**2))
pofs = c.npar
for i, c in enumerate(self.bcomponents):
p = pars[pofs:pofs+c.npar]
c.model(p, mb[i])
pofs += c.npar
return ma, mb
def model(self, pars, mdata):
k = self.k
nd = mdata.size // k
ma, mb = self._sep_model(pars, nd)
np.dot(ma, mb, mdata.reshape((k, nd)))
def deriv(self, pars, jac):
k = self.k
nd = jac.shape[1] // k
npar = self.npar
ma, mb = self._sep_model(pars, nd)
ja = np.zeros((npar, k, k))
jb = np.zeros((npar, k, nd))
c = self.acomponent
c.deriv(pars[:c.npar], ja[:c.npar].reshape((c.npar, k**2)))
pofs = c.npar
for i, c in enumerate(self.bcomponents):
p = pars[pofs:pofs+c.npar]
c.deriv(p, jb[pofs:pofs+c.npar,i,:])
pofs += c.npar
for i in range(self.npar):
jac[i] = (np.dot(ja[i], mb) + np.dot(ma, jb[i])).reshape(k * nd)
def extract(self, pars, perr, cov):
c = self.acomponent
c.extract(pars[:c.npar], perr[:c.npar], cov[:c.npar,:c.npar])
ofs = c.npar
for c in self.bcomponents:
n = c.npar
spar = pars[ofs:ofs+n]
serr = perr[ofs:ofs+n]
scov = cov[ofs:ofs+n,ofs:ofs+n]
c.extract(spar, serr, scov)
ofs += n
def _outputshape(self, *args):
aofs = self.acomponent.nmodelargs
sb = ()
for c in self.bcomponents:
a = args[aofs:aofs+c.nmodelargs]
sb = _broadcast_shapes(sb, c._outputshape(*a))
aofs += c.nmodelargs
return (self.k,) + sb
def _accum_mfunc(self, res, *args):
k = self.k
nd = res.shape[1]
ma = np.zeros((k, k))
mb = np.zeros((k, nd))
c = self.acomponent
c._accum_mfunc(ma.reshape(k**2), *(args[:c.nmodelargs]))
aofs = c.nmodelargs
for i, c in enumerate(self.bcomponents):
a = args[aofs:aofs+c.nmodelargs]
c._accum_mfunc(mb[i], *a)
aofs += c.nmodelargs
np.dot(ma, mb, res)
class ScaleComponent(ModelComponent):
npar = 1
def __init__(self, subcomp=None, name=None):
super(ScaleComponent, self).__init__(name)
self.setsubcomp(subcomp)
def setsubcomp(self, subcomp):
self.subcomp = subcomp
return self
def _param_names(self):
yield 'factor'
pfx = self.subcomp.name + '.' if self.subcomp.name is not None else ''
for p in self.subcomp._param_names():
yield pfx + p
def _sub_setguess(self, npar, cidx, vals, ofs=0):
vals = np.asarray(vals)
if ofs < 0 or ofs + vals.size > npar:
raise ValueError('ofs %d, vals.size %d, npar %d' %
(ofs, vals.size, npar))
return self.setguess(vals, ofs + 1)
def _sub_setvalue(self, npar, cidx, value, fixed=False):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setvalue(1 + cidx, value, fixed)
def _sub_setlimit(self, npar, cidx, lower=-np.inf, upper=np.inf):
if cidx < 0 or cidx >= npar:
raise ValueError('cidx %d, npar %d' % (cidx, npar))
return self.setlimit(1 + cidx, lower, upper)
def finalize_setup(self):
if self.subcomp.name is None:
self.subcomp.name = 'c'
from functools import partial
self.subcomp.setvalue = partial(self._sub_setvalue, self.subcomp.npar)
        self.subcomp.setlimit = partial(self._sub_setlimit, self.subcomp.npar)
self.subcomp.finalize_setup()
self.npar = self.subcomp.npar + 1
self.nmodelargs = self.subcomp.nmodelargs
def prep_params(self):
self.subcomp.prep_params()
def model(self, pars, mdata):
self.subcomp.model(pars[1:], mdata)
mdata *= pars[0]
def deriv(self, pars, jac):
self.subcomp.model(pars[1:], jac[0])
self.subcomp.deriv(pars[1:], jac[1:])
jac[1:] *= pars[0]
def extract(self, pars, perr, cov):
self.f_factor = pars[0]
self.u_factor = perr[0]
self.c_factor = cov[0]
self.subcomp.extract(pars[1:], perr[1:], cov[1:,1:])
def _outputshape(self, *args):
return self.subcomp._outputshape(*args)
def _accum_mfunc(self, res, *args):
self.subcomp._accum_mfunc(res, *args)
| mit | -420,189,310,659,053,200 | 29.303082 | 91 | 0.581426 | false |
codito/pomito | tests/test_dispatcher.py | 1 | 2529 | # -*- coding: utf-8 -*-
"""Tests for message dispatcher."""
import unittest
from unittest.mock import Mock
import blinker
from pomito import main
class MessageTests(unittest.TestCase):
def test_send_calls_signal_send_with_kwargs(self):
mock_signal = Mock(blinker.Signal)
msg = main.Message(mock_signal, arg1="arg1", arg2=1)
msg.send()
mock_signal.send.assert_called_once_with(arg1="arg1", arg2=1)
class MessageDispatcherTests(unittest.TestCase):
def setUp(self):
dummy_signal = blinker.signal('dummy_signal')
self.test_message = main.Message(dummy_signal, arg1="arg1", arg2=1)
self.dispatcher = main.MessageDispatcher()
self.mock_callback = Mock()
def tearDown(self):
if self.dispatcher.is_alive():
self.dispatcher.stop()
self.dispatcher.join()
def test_queue_message_throws_for_invalid_message(self):
self.assertRaises(TypeError, self.dispatcher.queue_message, None)
def test_queue_message_doesnt_queue_message_if_there_are_no_receivers(self):
self.dispatcher.queue_message(self.test_message)
assert self.dispatcher._message_queue.qsize() == 0
def test_queue_message_queues_message_if_there_are_receivers(self):
self.test_message.signal.connect(Mock(), weak=False)
self.dispatcher.queue_message(self.test_message)
assert self.dispatcher._message_queue.qsize() == 1
def test_start_should_start_the_dispatcher_thread(self):
self.dispatcher.start()
assert self.dispatcher.is_alive()
assert self.dispatcher._stop_event.is_set() is False
def test_start_should_throw_if_dispatcher_is_already_started(self):
self.dispatcher.start()
self.assertRaises(RuntimeError, self.dispatcher.start)
def test_started_dispatcher_should_process_messages_in_queue(self):
self.test_message.signal.connect(self.mock_callback, weak=False)
self.dispatcher.start()
self.dispatcher.queue_message(self.test_message)
self.dispatcher._message_queue.join()
self.mock_callback.assert_called_once_with(None, arg1="arg1", arg2=1)
def test_stopped_dispatcher_shouldnt_process_messages_in_queue(self):
self.test_message.signal.connect(self.mock_callback, weak=False)
self.dispatcher.start()
self.dispatcher.stop()
self.dispatcher.join()
self.dispatcher.queue_message(self.test_message)
assert self.mock_callback.called is False
| mit | -1,461,367,914,239,270,100 | 31.844156 | 80 | 0.685251 | false |
Ircam-RnD/xmm | doc/doc-misc/python_example.py | 1 | 1686 | import numpy as np
import mhmm
# Load Training Data
training_motion_1 = np.genfromtxt('training_motion_1.txt')
training_motion_2 = np.genfromtxt('training_motion_2.txt')
training_sound_1 = np.genfromtxt('training_sound_1.txt')
training_sound_2 = np.genfromtxt('training_sound_2.txt')
dim_gesture = training_motion_1.shape[1]
dim_sound = training_sound_1.shape[1]
# Create a multimodal training set
training_set = mhmm.TrainingSet(mhmm.BIMODAL)
training_set.set_dimension(dim_gesture + dim_sound)
training_set.set_dimension_input(dim_sound)
# Record First Phrase
for frame_motion, frame_sound in zip(training_motion_1, training_sound_1):
training_set.recordPhrase_input (0, frame_motion)
training_set.recordPhrase_output(0, frame_sound)
training_set.setPhraseLabel(0, mhmm.Label('one'))
# Record Second Phrase
for frame_motion, frame_sound in zip(training_motion_2, training_sound_2):
training_set.recordPhrase_input (1, frame_motion)
training_set.recordPhrase_output(1, frame_sound)
training_set.setPhraseLabel(1, mhmm.Label('two'))
# Instantiate and Train a Hierarchical Multimodal HMM
xmm = mhmm.HierarchicalHMM(mhmm.BIMODAL, training_set)
xmm.set_nbStates(10)
xmm.set_nbMixtureComponents(1)
xmm.set_varianceOffset(0.1, 0.01)
xmm.train()
# Perform joint recognition and Mapping
test_motion = np.genfromtxt('test_motion.txt')
predicted_sound = np.zeros((len(test_motion), dim_sound))
log_likelihoods = np.zeros((len(test_motion), xmm.size()))
xmm.performance_init()
for t, frame_motion in enumerate(test_motion):
    xmm.performance_update(frame_motion)
predicted_sound[t, :] = xmm.results_predicted_output
log_likelihoods[t, :] = xmm.results_log_likelihoods
| gpl-3.0 | 5,467,951,583,736,574,000 | 34.87234 | 74 | 0.758007 | false |
google/ml-fairness-gym | agents/recommenders/utils.py | 1 | 8346 | # coding=utf-8
# Copyright 2020 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities for RecSim agent.
Defines a few functions used by the RecSim RNNAgent.
"""
import itertools
import os
import tempfile
from absl import flags
import file_util
from agents.recommenders import model
import numpy as np
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
def accumulate_rewards(rewards, gamma):
"""Computes the discounted reward for the entire episode."""
reversed_rewards = rewards[::-1] # list reversal
acc = list(itertools.accumulate(reversed_rewards, lambda x, y: x*gamma + y))
return np.array(acc[::-1])
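# Worked example (illustrative): with rewards [1., 1., 1.] and gamma = 0.5 the
# function returns [1.75, 1.5, 1.0]; each entry is the discounted sum of the
# current and all subsequent rewards.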
def format_data(data_history, gamma, constant_baseline=0.0):
"""The function formats the data into input, output format for keras."""
inp_rec_seq, inp_reward_seq, output_recs, reward_weights = [], [], [], []
for curr_recs, curr_rewards in zip(data_history['recommendation_seqs'],
data_history['reward_seqs']):
inp_rec_seq.append(curr_recs[:-1])
inp_reward_seq.append(curr_rewards[:-1])
output_recs.append(np.expand_dims(curr_recs[1:], axis=-1))
output_rewards = accumulate_rewards(curr_rewards[1:] - constant_baseline,
gamma)
reward_weights.append(output_rewards)
return {'input': [np.array(inp_rec_seq), np.array(inp_reward_seq)],
'output': np.array(output_recs),
'sample_weights_temporal': np.array(reward_weights)}
def format_data_safe_rl(data_history, gamma, constant_baseline=0.0):
"""The function formats the data into input, output format for keras.
This function is specific to the implementation of CVaR safety constraint.
See https://braintex.goog/read/zyprpgsjbtww for more details.
Args:
data_history: dict with recommendation_seqs, reward_seqs, safety_costs
fields.
gamma: Gamma for reward accumulation over the time horizon.
constant_baseline: Baseline to subtract from each reward to reduce variance.
Returns:
A dictionary with input, output and sample weights_temporal fields
that are input into a keras model.
"""
inp_rec_seq, inp_reward_seq, output_recs, reward_weights = [], [], [], []
trajectories_cost = []
for curr_recs, curr_rewards, curr_safety_costs in zip(
data_history['recommendation_seqs'],
data_history['reward_seqs'],
data_history['safety_costs']):
inp_rec_seq.append(np.array(curr_recs[:-1]))
inp_reward_seq.append(np.array(curr_rewards[:-1]))
output_recs.append(np.expand_dims(np.array(curr_recs[1:]), axis=-1))
output_rewards = accumulate_rewards(curr_rewards[1:] - constant_baseline,
gamma)
reward_weights.append(output_rewards)
cost_trajectory = np.mean(curr_safety_costs)
trajectories_cost.append(cost_trajectory)
return {
'input': [np.array(inp_rec_seq),
np.array(inp_reward_seq)],
'output': np.array(output_recs),
'reward_weights': np.array(reward_weights),
'trajectory_costs': np.array(trajectories_cost)
}
def format_data_movielens(data_history, gamma, constant_baseline=0.0,
mask_already_recommended=False, user_id_input=True,
**kwargs):
"""Format data for movielens RNN agent update step."""
inp_rec_seq, inp_reward_seq, output_recs, reward_weights = [], [], [], []
user_id_seq = []
trajectories_cost = []
  # TODO(): Change argument to repeat_movies to be consistent.
  # Always build the masks; they are only added to the model input when
  # mask_already_recommended is True (avoids a NameError when it is False).
  masks_for_softmax = []
for user_id, curr_recs, curr_rewards, curr_safety_costs in zip(
data_history['user_id'],
data_history['recommendation_seqs'],
data_history['reward_seqs'],
data_history['safety_costs']):
inp_rec_seq.append(np.array(curr_recs[:-1]))
inp_reward_seq.append(np.array(curr_rewards[:-1]))
output_recs.append(np.expand_dims(np.array(curr_recs[1:]), axis=-1))
output_rewards = accumulate_rewards(curr_rewards[1:] - constant_baseline,
gamma)
user_id_seq.append(np.array([user_id] * len(curr_recs[:-1])))
reward_weights.append(output_rewards)
cost_trajectory = np.mean(curr_safety_costs)
trajectories_cost.append(cost_trajectory)
masks_for_softmax.append(get_mask_for_softmax(curr_recs[1:-1],
kwargs['action_space_size']))
input_list = [np.array(inp_rec_seq),
np.array(inp_reward_seq)]
if user_id_input:
input_list.append(np.array(user_id_seq))
if mask_already_recommended:
input_list.append(np.array(masks_for_softmax))
return {
'input': input_list,
'output': np.array(output_recs),
'reward_weights': np.array(reward_weights),
'trajectory_costs': np.array(trajectories_cost)
}
def format_data_batch_movielens(data_history,
gamma,
constant_baseline=0.0,
mask_already_recommended=False,
user_id_input=True,
**kwargs):
"""Format data for movielens RNN agent update step."""
inp_rec_seq, inp_reward_seq, output_recs, reward_weights = [], [], [], []
user_id_seq = []
trajectories_cost = []
  # TODO(): Change argument to repeat_movies to be consistent.
  # Always build the masks; they are only added to the model input when
  # mask_already_recommended is True (avoids a NameError when it is False).
  masks_for_softmax = []
for user_id, curr_recs, curr_rewards, curr_safety_costs in zip(
data_history['users'], data_history['recommendations'],
data_history['rewards'], data_history['safety_costs']):
inp_rec_seq.append(np.array(curr_recs[:-1]))
inp_reward_seq.append(np.array(curr_rewards[:-1]))
output_recs.append(np.expand_dims(np.array(curr_recs[1:]), axis=-1))
output_rewards = accumulate_rewards(curr_rewards[1:] - constant_baseline,
gamma)
user_id_seq.append(user_id[:-1])
reward_weights.append(output_rewards)
cost_trajectory = np.mean(curr_safety_costs)
trajectories_cost.append(cost_trajectory)
masks_for_softmax.append(
get_mask_for_softmax(curr_recs[1:-1], kwargs['action_space_size']))
input_list = [
np.array(inp_rec_seq),
np.array(inp_reward_seq),
]
if user_id_input:
input_list.append(np.array(user_id_seq))
if mask_already_recommended:
input_list.append(np.array(masks_for_softmax))
return {
'input': input_list,
'output': np.array(output_recs),
'reward_weights': np.array(reward_weights),
'trajectory_costs': np.array(trajectories_cost)
}
def get_mask_for_softmax(current_recommendations, action_space_size):
mask = np.ones((len(current_recommendations) + 1, action_space_size),
dtype=np.int)
for i in range(len(current_recommendations)):
mask[i+1, current_recommendations[:i+1]] = 0
# TODO(): Add a test to test whether the mask works as expected.
return mask
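# Illustrative check (assumed values, in the spirit of the TODO above): with
# current_recommendations = [2, 0] and action_space_size = 4 the mask is
#   [[1, 1, 1, 1],   # nothing masked for the first step
#    [1, 1, 0, 1],   # item 2 masked out
#    [0, 1, 0, 1]]   # items 2 and 0 masked out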
def load_model(filepath,
optimizer_name,
learning_rate=None,
momentum=None,
gradient_clip_value=None,
gradient_clip_norm=None):
"""Loads RNNAgent model from the path."""
tmp_model_file_path = os.path.join(tempfile.gettempdir(), 'tmp_model.h5')
file_util.copy(filepath, tmp_model_file_path, overwrite=True)
loaded_model = tf.keras.models.load_model(tmp_model_file_path)
file_util.remove(tmp_model_file_path)
optimizer = model.construct_optimizer(optimizer_name, learning_rate, momentum,
gradient_clip_value, gradient_clip_norm)
loaded_model.compile(
loss='sparse_categorical_crossentropy',
optimizer=optimizer,
sample_weight_mode='temporal')
return loaded_model
| apache-2.0 | -1,753,165,117,362,242,800 | 38.933014 | 80 | 0.649653 | false |
qszhuan/raspberry-pi | logger2.py | 1 | 1054 | import logging
import logging.handlers
from logging.config import dictConfig
logger = logging.getLogger(__name__)
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
}
def configure_logging(logfile_path):
dictConfig(DEFAULT_LOGGING)
default_formatter = logging.Formatter(
"[%(asctime)s] [%(levelname)s] [%(name)s] [%(funcName)s():%(lineno)s] [PID:%(process)d TID:%(thread)d] %(message)s",
"%d/%m/%Y %H:%M:%S")
file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760, backupCount=300,
encoding='utf-8')
file_handler.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(default_formatter)
console_handler.setFormatter(default_formatter)
logging.root.setLevel(logging.DEBUG)
logging.root.addHandler(file_handler)
logging.root.addHandler(console_handler)
configure_logging('/tmp/celery.log')
| mit | 3,055,693,595,506,481,700 | 29.114286 | 128 | 0.66888 | false |
GoodiesHQ/PyPad | pypad/iso_10126.py | 1 | 1166 | """
Implementation of the ISO 10126 algorithm.
"""
import struct
from .utils import random_bytes
from .exceptions import InvalidBlockSize, InvalidMessage
__all__ = ["pad", "unpad", "MAX_BLOCK_SIZE"]
MAX_BLOCK_SIZE = 0x100
def pad(buf, block_size=MAX_BLOCK_SIZE):
"""Padded with random bytes followed by the number of bytes padded."""
if not isinstance(buf, bytes):
raise TypeError("Buffer must be in bytes")
if block_size > MAX_BLOCK_SIZE:
raise InvalidBlockSize("Maximum block size for ISO 10126 is {}".format(MAX_BLOCK_SIZE))
pad_size = block_size - (len(buf) % block_size)
return buf + random_bytes(pad_size - 1) + struct.pack("B", pad_size & 0xff)
def unpad(buf):
"""Extract the last byte and truncate the padded bytes"""
if not isinstance(buf, bytes):
raise TypeError("Buffer must be in bytes")
bufsize = len(buf)
if bufsize == 0:
raise InvalidMessage("The buffer cannot be empty")
pad_size = ord(buf[-1:])
pad_size = pad_size or MAX_BLOCK_SIZE
if bufsize < pad_size:
raise InvalidMessage("The buffer does not match the pad length.")
return buf[:-pad_size]
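# Illustrative round trip (not part of the original module):
#
#   padded = pad(b"hello", block_size=16)  # len(padded) == 16
#   assert unpad(padded) == b"hello"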
| mit | -5,746,526,448,586,905,000 | 28.15 | 95 | 0.664666 | false |
Catch-up-TV-and-More/plugin.video.catchuptvandmore | resources/lib/favourites.py | 1 | 9702 | # -*- coding: utf-8 -*-
# Copyright: (c) 2016, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
from builtins import range, str
from hashlib import md5
import json
import os
from codequick import Script, utils
from kodi_six import xbmc, xbmcgui, xbmcvfs
from resources.lib.addon_utils import get_item_label, get_item_media_path
from resources.lib.kodi_utils import get_selected_item_art, get_selected_item_label, get_selected_item_params, get_selected_item_stream, get_selected_item_info
import resources.lib.mem_storage as mem_storage
FAV_JSON_FP = os.path.join(Script.get_info('profile'), "favourites.json")
FAV_FORMAT_VERSION = 1
def migrate_fav_format(current_fav_format, fav_dict):
"""Migrate favourites dict in last format version
Args:
current_fav_format (int): Current format version of the favourites json file
fav_dict (dict): Favourites dict in old format
Returns:
dict: Updated favourites dict in latest format version
"""
Script.log('Migrate favourites dict in last format version')
new_dict = fav_dict
if current_fav_format == 0:
items = fav_dict
new_dict = {
'items': items,
'format_version': 1
}
current_fav_format = 1
return new_dict
def get_fav_dict_from_json():
"""Get favourites dict from favourites.json
Returns:
dict: Favourites dict
"""
def get_fresh_dict():
return {
'items': {},
'format_version': FAV_FORMAT_VERSION
}
if not xbmcvfs.exists(FAV_JSON_FP):
return get_fresh_dict()
try:
with open(FAV_JSON_FP) as f:
fav_dict = json.load(f)
current_fav_format = fav_dict.get('format_version', 0)
if current_fav_format < FAV_FORMAT_VERSION:
fav_dict = migrate_fav_format(current_fav_format, fav_dict)
return fav_dict
except Exception:
Script.log('Failed to load favourites json data')
xbmcvfs.delete(FAV_JSON_FP)
return get_fresh_dict()
def save_fav_dict_in_json(fav_dict):
"""Dump favourites dict in favourites.json
Args:
fav_dict (dict): Favourites dict to save
"""
with open(FAV_JSON_FP, 'w') as f:
json.dump(fav_dict, f, indent=4)
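# For reference, a favourites.json written by the helpers above has roughly
# this shape (the hash key and field values are illustrative placeholders):
#
#   {
#       "format_version": 1,
#       "items": {
#           "<md5 of the item dict>": {
#               "label": "Live TV - My Channel",
#               "callback": "<plugin route>",
#               "params": {"item_id": "...", "order": 0},
#               "art": {}, "info": {}, "stream": {}, ...
#           }
#       }
#   }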
def guess_fav_prefix(item_id):
"""Keep in memory the current main category (e.g. Live TV, Catch-up TV, ...)
This category label will be used as a prefix when the user add a favourite
"""
prefixes = {
'root': '',
'live_tv': Script.localize(30030),
'replay': Script.localize(30031),
'websites': Script.localize(30032)
}
if item_id in prefixes:
s = mem_storage.MemStorage('fav')
s['prefix'] = prefixes[item_id]
@Script.register
def add_item_to_favourites(plugin, is_playable=False, item_infos={}):
"""Callback function of the 'Add to add-on favourites' item context menu
Args:
plugin (codequick.script.Script)
is_playable (bool): If 'item' is playable
item_infos (dict)
"""
# Need to use same keywords as
# https://scriptmodulecodequick.readthedocs.io/en/latest/_modules/codequick/listing.html#Listitem.from_dict
# in order to be able to directly use `Listitem.from_dict` later
item_dict = {}
# --> callback (string)
item_dict['callback'] = xbmc.getInfoLabel('ListItem.Path').replace(
'plugin://plugin.video.catchuptvandmore', '')
# --> label (string)
item_dict['label'] = get_selected_item_label()
# --> art (dict)
item_dict['art'] = get_selected_item_art()
# --> info (dict)
item_dict['info'] = get_selected_item_info()
# --> stream (dict)
item_dict['stream'] = get_selected_item_stream()
# --> context (list) (TODO)
item_dict['context'] = []
# --> properties (dict) (TODO)
item_dict['properties'] = {}
# --> params (dict)
item_dict['params'] = get_selected_item_params()
# --> subtitles (list) (TODO)
item_dict['subtitles'] = []
if item_infos:
# This item comes from tv_guide_menu
# We need to remove guide TV related
# elements
item_id = item_dict['params']['item_id']
item_dict['label'] = get_item_label(item_id, item_infos)
item_dict['art']["thumb"] = ''
if 'thumb' in item_infos:
item_dict['art']["thumb"] = get_item_media_path(
item_infos['thumb'])
item_dict['art']["fanart"] = ''
if 'fanart' in item_infos:
item_dict['art']["fanart"] = get_item_media_path(
item_infos['fanart'])
item_dict['info']['plot'] = ''
s = mem_storage.MemStorage('fav')
try:
prefix = s['prefix']
except KeyError:
prefix = ''
label_proposal = item_dict['label']
if prefix != '':
label_proposal = prefix + ' - ' + label_proposal
# Ask the user to edit the label
label = utils.keyboard(
plugin.localize(30801), label_proposal)
    # If the user aborted, do not add this item to favourites
if label == '':
return False
item_dict['label'] = label
item_dict['params']['_title_'] = label
item_dict['info']['title'] = label
item_dict['params']['is_playable'] = is_playable
item_dict['params']['is_folder'] = not is_playable
# Compute fav hash
item_hash = md5(str(item_dict).encode('utf-8')).hexdigest()
# Add this item to favourites json file
fav_dict = get_fav_dict_from_json()
item_dict['params']['order'] = len(fav_dict)
fav_dict['items'][item_hash] = item_dict
# Save json file with new fav_dict
save_fav_dict_in_json(fav_dict)
Script.notify(Script.localize(30033), Script.localize(30805), display_time=7000)
@Script.register
def rename_favourite_item(plugin, item_hash):
"""Callback function of the 'Rename' favourite item context menu
Args:
plugin (codequick.script.Script)
item_hash (str): Item hash of the favourite item to rename
"""
item_label = utils.keyboard(plugin.localize(30801),
xbmc.getInfoLabel('ListItem.Label'))
    # If the user aborted, do not edit this item
if item_label == '':
return False
fav_dict = get_fav_dict_from_json()
fav_dict['items'][item_hash]['label'] = item_label
fav_dict['items'][item_hash]['params']['_title_'] = item_label
fav_dict['items'][item_hash]['info']['title'] = item_label
save_fav_dict_in_json(fav_dict)
xbmc.executebuiltin('Container.Refresh()')
@Script.register
def remove_favourite_item(plugin, item_hash):
"""Callback function of the 'Remove' favourite item context menu
Args:
plugin (codequick.script.Script)
item_hash (str): Item hash of the favourite item to remove
"""
fav_dict = get_fav_dict_from_json()
del fav_dict['items'][item_hash]
# We need to fix the order param
# in order to not break the move up/down action
menu = []
for item_hash, item_dict in list(fav_dict['items'].items()):
item = (item_dict['params']['order'], item_hash)
menu.append(item)
menu = sorted(menu, key=lambda x: x[0])
for k in range(0, len(menu)):
item = menu[k]
item_hash = item[1]
fav_dict['items'][item_hash]['params']['order'] = k
save_fav_dict_in_json(fav_dict)
xbmc.executebuiltin('Container.Refresh()')
@Script.register
def move_favourite_item(plugin, direction, item_hash):
"""Callback function of the 'Move Up/Down' favourite item context menu
Args:
plugin (codequick.script.Script)
direction (str): 'down' or 'up'
item_hash (str): Item hash of the favourite item to move
"""
if direction == 'down':
offset = 1
elif direction == 'up':
offset = -1
fav_dict = get_fav_dict_from_json()
item_to_move_id = item_hash
item_to_move_order = fav_dict['items'][item_hash]['params']['order']
menu = []
for item_hash, item_dict in list(fav_dict['items'].items()):
item = (item_dict['params']['order'], item_hash, item_dict)
menu.append(item)
menu = sorted(menu, key=lambda x: x[0])
for k in range(0, len(menu)):
item = menu[k]
item_hash = item[1]
if item_to_move_id == item_hash:
item_to_swap = menu[k + offset]
item_to_swap_order = item_to_swap[0]
item_to_swap_id = item_to_swap[1]
fav_dict['items'][item_to_move_id]['params']['order'] = item_to_swap_order
fav_dict['items'][item_to_swap_id]['params']['order'] = item_to_move_order
save_fav_dict_in_json(fav_dict)
xbmc.executebuiltin('Container.Refresh()')
break
return False
def ask_to_delete_error_fav_item(item_hash):
"""Callback function if a favourite item trigger an error
Suggest user to delete
the fav item that trigger an error
Args:
item_hash (str): Item hash that trigger an error
"""
r = xbmcgui.Dialog().yesno(Script.localize(30600),
Script.localize(30807))
if r:
remove_favourite_item(plugin=None, item_hash=item_hash)
@Script.register
def delete_favourites(plugin):
"""Callback function of 'Delete favourites' setting button
Args:
plugin (codequick.script.Script)
"""
Script.log('Delete favourites db')
xbmcvfs.delete(os.path.join(Script.get_info('profile'), 'favourites.json'))
Script.notify(Script.localize(30374), '')
| gpl-2.0 | -8,269,652,303,382,676,000 | 29.037152 | 159 | 0.612039 | false |
MediaKraken/MediaKraken_Deployment | source/database/db_base_metadata_tvmaze.py | 1 | 2561 | """
Copyright (C) 2015 Quinn D Granfor <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import uuid
def db_meta_tvmaze_changed_uuid(self, maze_uuid):
"""
    Return the stored 'updated' timestamp for a TV show, looked up by its TVmaze id.
"""
self.db_cursor.execute('SELECT mm_metadata_tvshow_json->>\'updated\''
' from mm_metadata_tvshow'
' where mm_metadata_media_tvshow_id->\'tvmaze\' ? %s',
(maze_uuid,))
try:
return self.db_cursor.fetchone()['mm_metadata_tvshow_json']
except:
return None
def db_meta_tvmaze_insert(self, series_id_json, tvmaze_name, show_detail,
image_json):
"""
Insert tv series into db
"""
new_uuid = uuid.uuid4()
self.db_cursor.execute('insert into mm_metadata_tvshow (mm_metadata_tvshow_guid,'
' mm_metadata_media_tvshow_id,'
' mm_metadata_tvshow_name,'
' mm_metadata_tvshow_json,'
' mm_metadata_tvshow_localimage_json)'
' values (%s,%s,%s,%s,%s)',
(new_uuid, series_id_json, tvmaze_name, show_detail, image_json))
self.db_commit()
return new_uuid
def db_meta_tvmaze_update(self, series_id_json, tvmaze_name, show_detail,
tvmaze_id):
"""
Update tv series in db
"""
self.db_cursor.execute('update mm_metadata_tvshow'
' set mm_metadata_media_tvshow_id = %s,'
'mm_metadata_tvshow_name = %s,'
' mm_metadata_tvshow_json = %s '
'where mm_metadata_media_tvshow_id->\'tvmaze\'::text = %s',
(series_id_json, tvmaze_name, show_detail, str(tvmaze_id)))
self.db_commit()
| gpl-3.0 | 5,325,643,444,591,288,000 | 38.015625 | 92 | 0.56189 | false |
owenwater/alfred-cal | src/config.py | 1 | 5353 | #!/usr/bin/python
# encoding: utf-8
from util import get_from_dict
from base import Base
import json
def show(func):
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
self.wf.send_feedback()
return wrapper
class Config(Base):
separator = ' '
open_config_file = "open_config_file"
def main(self, wf):
self._handle_arg()
self._load_json()
config = self._filter_config_item()
if len(config) == 1 and config[0]['keyword'] == self.option:
if 'list' in config[0]:
self._show_sub_list(config[0]['list'], config[0], self.value)
return
elif 'action' in config[0]:
self._show_action_item(config[0])
return
elif self.value != "":
self._show_item(config[0], self.value)
return
if len(self.config) != 0:
self.config = config
self._show_list()
def set_value(self):
from workflow import Workflow
self.wf = Workflow()
if self.args == self.open_config_file:
self._open_file(self.wf.settings_path)
else:
self._handle_arg()
old_value = self.wf.settings[self.option]
self.wf.settings[self.option] = type(old_value)(self.value)
def load_default(self, key):
self._load_json()
for item in self.config:
if key == item['name']:
return item['default']
return None
def _open_file(self, file):
import subprocess
subprocess.call(['open', file])
def _handle_arg(self):
self.option, _, self.value = self.args.partition(self.separator)
self.option = self.option.strip()
self.value = self.value.strip()
def _load_json(self, file="config.json"):
if not hasattr(self, 'config') or not self.config:
with open(file) as fp:
self.config = json.load(fp)
def _filter_config_item(self):
return [item for item in self.config if item['keyword'].startswith(self.option)]
@show
def _show_list(self):
for item in self.config:
title, subtitle = self._get_text(item)
self.wf.add_item(title, subtitle=subtitle, autocomplete=item['keyword'] + " ")
@show
def _show_item(self, item, new_value):
try:
self._check_valid(item, new_value)
title, subtitle = self._get_text(item)
subtitle += ", set to: " + new_value
self.wf.add_item(title,
subtitle=subtitle,
valid=True,
arg=item['name'] + Config.separator + new_value)
except InvalidInputError as e:
self.wf.add_item(e.message)
return
@show
def _show_sub_list(self, sub_list, item, value):
current_value = get_from_dict(self.wf.settings, item['name'], item['default'])
for sub_item, sub_value in sub_list:
if value.lower() in sub_item.lower():
title = sub_item
if sub_value == current_value:
title += " (selected)"
self.wf.add_item(title,
valid=True,
arg=item['name'] + Config.separator + str(sub_value),
autocomplete=item['keyword'] + ' ' + sub_item.lower())
@show
def _show_action_item(self, item):
self.wf.add_item(item['description'],
valid=True,
arg=self.open_config_file)
def _check_valid(self, item, new_value):
return getattr(self, '_check_' + item['type'])(item, new_value)
def _get_text(self, item):
title = item['description']
if 'name' in item and 'default' in item:
current_value = get_from_dict(self.wf.settings, item['name'], item['default'])
if 'list' in item:
current_value = next((i for i in item['list'] if i[1] == current_value))[0]
subtitle = u"Current: %s" % (current_value)
else:
subtitle = ""
return title, subtitle
def _check_int(self, item, new_value):
not_int = "Please enter an integer"
too_small = "Value must be larger than %s"
too_large = "Value must be smaller than %s"
out_of_range = "Value must be between %s and %s"
try:
value = int(new_value)
if 'min' in item and 'max' in item and (not item['min'] <= value <= item['max']):
raise InvalidInputError(out_of_range % (item['min'], item['max']))
elif 'min' in item and value < item['min']:
raise InvalidInputError(too_small % (item['min']))
elif 'max' in item and value > item['max']:
raise InvalidInputError(too_large % (item['max']))
except ValueError:
raise InvalidInputError(not_int)
def _check_str(self, item, new_value):
return True
def _check_list(self, item):
return True
class InvalidInputError(Exception):
pass
if __name__ == "__main__":
import sys
c = Config(' '.join(sys.argv[1:]))
c.execute()
| mit | 2,889,024,400,147,739,000 | 32.666667 | 93 | 0.5255 | false |
Featuretools/featuretools | featuretools/primitives/base/aggregation_primitive_base.py | 1 | 5863 | import copy
import functools
import inspect
from featuretools.primitives.base.primitive_base import PrimitiveBase
from featuretools.primitives.base.utils import inspect_function_args
class AggregationPrimitive(PrimitiveBase):
stack_on = None # whitelist of primitives that can be in input_types
    stack_on_exclude = None  # blacklist of primitives that cannot be in input_types
base_of = None # whitelist of primitives this prim can be input for
base_of_exclude = None # primitives this primitive can't be input for
stack_on_self = True # whether or not it can be in input_types of self
def generate_name(self, base_feature_names, relationship_path_name,
parent_entity_id, where_str, use_prev_str):
base_features_str = ", ".join(base_feature_names)
return u"%s(%s.%s%s%s%s)" % (
self.name.upper(),
relationship_path_name,
base_features_str,
where_str,
use_prev_str,
self.get_args_string(),
)
def make_agg_primitive(function, input_types, return_type, name=None,
stack_on_self=True, stack_on=None,
stack_on_exclude=None, base_of=None,
base_of_exclude=None, description=None,
cls_attributes=None, uses_calc_time=False,
default_value=None, commutative=False,
number_output_features=1):
'''Returns a new aggregation primitive class. The primitive infers default
values by passing in empty data.
Args:
function (function): Function that takes in a series and applies some
transformation to it.
input_types (list[Variable]): Variable types of the inputs.
return_type (Variable): Variable type of return.
name (str): Name of the function. If no name is provided, the name
of `function` will be used.
stack_on_self (bool): Whether this primitive can be in input_types of self.
stack_on (list[PrimitiveBase]): Whitelist of primitives that
can be input_types.
stack_on_exclude (list[PrimitiveBase]): Blacklist of
primitives that cannot be input_types.
        base_of (list[PrimitiveBase]): Whitelist of primitives that
can have this primitive in input_types.
base_of_exclude (list[PrimitiveBase]): Blacklist of
primitives that cannot have this primitive in input_types.
description (str): Description of primitive.
cls_attributes (dict[str -> anytype]): Custom attributes to be added to
class. Key is attribute name, value is the attribute value.
uses_calc_time (bool): If True, the cutoff time the feature is being
calculated at will be passed to the function as the keyword
argument 'time'.
default_value (Variable): Default value when creating the primitive to
            avoid the inference step. If no default value is provided, the
            inference will happen.
commutative (bool): If True, will only make one feature per unique set
of base features.
number_output_features (int): The number of output features (columns in
the matrix) associated with this feature.
Example:
.. ipython :: python
from featuretools.primitives import make_agg_primitive
from featuretools.variable_types import DatetimeTimeIndex, Numeric
def time_since_last(values, time=None):
time_since = time - values.iloc[-1]
return time_since.total_seconds()
TimeSinceLast = make_agg_primitive(
function=time_since_last,
input_types=[DatetimeTimeIndex],
return_type=Numeric,
description="Time since last related instance",
uses_calc_time=True)
'''
if description is None:
default_description = 'A custom primitive'
doc = inspect.getdoc(function)
description = doc if doc is not None else default_description
cls = {"__doc__": description}
if cls_attributes is not None:
cls.update(cls_attributes)
name = name or function.__name__
new_class = type(name, (AggregationPrimitive,), cls)
new_class.name = name
new_class.input_types = input_types
new_class.return_type = return_type
new_class.stack_on = stack_on
new_class.stack_on_exclude = stack_on_exclude
new_class.stack_on_self = stack_on_self
new_class.base_of = base_of
new_class.base_of_exclude = base_of_exclude
new_class.commutative = commutative
new_class.number_output_features = number_output_features
new_class, default_kwargs = inspect_function_args(new_class,
function,
uses_calc_time)
if len(default_kwargs) > 0:
new_class.default_kwargs = default_kwargs
def new_class_init(self, **kwargs):
self.kwargs = copy.deepcopy(self.default_kwargs)
self.kwargs.update(kwargs)
self.partial = functools.partial(function, **self.kwargs)
self.partial.__name__ = name
new_class.__init__ = new_class_init
new_class.get_function = lambda self: self.partial
else:
# creates a lambda function that returns function every time
new_class.get_function = lambda self, f=function: f
if default_value is None:
# infers default_value by passing empty data
try:
new_class.default_value = function(*[[]] * len(input_types))
except Exception:
pass
else:
# avoiding the inference step
new_class.default_value = default_value
return new_class
| bsd-3-clause | -8,215,395,603,012,441,000 | 38.348993 | 83 | 0.622889 | false |
trhongbinwang/data_science_journey | deep_learning/pytorch/tutorials/09 - Image Captioning/data.py | 1 | 3501 | import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import os
import pickle
import numpy as np
import nltk
from PIL import Image
from vocab import Vocabulary
from pycocotools.coco import COCO
class CocoDataset(data.Dataset):
"""COCO Custom Dataset compatible with torch.utils.data.DataLoader."""
def __init__(self, root, json, vocab, transform=None):
"""Set the path for images, captions and vocabulary wrapper.
Args:
root: image directory.
json: coco annotation file path.
vocab: vocabulary wrapper.
transform: image transformer
"""
self.root = root
self.coco = COCO(json)
self.ids = list(self.coco.anns.keys())
self.vocab = vocab
self.transform = transform
def __getitem__(self, index):
"""Returns one data pair (image and caption)."""
coco = self.coco
vocab = self.vocab
ann_id = self.ids[index]
caption = coco.anns[ann_id]['caption']
img_id = coco.anns[ann_id]['image_id']
path = coco.loadImgs(img_id)[0]['file_name']
image = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
image = self.transform(image)
# Convert caption (string) to word ids.
tokens = nltk.tokenize.word_tokenize(str(caption).lower())
caption = []
caption.append(vocab('<start>'))
caption.extend([vocab(token) for token in tokens])
caption.append(vocab('<end>'))
target = torch.Tensor(caption)
return image, target
def __len__(self):
return len(self.ids)
def collate_fn(data):
"""Creates mini-batch tensors from the list of tuples (image, caption).
Args:
data: list of tuple (image, caption).
- image: torch tensor of shape (3, 256, 256).
- caption: torch tensor of shape (?); variable length.
Returns:
images: torch tensor of shape (batch_size, 3, 256, 256).
targets: torch tensor of shape (batch_size, padded_length).
lengths: list; valid length for each padded caption.
"""
# Sort a data list by caption length
data.sort(key=lambda x: len(x[1]), reverse=True)
images, captions = zip(*data)
# Merge images (from tuple of 3D tensor to 4D tensor)
images = torch.stack(images, 0)
# Merge captions (from tuple of 1D tensor to 2D tensor)
lengths = [len(cap) for cap in captions]
targets = torch.zeros(len(captions), max(lengths)).long()
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
return images, targets, lengths
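# Worked example (illustrative): for a batch of two samples whose captions
# have lengths 5 and 3, collate_fn returns images of shape (2, 3, 256, 256),
# targets of shape (2, 5) with the shorter caption zero-padded on the right,
# and lengths == [5, 3].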
def get_data_loader(root, json, vocab, transform, batch_size, shuffle, num_workers):
"""Returns torch.utils.data.DataLoader for custom coco dataset."""
# COCO dataset
coco = CocoDataset(root=root,
json=json,
vocab = vocab,
transform=transform)
# Data loader for COCO dataset
data_loader = torch.utils.data.DataLoader(dataset=coco,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn)
return data_loader | apache-2.0 | 3,336,840,735,813,640,000 | 34.373737 | 84 | 0.579834 | false |
amontefusco/ledpanel-utils | textopt.py | 1 | 1845 | #!/usr/bin/python
# Show a sliding text on RGB led panel
# (c) 2014 Sergio Tanzilli - [email protected]
# Multiple panel capability added by A.Montefusco 2017,
# requires ledpanel.ko 2.0
# All the images are computed in advance in order to improve speed
# in case of lengthy string
#
import time
import sys
import os
from datetime import datetime
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import StringIO
import probe
if len(sys.argv)<6 or len(sys.argv)>6:
print "Syntax:"
print " %s text r g b loop" % (sys.argv[0])
print
print "loop=0 forever loop"
quit()
print "Panel size: %d x %d\n" % (probe.panel_w, probe.panel_h)
font = ImageFont.truetype('fonts/Ubuntu-B.ttf', 32)
width, height = font.getsize(sys.argv[1])
text=sys.argv[1]
r=int(sys.argv[2])
g=int(sys.argv[3])
b=int(sys.argv[4])
loops=int(sys.argv[5])
#
# compute all images
#
print "Computing all the images, please wait...."
x = probe.panel_w
imgs = []
while True:
x=x-1
if x < -(width): break
im = Image.new("RGB", (probe.panel_w, probe.panel_h), "black")
draw = ImageDraw.Draw(im)
draw.fontmode="1" #No antialias
draw.rectangle((0, 0, probe.panel_w - 1, height), outline=0, fill=0)
draw.text((x, -1), text, (r,g,b), font=font)
imgs.append(im)
print "All images generated (%d), stream starts..." % len(imgs)
# setup driver access
out_file = open("/sys/class/ledpanel/rgb_buffer","w")
output = StringIO.StringIO()
x = probe.panel_w
i = 0
while True:
x = x - 1
if x < -(width):
if loops==0:
x = probe.panel_w
i = 0
continue
else:
if loops==1:
break
else:
loops=loops-1
x = probe.panel_w
i = 0
continue
output.truncate(0)
imgs[i].save(output, format='PPM')
buf=output.getvalue()
out_file.seek(0)
out_file.write(buf[13:])
i = i + 1
out_file.close()
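# Example invocation (illustrative values): scroll "HELLO" in red for two passes.
#   python textopt.py "HELLO" 255 0 0 2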
| gpl-2.0 | 866,967,959,076,484,500 | 16.912621 | 69 | 0.659079 | false |
tensorflow/tfx | tfx/components/util/udf_utils_test.py | 1 | 7028 | # Lint as: python2, python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.util.udf_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import os
import subprocess
import sys
import tempfile
from unittest import mock
import tensorflow as tf
from tfx.components.util import udf_utils
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_executor
from tfx.dsl.components.base import executor_spec
from tfx.types import component_spec
from tfx.utils import import_utils
class _MyComponentSpec(component_spec.ComponentSpec):
PARAMETERS = {
'my_module_file':
component_spec.ExecutionParameter(type=str, optional=True),
'my_module_path':
component_spec.ExecutionParameter(type=str, optional=True),
}
INPUTS = {}
OUTPUTS = {}
class _MyComponent(base_component.BaseComponent):
SPEC_CLASS = _MyComponentSpec
EXECUTOR_SPEC = executor_spec.BeamExecutorSpec(base_executor.BaseExecutor)
class UdfUtilsTest(tf.test.TestCase):
@mock.patch.object(import_utils, 'import_func_from_source')
def testGetFnFromSource(self, mock_import_func):
exec_properties = {'module_file': 'path/to/module_file.py'}
udf_utils.get_fn(exec_properties, 'test_fn')
mock_import_func.assert_called_once_with('path/to/module_file.py',
'test_fn')
@mock.patch.object(import_utils, 'import_func_from_module')
def testGetFnFromModule(self, mock_import_func):
exec_properties = {'module_path': 'path.to.module'}
udf_utils.get_fn(exec_properties, 'test_fn')
mock_import_func.assert_called_once_with('path.to.module', 'test_fn')
@mock.patch.object(import_utils, 'import_func_from_module')
def testGetFnFromModuleFn(self, mock_import_func):
exec_properties = {'test_fn': 'path.to.module.test_fn'}
udf_utils.get_fn(exec_properties, 'test_fn')
mock_import_func.assert_called_once_with('path.to.module', 'test_fn')
def testGetFnFailure(self):
with self.assertRaises(ValueError):
udf_utils.get_fn({}, 'test_fn')
def test_ephemeral_setup_py_contents(self):
contents = udf_utils._get_ephemeral_setup_py_contents(
'my_pkg', '0.0+xyz', ['a', 'abc', 'xyz'])
self.assertIn("name='my_pkg',", contents)
self.assertIn("version='0.0+xyz',", contents)
self.assertIn("py_modules=['a', 'abc', 'xyz'],", contents)
def test_version_hash(self):
def _write_temp_file(user_module_dir, file_name, contents):
with open(os.path.join(user_module_dir, file_name), 'w') as f:
f.write(contents)
user_module_dir = tempfile.mkdtemp()
_write_temp_file(user_module_dir, 'a.py', 'aa1')
_write_temp_file(user_module_dir, 'bb.py', 'bbb2')
_write_temp_file(user_module_dir, 'ccc.py', 'cccc3')
_write_temp_file(user_module_dir, 'dddd.py', 'ddddd4')
expected_plaintext = (
# Length and encoding of "a.py".
b'\x00\x00\x00\x00\x00\x00\x00\x04a.py'
# Length and encoding of contents of "a.py".
b'\x00\x00\x00\x00\x00\x00\x00\x03aa1'
# Length and encoding of "ccc.py".
b'\x00\x00\x00\x00\x00\x00\x00\x06ccc.py'
# Length and encoding of contents of "ccc.py".
b'\x00\x00\x00\x00\x00\x00\x00\x05cccc3'
# Length and encoding of "dddd.py".
b'\x00\x00\x00\x00\x00\x00\x00\x07dddd.py'
# Length and encoding of contents of "dddd.py".
b'\x00\x00\x00\x00\x00\x00\x00\x06ddddd4')
h = hashlib.sha256()
h.update(expected_plaintext)
expected_version_hash = h.hexdigest()
self.assertEqual(
expected_version_hash,
'4fecd9af212c76ee4097037caf78c6ba02a2e82584837f2031bcffa0f21df43e')
self.assertEqual(
udf_utils._get_version_hash(user_module_dir,
['dddd.py', 'a.py', 'ccc.py']),
expected_version_hash)
def testAddModuleDependencyAndPackage(self):
# Do not test packaging in unsupported environments.
if not udf_utils.should_package_user_modules():
return
# Create a component with a testing user module file.
temp_dir = tempfile.mkdtemp()
temp_module_file = os.path.join(temp_dir, 'my_user_module.py')
with open(temp_module_file, 'w') as f:
f.write('# Test user module file.\nEXPOSED_VALUE="ABC123xyz"')
component = _MyComponent(
spec=_MyComponentSpec(my_module_file=temp_module_file))
# Add the user module file pip dependency.
udf_utils.add_user_module_dependency(component, 'my_module_file',
'my_module_path')
self.assertLen(component._pip_dependencies, 1)
dependency = component._pip_dependencies[0]
self.assertIsInstance(dependency, udf_utils.UserModuleFilePipDependency)
self.assertIs(dependency.component, component)
self.assertEqual(dependency.module_file_key, 'my_module_file')
self.assertEqual(dependency.module_path_key, 'my_module_path')
# Resolve the pip dependency and package the user module.
temp_pipeline_root = tempfile.mkdtemp()
component._resolve_pip_dependencies(temp_pipeline_root)
self.assertLen(component._pip_dependencies, 1)
dependency = component._pip_dependencies[0]
# The hash version is based on the module names and contents and thus
# should be stable.
self.assertEqual(
dependency,
os.path.join(
temp_pipeline_root, '_wheels', 'tfx_user_code_MyComponent-0.0+'
'1c9b861db85cc54c56a56cbf64f77c1b9d1ded487d60a97d082ead6b250ee62c'
'-py3-none-any.whl'))
# Test import behavior within context manager.
with udf_utils.TempPipInstallContext([dependency]):
# Test import from same process.
import my_user_module # pylint: disable=g-import-not-at-top
self.assertEqual(my_user_module.EXPOSED_VALUE, 'ABC123xyz')
del sys.modules['my_user_module']
# Test import from a subprocess.
self.assertEqual(
subprocess.check_output([
sys.executable, '-c',
'import my_user_module; print(my_user_module.EXPOSED_VALUE)'
]), b'ABC123xyz\n')
# Test that the import paths are cleaned up, so the user module can no
# longer be imported.
with self.assertRaises(ModuleNotFoundError):
import my_user_module # pylint: disable=g-import-not-at-top
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 1,081,165,920,403,293,700 | 37.828729 | 78 | 0.680279 | false |
smartboyathome/Wonderland-Engine | WhiteRabbit/checks/SampleChecks.py | 1 | 2132 | '''
Copyright (c) 2012 Alexander Abbott
This file is part of the Cheshire Cyber Defense Scoring Engine (henceforth
referred to as Cheshire).
Cheshire is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Cheshire is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
more details.
You should have received a copy of the GNU Affero General Public License
along with Cheshire. If not, see <http://www.gnu.org/licenses/>.
'''
from datetime import datetime, timedelta
import time
from WhiteRabbit.check_types import ServiceCheck, InjectCheck, AttackerCheck
class SampleServiceCheck(ServiceCheck):
def __init__(self, machine, team_id, db_host, db_port, db_name):
super(SampleServiceCheck, self).__init__(machine, team_id, db_host, db_port, db_name)
@property
def timeout(self):
return 15
def run_check(self):
self._mutable_vars.score = 5
pass
class SampleInjectCheck(InjectCheck):
def __init__(self, machine_id, team_id, db_host, db_port, db_name, time_to_check):
super(SampleInjectCheck, self).__init__(machine_id, team_id, db_host, db_port, db_name)
self._run_time = time_to_check
@property
def timeout(self):
return 15
@property
def time_to_run(self):
return self._run_time
@property
def inject_number(self):
return 0
def run_check(self):
self._mutable_vars.score = 5
class SampleAttackerCheck(AttackerCheck):
def __init__(self, machine_id, team_id, db_host, db_port, db_name):
super(SampleAttackerCheck, self).__init__(machine_id, team_id, db_host, db_port, db_name)
@property
def timeout(self):
return 15
def run_check(self):
self._mutable_vars.score = -5 | agpl-3.0 | -1,298,960,669,361,816,600 | 31.318182 | 97 | 0.68152 | false |
mattjhayes/nmeta2dpae | nmeta2dpae/dp.py | 1 | 9858 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*** nmeta - Network Metadata - Policy Interpretation Class and Methods
"""
This module is part of the nmeta2 suite
.
It provides an object for data plane coordination services
.
Version 2.x Toulouse Code
"""
#*** Logging imports:
import logging
import logging.handlers
import coloredlogs
#*** General imports:
import sys
import traceback
#*** JSON:
import json
from json import JSONEncoder
#*** nmeta-dpae imports:
import sniff
import tc
class DP(object):
"""
This class is instantiated by nmeta2_dpae.py and provides methods
to run the data plane services.
"""
def __init__(self, _config):
#*** Get logging config values from config class:
_logging_level_s = _config.get_value \
('dp_logging_level_s')
_logging_level_c = _config.get_value \
('dp_logging_level_c')
_syslog_enabled = _config.get_value('syslog_enabled')
_loghost = _config.get_value('loghost')
_logport = _config.get_value('logport')
_logfacility = _config.get_value('logfacility')
_syslog_format = _config.get_value('syslog_format')
_console_log_enabled = _config.get_value('console_log_enabled')
_coloredlogs_enabled = _config.get_value('coloredlogs_enabled')
_console_format = _config.get_value('console_format')
#*** Set up Logging:
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
self.logger.propagate = False
#*** Syslog:
if _syslog_enabled:
#*** Log to syslog on host specified in config.yaml:
self.syslog_handler = logging.handlers.SysLogHandler(address=(
_loghost, _logport),
facility=_logfacility)
syslog_formatter = logging.Formatter(_syslog_format)
self.syslog_handler.setFormatter(syslog_formatter)
self.syslog_handler.setLevel(_logging_level_s)
#*** Add syslog log handler to logger:
self.logger.addHandler(self.syslog_handler)
#*** Console logging:
if _console_log_enabled:
#*** Log to the console:
if _coloredlogs_enabled:
#*** Colourise the logs to make them easier to understand:
coloredlogs.install(level=_logging_level_c,
logger=self.logger, fmt=_console_format, datefmt='%H:%M:%S')
else:
#*** Add console log handler to logger:
self.console_handler = logging.StreamHandler()
console_formatter = logging.Formatter(_console_format)
self.console_handler.setFormatter(console_formatter)
self.console_handler.setLevel(_logging_level_c)
self.logger.addHandler(self.console_handler)
#*** Instantiate TC Classification class:
self.tc = tc.TC(_config)
#*** Instantiate Sniff Class:
self.sniff = sniff.Sniff(_config, self.tc)
def dp_discover(self, queue, if_name, dpae2ctrl_mac,
ctrl2dpae_mac, dpae_ethertype, timeout, uuid_dpae,
uuid_controller):
"""
Data plane service for DPAE Join Discover Packet Sniffing
"""
self.logger.debug("Starting data plane discover confirm on %s",
if_name)
#*** Check promisc mode and enable if not turned on:
promisc = 0
try:
promisc = self.sniff.get_promiscuous_mode(if_name)
except Exception, e:
self.logger.error("Exception setting promiscuous mode: %s",
e, exc_info=True)
result = 0
queue.put(result)
return result
if not promisc:
#*** Set interface to promiscuous mode so we see all packets:
try:
self.sniff.set_promiscuous_mode(if_name)
except Exception, e:
self.logger.error("Exception setting promiscuous mode: %s",
e, exc_info=True)
result = 0
queue.put(result)
return result
else:
self.logger.info("Interface already in promiscuous mode")
#*** Run the sniffer to see if we can capture a discover
#*** confirm packet:
try:
payload = self.sniff.discover_confirm(if_name, dpae2ctrl_mac,
ctrl2dpae_mac, dpae_ethertype, timeout)
except Exception, e:
self.logger.error("Exception running sniff.discover_confirm: %s",
e, exc_info=True)
result = 0
queue.put(result)
return result
if payload:
#*** Validate JSON in payload:
json_decode = JSON_Body(str(payload))
if json_decode.error:
self.logger.error("Phase 3 packet payload is not JSON"
"error=%s", json_decode.error_full)
result = 0
queue.put(result)
return result
#*** Validate required keys are present in the JSON:
if not json_decode.validate(['hostname_dpae', 'uuid_dpae',
'uuid_controller', 'if_name']):
self.logger.error("Validation error %s", json_decode.error)
result = 0
queue.put(result)
return result
#*** Validate the Controller UUID value in the JSON:
if str(json_decode['uuid_controller']) == str(uuid_controller):
self.logger.info("Success! Matched discover confirm.")
result = 1
queue.put(result)
return result
else:
self.logger.error("Validation error for uuid_controller")
result = 0
queue.put(result)
return result
else:
self.logger.warning("No payload returned. This happens sometimes")
result = 0
queue.put(result)
return result
def dp_run(self, interplane_queue, tc_policy, if_name):
"""
Run Data Plane (DP) Traffic Classification for an interface
"""
#*** Set local identity harvest flags in tc for efficient access:
self.logger.debug("Setting Identity Harvest Flags")
self.tc.id_arp = tc_policy.get_id_flag(if_name, 'arp')
self.tc.id_lldp = tc_policy.get_id_flag(if_name, 'lldp')
self.tc.id_dns = tc_policy.get_id_flag(if_name, 'dns')
self.tc.id_dhcp = tc_policy.get_id_flag(if_name, 'dhcp')
#*** Set up TC classifiers to run in tc class:
_classifiers = tc_policy.get_tc_classifiers(if_name)
self.tc.instantiate_classifiers(_classifiers)
#*** Run sniffer to capture traffic and send to TC:
try:
self.sniff.sniff_run(if_name, self.tc, tc_policy, interplane_queue)
except Exception, e:
self.logger.critical("sniff.sniff_run: %s", e, exc_info=True)
return 0
class JSON_Body(object):
"""
Represents a JSON-encoded body of an HTTP request.
Doesn't do logging, but does set .error when things
don't go to plan with a friendly message.
"""
def __init__(self, req_body):
self.json = {}
self.error = ""
self.error_full = ""
self.req_body = self.decode(req_body)
def decode(self, req_body):
"""
        Passed an allegedly JSON-encoded body; try to decode it and
        set the error variables if decoding fails.
"""
json_decode = {}
if req_body:
#*** Try decode as JSON:
try:
json_decode = json.loads(req_body)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.error = '{\"Error\": \"Bad JSON\"}'
self.error_full = '{\"Error\": \"Bad JSON\",' + \
'\"exc_type\":' + str(exc_type) + ',' + \
'\"exc_value\":' + str(exc_value) + ',' + \
'\"exc_traceback\":' + str(exc_traceback) + '}'
return 0
else:
json_decode = {}
self.json = json_decode
return json_decode
def validate(self, key_list):
"""
        Passed a list of keys, check that they all exist in the
        JSON. If any is missing, return 0 and set error to describe
        the first missing key that was found.
"""
for key in key_list:
if not key in self.req_body:
self.error = '{\"Error\": \"No ' + key + '\"}'
return 0
return 1
def __getitem__(self, key):
"""
Passed a key and see if it exists in JSON
object. If it does, return the value for the key.
If not, return 0
Example:
foo = json_body['foo']
"""
if key in self.req_body:
return self.req_body[key]
else:
return 0
| apache-2.0 | 1,177,872,324,952,776,200 | 37.209302 | 79 | 0.546764 | false |
th0mmeke/toyworld | chemistry_model/default_chemistry.py | 1 | 5522 | """
Created on 14 Aug 2013
@author: thom
"""
from rdkit.Chem import AllChem as Chem
class DefaultChemistry(object):
"""A simple Chemistry based on real-world chemistry."""
def __init__(self, parameters=None):
""":param parameters: Parameters object"""
# Single : 77.7 = 777.1/10 = 104.2 + 83 + 38.4 + 35 + 99 + 93 + 111 + 73 + 85.5 + 55
# Double : 148.2 = 889/6 = 185 + 146 + 149 + 119 + 147 + 143
# Triple : 224.3 = 897/4 = 258 + 200 + 226 + 213
default_data = {
'H1H': 104.2,
'C1C': 83,
'N1N': 38.4,
'O1O': 35,
'H1C': 99, 'C1H': 99,
'H1N': 93, 'N1H': 93,
'H1O': 111, 'O1H': 111,
'C1N': 73, 'N1C': 73,
'C1O': 85.5, 'O1C': 85.5,
'N1O': 55, 'O1N': 55,
'C2O': 185, 'O2C': 185, # rough average of range
'C2C': 146,
'N2N': 149,
'O2O': 119,
'C2N': 147, 'N2C': 147,
'N2O': 143, 'O2N': 143,
'C3O': 258, 'O3C': 258,
'C3C': 200,
'N3N': 226,
'C3N': 213, 'N3C': 213,
'C4C': 200 # theoretically possible from valences, but in nature forms a C2C bond instead
}
count = {}
default_bond_energies = {}
for bond, energy in default_data.iteritems():
key = int(bond[1])
try:
count[key] += 1
default_bond_energies[key] += energy
except:
count[key] = 1
default_bond_energies[key] = energy
for i in (1, 2, 3):
default_bond_energies[i] = default_bond_energies[i] / count[i]
self._atoms = ['C', 'N', 'O', 'H']
self._bond_formation_energies = {}
self._bond_break_energies = {}
formation_energies = None
break_energies = None
if parameters is not None: # Parameters object
atoms = parameters.get('Atoms')
if atoms is not None:
self._atoms = []
for atom in atoms.findall('Atom'):
self._atoms.append(atom.text)
formation_energies = parameters.get('BondFormationEnergies')
break_energies = parameters.get('BondBreakEnergies')
for atom_1 in self._atoms:
for atom_2 in self._atoms:
for bond_type, xml_key in {1: 'Single', 2: 'Double', 3: 'Triple'}.iteritems():
key = "{}{}{}".format(atom_1, bond_type, atom_2)
if formation_energies is None:
if key in default_data.keys():
self._bond_formation_energies[key] = default_data[key]
else:
self._bond_formation_energies[key] = default_bond_energies[bond_type]
else:
self._bond_formation_energies[key] = float(formation_energies.find(xml_key).text)
if break_energies is None:
self._bond_break_energies[key] = self._bond_formation_energies[key]
else:
self._bond_break_energies[key] = float(break_energies.find(xml_key).text)
def get_bond_potential(self, atom):
"""Requires Explicit Hs!
Simple method based on standard Lewis dot-structures e.g., http://library.thinkquest.org/C006669/data/Chem/bonding/lewis.html
Bond calculation:
        bond potential = 8 - (valence electrons + bonded electrons + formal charge),
        where a single bond counts as 1, a double bond as 2, and so on.
"""
if atom.GetAtomicNum() == 1:
if len(atom.GetBonds()) == 0: # if not already bound...
return 1
else:
return 0
else:
bonded_electrons = 0
for bond in atom.GetBonds():
bonded_electrons += bond.GetBondType() # relies on Chem.BondType mapping to int...
valence_electrons = Chem.GetPeriodicTable().GetNOuterElecs(atom.GetAtomicNum())
# logging.info("Bond potential: 8 - ({} + {} + {})".format(valence_electrons, bonded_electrons, atom.GetFormalCharge()))
return 8 - (valence_electrons + bonded_electrons + atom.GetFormalCharge())
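    # Worked example (illustrative): a carbon atom with one double bond and no
    # formal charge has 4 valence electrons and 2 bonded electrons, so its
    # remaining bond potential is 8 - (4 + 2 + 0) = 2.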
def get_bond_energy(self, atom_1, atom_2, end_bond_type=0, start_bond_type=0):
"""Returns the energy REQUIRED to make the bond change from start_bond_type (or existing type if not provided) to end_bond_type.
Creation of a bond requires -e; breaking the bond +e
Energies taken from http://www.cem.msu.edu/~reusch/OrgPage/bndenrgy.htm - Average Bond Dissociation Enthalpies in kcal per mole
:param atom_1: One end of the bond
:type atom_1: Chem.Atom
:param atom_2: Other end of the bond
:type atom_2: Chem.Atom
:param bond_type: Type of the bond, corresponding to index into Chem.BondType.values
:type bond_type: int
:rtype: int
"""
# Energy to release current bond state
if start_bond_type == 0:
start_energy = 0
else:
start_energy = self._bond_break_energies[atom_1.GetSymbol() + str(min(3, start_bond_type)) + atom_2.GetSymbol()]
# Energy to create desired bond state
if end_bond_type == 0:
end_energy = 0
else:
end_energy = self._bond_formation_energies[atom_1.GetSymbol() + str(min(3, end_bond_type)) + atom_2.GetSymbol()]
return start_energy - end_energy
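    # Worked example (illustrative, using the default energy table above):
    # taking a C-C pair from no bond (start_bond_type=0) to a double bond
    # (end_bond_type=2) returns 0 - 146 = -146 kcal/mol, i.e. energy is
    # released when the bond is formed.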
| gpl-3.0 | 2,791,581,899,556,919,300 | 39.602941 | 136 | 0.528251 | false |
alshedivat/kgp | tests/utils/test_assemble.py | 1 | 3048 | from six.moves import xrange
import numpy as np
import kgp
from kgp.losses import gen_gp_loss
from kgp.utils.assemble import *
# Test parameters
N = 256
batch_size = 64
input_shape = (32, 10)
output_shape = (2, )
optimizer = 'rmsprop'
# Load configs
narx_configs = load_NN_configs(filename='narx.yaml',
input_shape=input_shape,
output_shape=output_shape)
lstm_configs = load_NN_configs(filename='lstm.yaml',
input_shape=input_shape,
output_shape=output_shape)
rnn_configs = load_NN_configs(filename='rnn.yaml',
input_shape=input_shape,
output_shape=output_shape)
gru_configs = load_NN_configs(filename='gru.yaml',
input_shape=input_shape,
output_shape=output_shape)
gp_configs = load_GP_configs(filename='gp.yaml',
nb_outputs=np.prod(output_shape),
batch_size=batch_size,
nb_train_samples=N)
def test_assemble_narx():
for i in xrange(3):
model = assemble('NARX', narx_configs[str(i) + 'H'])
model.compile(optimizer=optimizer, loss='mse')
assert model.built
def test_assemble_gpnarx():
for gp_type in ['GP', 'MSGP']:
model = assemble('GP-NARX', [narx_configs['1H'], gp_configs[gp_type]])
loss = [gen_gp_loss(gp) for gp in model.output_layers]
model.compile(optimizer=optimizer, loss=loss)
assert model.built
def test_assemble_rnn():
for i in xrange(1, 3):
model = assemble('RNN', rnn_configs[str(i) + 'H'])
model.compile(optimizer=optimizer, loss='mse')
assert model.built
def test_assemble_gprnn():
for gp_type in ['GP', 'MSGP']:
model = assemble('GP-RNN', [rnn_configs['1H'], gp_configs[gp_type]])
loss = [gen_gp_loss(gp) for gp in model.output_layers]
model.compile(optimizer=optimizer, loss=loss)
assert model.built
def test_assemble_lstm():
for i in xrange(1, 3):
model = assemble('LSTM', lstm_configs[str(i) + 'H'])
model.compile(optimizer=optimizer, loss='mse')
assert model.built
def test_assemble_gplstm():
for gp_type in ['GP', 'MSGP']:
model = assemble('GP-LSTM', [lstm_configs['1H'], gp_configs[gp_type]])
loss = [gen_gp_loss(gp) for gp in model.output_layers]
model.compile(optimizer=optimizer, loss=loss)
assert model.built
def test_assemble_gru():
for i in xrange(1, 3):
model = assemble('GRU', gru_configs[str(i) + 'H'])
model.compile(optimizer=optimizer, loss='mse')
assert model.built
def test_assemble_gpgru():
for gp_type in ['GP', 'MSGP']:
model = assemble('GP-GRU', [gru_configs['1H'], gp_configs[gp_type]])
loss = [gen_gp_loss(gp) for gp in model.output_layers]
model.compile(optimizer=optimizer, loss=loss)
assert model.built
| mit | 4,996,376,276,300,175,000 | 35.285714 | 78 | 0.583333 | false |
Nateowami/flex-languagedepot-metadata | src/LanguageDepotAnalyze.py | 1 | 5946 | #!/usr/bin/python3
import sys
import traceback
import os
import glob
import json # data type no.1
import psycopg2
from importlib import import_module
import subprocess
class Runner(object):
"""find the files in this directory and ones above this directory,
including the mercurial files"""
def __init__(self, config, dataPath):
# Makes sure dataPath ends with a '/' character
global rootProjectFolder
if (dataPath[-1] == "/"):
rootProjectFolder = dataPath
else:
rootProjectFolder = dataPath + '/'
self._checkCfgType(config)
# end of init
def _checkCfgType(self, config):
# declare credential fields here
global usrhost
global databse
global usrname
global usrpasswd
# check what kind of format the config file uses
configOutput = subprocess.check_output(['cat', config]).decode('utf-8')
try:
parsedConfig = json.loads(configOutput)
parsedConfig['host']
parsedConfig['dbname']
parsedConfig['user']
parsedConfig['password']
except (ValueError):
print("{} is not valid json.".format(config))
return
except (KeyError):
print("{} does not contain proper credentials. "
"(must include 'host', 'dbname', 'user', and 'password')"
.format(config))
return
else:
usrhost = parsedConfig['host']
databse = parsedConfig['dbname']
usrname = parsedConfig['user']
usrpasswd = parsedConfig['password']
def run(self):
# checks to see if the credentials came through
if (
"usrpasswd" not in globals() or
"usrname" not in globals() or
"databse" not in globals() or
"usrhost" not in globals()
):
print('Not enough credentials.')
return
# now it connects to the database, to see if they're correct
conn_string = 'host={} dbname={} user={} password={}'.format(
usrhost, databse, usrname, usrpasswd)
try:
conn = psycopg2.connect(conn_string)
except Exception:
print('Incorrect Credentials.')
raise Exception
# find all files/folders in root folder
files = glob.glob(rootProjectFolder + '*')
files.sort()
listOfProjects = [f for f in files if os.path.isdir(f) and os.path.isfile(os.path.join(f, 'FLExProject.ModelVersion'))]
numOfProjects = len(listOfProjects)
for folder in listOfProjects:
fldrIndex = listOfProjects.index(folder)
# Analyzer needs to pass rootProjectFolder as a parameter
# so that the directory can be cropped out of the name later
analyzer = Analyze(
folder, rootProjectFolder, fldrIndex, numOfProjects)
try:
analyzer.run(conn)
except Exception:
print("Unfortunately, %s had a problem:\n" % folder)
print("-"*60)
traceback.print_exc(file=sys.stdout)
print("-"*60)
print("Moving on...\n\n")
# end of Runner class
class Analyze(object):
"""retrieve various valuable pieces of information"""
def __init__(self, hgdir, parentDirs, current, totalNumber):
# To get the project name, remove the parent directories from the
# project's path to get just the directory name.
self.name = hgdir[len(parentDirs):]
self.hgdir = hgdir
print(
'(%s/%s) Scanning %s' % (current+1, totalNumber, self.name),
end='')
def run(self, conn):
# check if the project is already entered into the database, otherwise
# continue as normal
# Why? In what situation would it be scanned twice? XXX
curs = conn.cursor()
curs.execute(
"SELECT scanDone FROM project.metadata WHERE name = %s;",
(self.name,))
entries = curs.fetchone()
if (entries == (True,)):
print('\nAlready scanned. Moving on...')
return
else:
# insert name into database, this creates a row we can use later
curs.execute(
"INSERT INTO project.metadata (name) VALUES (%s);",
(self.name,))
curs.execute(
"UPDATE project.metadata SET projectCode = %s "
"WHERE name = %s;",
(self.name, self.name))
conn.commit()
listOfCapabilities = getListOfCapabilities()
# import a capability module from the list
# use a capability to get data from the project, then add that data
# to the row received from before
for capabilityName in listOfCapabilities:
capabilityModule = import_module(capabilityName)
print('.', end='')
result = capabilityModule.tasks.analyze(self.hgdir)
capabilityModule.tasks.updateDb(conn, self.name, result)
# Set scanDone to True in the database
curs.execute(
"UPDATE project.metadata SET scanDone = %s WHERE name = %s;",
(True, self.name))
conn.commit()
print('Done!')
# end of run()
# end of Analyze class
def getListOfCapabilities():
# glob all classes in the capabilities folder
# except the base class (capability.py) and __init__.py
listOfCapabilities = []
unfiltered = glob.glob('capabilities/*.py')
unfiltered.remove('capabilities/capability.py')
unfiltered.remove('capabilities/__init__.py')
for item in unfiltered:
listOfCapabilities.append(item.replace('/', '.').replace('.py', ''))
return listOfCapabilities
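# Minimal usage sketch (paths are placeholders, not part of the original
# module): point Runner at a JSON credentials file and at the folder holding
# the FLEx project directories, then start the scan.
#
#   runner = Runner('config.json', '/var/lib/languagedepot/projects')
#   runner.run()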
| mit | -7,517,978,592,476,462,000 | 34.819277 | 127 | 0.573158 | false |
meppe/ros-ort | src/frcnn/src/frcnn/detector.py | 1 | 7209 | import time
import errno
import sys
ros_slam_path = "/opt/ros-ort"
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn")
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn/caffe-fast-rcnn/python")
sys.path.insert(0, ros_slam_path+"/src/frcnn/src/py-faster-rcnn/lib")
import rospy
from ort_msgs.msg import Object_bb_list
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from utils.timer import Timer
import numpy as np
import caffe
from threading import Thread
import os
class Detector:
DETECT_RUNNING = False
def __init__(self, classes, prototxt_file, caffemodel_file, args, class_properties=None):
self.classes = classes
self.current_scores = []
self.current_boxes = []
self.current_frame = None
self.current_frame_timestamp = None
self.current_frame_header = None
self.frames_detected = 0
self.detection_start = time.time()
self.args = args
self.CONF_THRESH = args.conf_threshold
# print ("THRESH" + str(self.CONF_THRESH))
self.cls_score_factors = {}
self.set_cls_score_factors(class_properties)
rospy.init_node("frcnn_detector")
print("node initialized")
cfg.TEST.HAS_RPN = True # Use RPN for proposals
prototxt = prototxt_file
caffemodel = caffemodel_file
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./download_caffemodels.sh?').format(caffemodel))
if not os.path.isfile(prototxt):
raise IOError(("{:s} not found.\nMaybe this model is incompatible with the "
"respective network you chose.").format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
print("Set caffe to CPU mode")
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
print("Set caffe to GPU mode, running on GPU {}".format(cfg.GPU_ID))
self.net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _ = im_detect(self.net, im)
# Create bounding box publisher
self.bb_pub = rospy.Publisher('frcnn/bb', Object_bb_list, queue_size=10)
# self.bb_img_pub = rospy.Publisher('frcnn/bb_img', Image, queue_size=1)
self.detection_start = time.time()
self.sub_frames = rospy.Subscriber("/frcnn_input/image_raw", Image, self.cb_frame_rec, queue_size=10)
rospy.spin()
def set_cls_score_factors(self, class_properties):
'''
This sets the factor to multiply the score with, depending on the object property type (e.g., shape, color, class)
:param class_properties:
:return:
'''
if class_properties == None:
return
for prop in class_properties.keys():
score_factor = class_properties[prop][0]
for cls in class_properties[prop][1]:
self.cls_score_factors[cls] = float(score_factor)
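    # Illustrative shape of the class_properties argument consumed above (all
    # keys and values are hypothetical): each property maps to a tuple of
    # (score_factor, [class names it applies to]), e.g.
    #   {'colour': (0.5, ['red_ball', 'blue_ball']), 'shape': (0.8, ['box'])}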
def pub_detections(self):
is_keyframe = False
timestamp = self.current_frame_header.seq
# print("Publishing bb with timestamp {}".format(timestamp))
frame_id = self.current_frame_header.frame_id
bb_ul_xs = []
bb_ul_ys = []
bb_lr_xs = []
bb_lr_ys = []
bb_scores = []
obj_labels = []
class_names = []
for cls_ind, cls in enumerate(self.classes[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = self.current_boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
cls_scores = self.current_scores[:, cls_ind]
for i, b in enumerate(cls_boxes):
score = cls_scores[i]
if cls in self.cls_score_factors.keys():
cls_score_factor = self.cls_score_factors[cls]
score *= cls_score_factor
if float(score) < float(self.CONF_THRESH):
continue
b_ul_x = b[0]
b_ul_y = b[1]
b_lr_x = b[2]
b_lr_y = b[3]
bb_ul_xs.append(b_ul_x)
bb_ul_ys.append(b_ul_y)
bb_lr_xs.append(b_lr_x)
bb_lr_ys.append(b_lr_y)
bb_scores.append(score)
obj_labels.append(cls+"_"+str(i))
class_names.append(cls)
bb_msg = Object_bb_list(frame_id, timestamp, is_keyframe, bb_ul_xs, bb_ul_ys, bb_lr_xs, bb_lr_ys, class_names,
obj_labels, bb_scores)
print("Publishing {} detections.".format(len(obj_labels)))
self.bb_pub.publish(bb_msg)
def frame_detect(self, net, im):
if self.args.cpu_mode:
caffe.set_mode_cpu()
# print("Set caffe to CPU mode")
else:
caffe.set_mode_gpu()
caffe.set_device(self.args.gpu_id)
cfg.GPU_ID = self.args.gpu_id
# print("Set caffe to GPU mode, running on GPU {}".format(cfg.GPU_ID))
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
self.current_scores, self.current_boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, self.current_boxes.shape[0])
def deserialize_and_detect_thread(self, msg):
'''
Start object detection. Parse image message and start frame_detect
:param msg:
:return:
'''
# If detection is not already running start a new detection
if not Detector.DETECT_RUNNING:
Detector.DETECT_RUNNING = True
self.current_frame_header = msg.header
print("Starting detection of frame {}.".format(msg.header.seq))
self.frames_detected += 1
bridge = CvBridge()
cv_image = bridge.imgmsg_to_cv2(msg, msg.encoding)
img = np.asarray(cv_image)
if len(img.shape) == 2:
img = np.asarray([img, img, img])
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 0)
self.current_frame = img
            assert self.net is not None, "No network selected"
if self.net is not None:
self.frame_detect(self.net, img)
self.pub_detections()
now = time.time()
detection_time = now - self.detection_start
fps = self.frames_detected / detection_time
print("Running for {} sec., detection with {} fps.".format(detection_time, fps))
Detector.DETECT_RUNNING = False
# Skip detection if another detection is running already
else:
pass
def cb_frame_rec(self, msg):
t = Thread(target=self.deserialize_and_detect_thread, args=[msg])
t.start()
| gpl-3.0 | -3,395,597,467,868,489,700 | 36.159794 | 122 | 0.569843 | false |
JasonKessler/scattertext | demo_moral_foundations.py | 1 | 1709 | import scattertext as st
convention_df = st.SampleCorpora.ConventionData2012.get_data()
moral_foundations_feats = st.FeatsFromMoralFoundationsDictionary()
corpus = st.CorpusFromPandas(convention_df,
category_col='party',
text_col='text',
nlp=st.whitespace_nlp_with_sentences,
feats_from_spacy_doc=moral_foundations_feats).build()
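# use_metadata() makes the Cohen's d scorer operate on the metadata features, i.e. the Moral Foundations categories, rather than on individual terms.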
cohens_d_scorer = st.CohensD(corpus).use_metadata()
term_scorer = cohens_d_scorer.set_categories('democrat', ['republican'])
mfd_df = term_scorer.get_score_df()
print(mfd_df.head())
mfd_df.to_csv('demo_moral_foundations.csv')
print('See demo_moral_foundations.csv for the output.')
html = st.produce_frequency_explorer(corpus,
category='democrat',
category_name='Democratic',
not_category_name='Republican',
metadata=convention_df['speaker'],
use_non_text_features=True,
use_full_doc=True,
term_scorer=st.CohensD(corpus).use_metadata(),
grey_threshold=0,
width_in_pixels=1000,
topic_model_term_lists=moral_foundations_feats.get_top_model_term_lists(),
metadata_descriptions=moral_foundations_feats.get_definitions())
fn = 'demo_moral_foundations.html'
with open(fn, 'wb') as out:
out.write(html.encode('utf-8'))
print('Open ./%s in Chrome.' % (fn))
| apache-2.0 | 6,000,145,424,736,266,000 | 50.787879 | 111 | 0.529549 | false |
krisaju95/NewsArticleClustering | module7_skMeansClustering.py | 1 | 7438 | import pickle
import numpy as np
import pandas as pd
import os
import math
path = "C:/Users/hp/Desktop/FINAL YEAR PROJECT/S8/"
D = set()
A = []
words = set()
dataFrame2 = pickle.load( open(os.path.join(path, 'Feature Set','dataFrame2.p'), "rb" ))
dataFrame3 = pickle.load( open(os.path.join(path, 'Feature Set','dataFrame3.p'), "rb" ))
cosineSimilarityMatrix = pickle.load( open(os.path.join(path, 'KMeansClustering','dataFrame4.p'), "rb" ))
wordSetSize = len(dataFrame3.columns)
numberOfDocuments = len(dataFrame3.index)
m = 1
centroids = pickle.load( open(os.path.join(path, 'KMeansClustering','initialCentroids.p'), "rb" ))
dataFrame5 = pd.DataFrame(np.zeros(numberOfDocuments).reshape(numberOfDocuments,1))
clusters = []
previousClusters = []
k = len(centroids.index)
centroidCosineSimilarity = pd.DataFrame(np.zeros(shape = (numberOfDocuments , k)).reshape(numberOfDocuments , k))
# Check if the newly found clusters are the same as the previously found clusters
def convergenceCase():
i =0
if previousClusters == []:
return False
for cluster in clusters:
if cluster != previousClusters[i]:
return False
else:
i = i + 1
return True
# Given two documents, calculate their cosine similarity
def cosineSimilarity(value1 , value2):
d1 = 0
d2 = 0
dotProduct = 0
v1 = value1.as_matrix()
v2 = value2.as_matrix()
document1 = np.square(v1)
document2 = np.square(v2)
dotProduct = np.dot(v1 , v2)
d1 = math.sqrt( document1.sum() )
d2 = math.sqrt( document2.sum() )
if d1 * d2 == 0:
return 0
cosineSimilarityValue = dotProduct/(d1*d2)
return cosineSimilarityValue
# Find the most similar centroid for each document in the dataset
def findMostSimilarCentroids():
mostSimilarValue = 0
mostSimilarCentroid = 0
for row in dataFrame5.index:
mostSimilarValue = 0
mostSimilarCentroid = 0
for column in centroidCosineSimilarity.columns:
if centroidCosineSimilarity.ix[row , column] > mostSimilarValue:
mostSimilarValue = centroidCosineSimilarity.ix[row , column]
mostSimilarCentroid = column
dataFrame5.ix[row , "ClusterID"] = mostSimilarCentroid
dataFrame5.ix[row , "maxSimilarityValue"] = mostSimilarValue
# Initialize the set D with all the documents from the dataset
def initializeSetD():
for column in cosineSimilarityMatrix.columns:
D.add(column)
# Create the initial set of clusters with k empty lists, each empty list being a cluster
def initializeClusters():
global clusters
clusters = []
for i in range(k):
clusters.append([])
# Initialize a dataframe for the centroid vectors with zero values
def initializeCentroids():
for row in centroids.index:
for word in dataFrame3.columns:
centroids.ix[row , word] = 0
# Find the new centroids for each cluster once the data has been updated
def calculateNewCentroids():
global centroids
initializeCentroids()
clusterID = 0
	clusterSizes = [0] * k  # one document count per cluster
dataFrame3Matrix = dataFrame3.as_matrix()
centroidsMatrix = centroids.as_matrix()
centroidColumns = centroids.columns
for row in dataFrame5.index:
clusterID = dataFrame5.ix[row , "ClusterID"]
clusterSizes[int(clusterID)] = clusterSizes[int(clusterID)] + 1
centroidsMatrix[int(clusterID)] = np.add(centroidsMatrix[int(clusterID)] , dataFrame3Matrix[row])
for row in centroids.index:
centroidsMatrix[row] = np.divide(centroidsMatrix[row] , float(clusterSizes[row]))
centroids = pd.DataFrame(centroidsMatrix)
centroids.columns = centroidColumns
# Create a dataframe with cosine similarity values for all documents with each of the centroids
def calculateCosineSimilarity():
for row in range(numberOfDocuments):
document1 = dataFrame3.loc[row , :]
for column in range(k):
document2 = centroids.loc[column , :]
centroidCosineSimilarity.ix[row , column] = cosineSimilarity(document1 , document2)
# Based on the data in dataFrame5, place each document in its respective cluster
def generateClusters():
clusterID = 0
initializeClusters()
for row in dataFrame5.index:
clusterID = int(dataFrame5.ix[row , "ClusterID"])
clusters[clusterID].append(row)
# Find the centroid with maximum similarity for a given document and return the clusterID along with the similarity value
def findClosestCluster(row):
maxSimilarityValue = 0
clusterID = 0
for centroid in centroidCosineSimilarity.columns:
if centroidCosineSimilarity.ix[row , centroid] > maxSimilarityValue:
maxSimilarityValue = centroidCosineSimilarity.ix[row , centroid]
clusterID = centroid
return clusterID , maxSimilarityValue
# Create a dataframe with the cluster ID and similarity value for each document
def updateCentroidData():
clusterID = 0
newSimilarityValue = 0
for row in dataFrame5.index:
clusterID = int(dataFrame5.ix[row , "ClusterID"])
if centroidCosineSimilarity.ix[row , clusterID] < dataFrame5.ix[row , "maxSimilarityValue"]:
clusterID , newSimilarityValue = findClosestCluster(row)
dataFrame5.ix[row , "maxSimilarityValue"] = newSimilarityValue
dataFrame5.ix[row , "ClusterID"] = clusterID
else:
dataFrame5.ix[row , "maxSimilarityValue"] = centroidCosineSimilarity.ix[row , clusterID]
# Main function to perform clustering on the dataset
def skMeansClustering():
global previousClusters
print "Performing Spherical K-Means Clustering"
calculateCosineSimilarity()
findMostSimilarCentroids()
generateClusters()
for i in range(50):
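		# Each iteration recomputes the centroids, reassigns documents to their most similar centroid, and stops early once cluster memberships no longer change.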
calculateNewCentroids()
calculateCosineSimilarity()
updateCentroidData()
generateClusters()
#print dataFrame5
if convergenceCase():
break
else:
print "Clustering iteration " , i + 1
#print centroidCosineSimilarity
previousClusters = list(clusters)
print "Converged in ", i , " iteration(s)"
print "Clusters have been generated"
print "Saving data in DataFrame5 as a pickle package and as a CSV"
dataFrame5.to_pickle(os.path.join(path, 'KMeansClustering','dataFrame5.p'))
dataFrame5.to_csv(os.path.join(path, 'KMeansClustering','dataFrame5.csv'))
print "DataFrame5 has been saved"
skMeansClustering() | gpl-3.0 | 1,653,358,669,395,767,000 | 37.365079 | 330 | 0.607287 | false |
rickyrem/garrulous-api | model/Database.py | 1 | 2228 | # Garrulous API
# Authors: Michael Pierre and Richard Meyers
"""
Copyright (C) 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sqlite3
import os
import logging
import pprint
class Database(object):
def __init__(self):
super(Database, self).__init__()
base_dir = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(os.path.dirname(base_dir), "database")
db_path = os.path.join(db_path, "garrulous.db")
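		# Open (or create) the SQLite database file and keep one shared connection and cursor for this instance.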
self.conn = sqlite3.connect(db_path)
self.db_cursor = self.conn.cursor()
def write(self, sql, params=()):
"""
Use this method for queries that do not return rows.
:param sql:
:return:
"""
try:
with self.conn:
self.conn.execute(sql, params)
return True
except sqlite3.IntegrityError:
print "Could not run sql: " + sql
return False
def query(self, sql, params=()):
"""
Only use this when a query returns rows.
:param sql:
:return:
"""
try:
self.db_cursor.execute(sql, params)
return self.db_cursor.fetchall()
except sqlite3.IntegrityError:
print "Could not run sql: " + sql
return False
def queryOne(self, sql, params=()):
"""
Only use this when a query returns rows.
:param sql:
:return:
"""
try:
self.db_cursor.execute(sql, params)
return self.db_cursor.fetchone()
except sqlite3.IntegrityError:
print "Could not run sql: " + sql
return False
| apache-2.0 | 6,288,271,877,029,973,000 | 28.315789 | 69 | 0.614901 | false |
jreese/euler | python/problem26.py | 1 | 3006 | import bigfloat
from multiprocessing import Pool
import sys
dmin = 1
dmax = 1000
precision = 8192
fuzz = 5
min_repeats = 3
workers = 1
def is_repeating(substr, whole_string):
p = 0
repeats = 0
remaining = whole_string
while remaining:
if len(remaining) >= len(substr):
if remaining.startswith(substr):
repeats += 1
else:
return 0
else:
if substr[:len(remaining)] == remaining:
repeats += 1
else:
return 0
remaining = remaining[len(substr):]
return repeats
def longest_repeat(d):
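	# Compute 1/d at high precision and look for the longest digit substring that repeats at least min_repeats times, approximating the recurring cycle of 1/d.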
context = bigfloat.precision(precision)
result_float = bigfloat.div(1, d, context=context)
result = str(result_float)[2:].strip('0')[:-fuzz]
result_len = len(result)
#print "d = {0}, result = {1}".format(d, result)
longest = ''
longest_len = 0
found = set()
for i in range(result_len):
remaining = result[i:]
for k in range(i+1, result_len):
substr = result[i:k]
substr_len = len(substr)
if substr == '0' * substr_len:
continue
new_substr = True
for f in found:
if substr == f:
new_substr = False
elif is_repeating(f, substr):
new_substr = False
if not new_substr:
continue
#print "new substring {0}".format(substr)
repeats = is_repeating(substr, remaining)
#print "substring {0} repeats {1} times".format(substr, repeats)
if repeats >= min_repeats:
#print "found repeating substring {0} (occurred {1} times)".format(substr, repeats, i=i, k=k)
found.add(substr)
if longest_len < substr_len:
#print "new longest substr!"
longest = substr
longest_len = substr_len
if remaining[1:] == remaining[1] * len(remaining[1:]):
#print "remaining string is all the same"
break
if found:
#print "Already found repeating substrings, short-circuiting"
break
if remaining == remaining[0] * len(remaining):
#print "remaining string is all the same"
break
if longest:
#print "longest substring for d = {0} is {1}".format(d, longest)
pass
return longest
longest_len = 0
longest_substr = ''
longest_d = 0
for d in range(dmin, dmax):
sys.stdout.write('.')
if d % 50 == 0:
sys.stdout.write("%d\n" % d)
sys.stdout.flush()
substr = longest_repeat(d)
substr_len = len(substr)
if substr_len > longest_len:
longest_len = substr_len
longest_d = d
longest_substr = substr
print ""
print ("longest substr: d = {0}, len = {1}, substr = {2}"
"".format(longest_d, longest_len, longest_substr))
| mit | 2,244,327,258,389,847,000 | 23.842975 | 109 | 0.528277 | false |
alipsgh/tornado | streams/readers/arff_reader.py | 1 | 3091 | """
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import re
from data_structures.attribute import Attribute
from dictionary.tornado_dictionary import TornadoDic
class ARFFReader:
"""This class is used to read a .arff file."""
@staticmethod
def read(file_path):
labels = []
attributes = []
attributes_min_max = []
records = []
data_flag = False
reader = open(file_path, "r")
for line in reader:
if line.strip() == '':
continue
if line.startswith("@attribute") or line.startswith("@ATTRIBUTE"):
line = line.strip('\n\r\t')
line = line.split(' ')
attribute_name = line[1]
attribute_value_range = line[2]
attribute = Attribute()
attribute.set_name(attribute_name)
if attribute_value_range.lower() in ['numeric', 'real', 'integer']:
attribute_type = TornadoDic.NUMERIC_ATTRIBUTE
attribute_value_range = []
attributes_min_max.append([0, 0])
else:
attribute_type = TornadoDic.NOMINAL_ATTRIBUTE
attribute_value_range = attribute_value_range.strip('{}').replace("'", "")
attribute_value_range = attribute_value_range.split(',')
attributes_min_max.append([None, None])
attribute.set_type(attribute_type)
attribute.set_possible_values(attribute_value_range)
attributes.append(attribute)
elif line.startswith("@data") or line.startswith("@DATA"):
data_flag = True
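                # The last declared attribute holds the class labels; record its possible values and drop it from the attribute list.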
labels = attributes[len(attributes) - 1].POSSIBLE_VALUES
attributes.pop(len(attributes) - 1)
continue
elif data_flag is True:
line = re.sub('\s+', '', line)
elements = line.split(',')
for i in range(0, len(elements) - 1):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
elements[i] = float(elements[i])
min_value = attributes_min_max[i][0]
max_value = attributes_min_max[i][1]
if elements[i] < min_value:
min_value = elements[i]
elif elements[i] > max_value:
max_value = elements[i]
attributes_min_max[i] = [min_value, max_value]
records.append(elements)
for i in range(0, len(attributes)):
if attributes[i].TYPE == TornadoDic.NUMERIC_ATTRIBUTE:
attributes[i].set_bounds_values(attributes_min_max[i][0], attributes_min_max[i][1])
return labels, attributes, records
| mit | -7,130,011,598,677,469,000 | 37.126582 | 99 | 0.505015 | false |
nafitzgerald/allennlp | allennlp/models/simple_tagger.py | 1 | 7647 | from typing import Dict, Optional
import numpy
from overrides import overrides
import torch
from torch.nn.modules.linear import Linear
import torch.nn.functional as F
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("simple_tagger")
class SimpleTagger(Model):
"""
This ``SimpleTagger`` simply encodes a sequence of text with a stacked ``Seq2SeqEncoder``, then
predicts a tag for each token in the sequence.
Parameters
----------
vocab : ``Vocabulary``, required
A Vocabulary, required in order to compute sizes for input/output projections.
text_field_embedder : ``TextFieldEmbedder``, required
Used to embed the ``tokens`` ``TextField`` we get as input to the model.
stacked_encoder : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and predicting output tags.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
stacked_encoder: Seq2SeqEncoder,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(SimpleTagger, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self.num_classes = self.vocab.get_vocab_size("labels")
self.stacked_encoder = stacked_encoder
self.tag_projection_layer = TimeDistributed(Linear(self.stacked_encoder.get_output_dim(),
self.num_classes))
if text_field_embedder.get_output_dim() != stacked_encoder.get_input_dim():
raise ConfigurationError("The output dimension of the text_field_embedder must match the "
"input dimension of the phrase_encoder. Found {} and {}, "
"respectively.".format(text_field_embedder.get_output_dim(),
stacked_encoder.get_input_dim()))
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3)
}
initializer(self)
@overrides
def forward(self, # type: ignore
tokens: Dict[str, torch.LongTensor],
tags: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
tokens : Dict[str, torch.LongTensor], required
The output of ``TextField.as_array()``, which should typically be passed directly to a
``TextFieldEmbedder``. This output is a dictionary mapping keys to ``TokenIndexer``
tensors. At its most basic, using a ``SingleIdTokenIndexer`` this is: ``{"tokens":
Tensor(batch_size, num_tokens)}``. This dictionary will have the same keys as were used
for the ``TokenIndexers`` when you created the ``TextField`` representing your
sequence. The dictionary is designed to be passed directly to a ``TextFieldEmbedder``,
which knows how to combine different word representations into a single vector per
token in your input.
tags : torch.LongTensor, optional (default = None)
A torch tensor representing the sequence of integer gold class labels of shape
``(batch_size, num_tokens)``.
Returns
-------
An output dictionary consisting of:
logits : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
unnormalised log probabilities of the tag classes.
class_probabilities : torch.FloatTensor
A tensor of shape ``(batch_size, num_tokens, tag_vocab_size)`` representing
a distribution of the tag classes per word.
loss : torch.FloatTensor, optional
A scalar loss to be optimised.
"""
embedded_text_input = self.text_field_embedder(tokens)
batch_size, sequence_length, _ = embedded_text_input.size()
mask = get_text_field_mask(tokens)
encoded_text = self.stacked_encoder(embedded_text_input, mask)
logits = self.tag_projection_layer(encoded_text)
reshaped_log_probs = logits.view(-1, self.num_classes)
class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view([batch_size,
sequence_length,
self.num_classes])
output_dict = {"logits": logits, "class_probabilities": class_probabilities}
if tags is not None:
loss = sequence_cross_entropy_with_logits(logits, tags, mask)
for metric in self.metrics.values():
metric(logits, tags, mask.float())
output_dict["loss"] = loss
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Does a simple position-wise argmax over each token, converts indices to string labels, and
adds a ``"tags"`` key to the dictionary with the result.
"""
all_predictions = output_dict['class_probabilities']
all_predictions = all_predictions.cpu().data.numpy()
if all_predictions.ndim == 3:
predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]
else:
predictions_list = [all_predictions]
all_tags = []
for predictions in predictions_list:
argmax_indices = numpy.argmax(predictions, axis=-1)
tags = [self.vocab.get_token_from_index(x, namespace="labels")
for x in argmax_indices]
all_tags.append(tags)
output_dict['tags'] = all_tags
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()}
@classmethod
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
embedder_params = params.pop("text_field_embedder")
text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
initializer = InitializerApplicator.from_params(params.pop('initializer', []))
regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
return cls(vocab=vocab,
text_field_embedder=text_field_embedder,
stacked_encoder=stacked_encoder,
initializer=initializer,
regularizer=regularizer)
| apache-2.0 | 652,954,486,819,484,300 | 47.398734 | 102 | 0.629789 | false |
caesar0301/omnilab-misc | OmniperfTools/groundtruth.py | 1 | 4311 | #!/usr/bin/env python
# Ground truth for evaluating Activity-Entity model
#
# By chenxm
#
import os
import sys
import numpy
from PyOmniMisc.traffic.http import HTTPLogReader
from PyOmniMisc.utils import stat
from PyOmniMisc.model.webtree import WebTree
def readUserClickTS(fn):
# read user click time series
ucts = []
i = 0
for line in open(fn, 'rb'):
if i != 0:
line = line.strip('\r\n ')
if len(line) == 0: continue
ucts.append(float(line.split('\t')[0]))
i+=1
return ucts
def readHttpEntries(fn):
# read http logs
etrs = []
for entry in HTTPLogReader(fn):
if entry is not None:
etrs.append(entry)
etrs = [e for e in etrs if e.rqtstart() != None] # remove entity without request times
etrs.sort(key=lambda x: x.rqtstart()) # sort entities by request times
return etrs
def modelGT(trace_folder):
print("Modeling groudtruth..")
# User click files
uc = os.path.join(trace_folder, 'userclicks2.out')
if not os.path.exists(uc):
uc = os.path.join(trace_folder, 'userclicks.out')
if not os.path.exists(uc):
raise Exception("Sry, I do not find userclicks*.out in given folder.")
# Read user clicks
ucts = readUserClickTS(uc)
if len(ucts) == 0:
print("No click times")
sys.exit(-1)
print len(ucts)
# Http log file
hl = os.path.join(trace_folder, 'http_logs')
if not os.path.exists(hl):
raise Exception("Sry, I do not find *http_logs*.out in given folder.")
# Read http logs
etrs = readHttpEntries(hl)
if len(etrs) == 0:
print("No entries")
sys.exit(-1)
# prepare data...
ua_ets = {}
for e in etrs:
ua = e.ua()
if ua not in ua_ets:
ua_ets[ua] = []
ua_ets[ua].append(e)
# time model
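	# Group each user agent's requests into trees: consecutive requests within a 3-second gap belong to the same activity.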
forest = {}
for ua in ua_ets:
if ua not in forest:
forest[ua] = []
last = None
tree = []
for e in ua_ets[ua]:
if last is None:
tree.append(e)
else:
if e.rqtstart() - last.rqtstart() <= 3: # sec, request gap
tree.append(e)
elif len(tree) != 0:
forest[ua].append(tree)
tree = []
last = e
# click times
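	# Drop trees that contain no request within 2 seconds of a recorded user click.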
for ua in forest:
removed = []
for tree in forest[ua]:
found = False
for node in tree:
for ts in ucts:
if node.rqtstart() - ts < 2:
found = True
break
if found: break
if not found:
removed.append(tree)
for r in removed:
forest[ua].remove(r)
return forest
def overlap_portion(t1, t2): # t1 covers t2
""" We user FMeasure to measure the distance between two tree
As for t1 covering t2, t2 is treated as the true value, and
t1 is the predicted value.
"""
dup = overlap_cnt(t1, t2)
recall = dup/len(t2)
precision = dup/len(t1)
if recall == 0 and precision == 0:
return None
return stat.FMeasure(precision, recall)
def overlap_cnt(t1, t2):# t1 covers t2
if not isinstance(t1, list) or not isinstance(t2, list) or \
len(t1) == 0 or len(t2) == 0:
raise ValueError("Invalid parameters: list required")
dup = 0.0
for e1 in t1:
for e2 in t2:
if e1 == e2:
dup +=1
break
return dup
def evaluate(forest, forest_gt):
print "Evaluation result:"
uas_target = set(forest.keys())
uas_gt = set(forest_gt.keys())
uas = uas_target & uas_gt
res = []
for ua in uas:
print ua
trees_gt = forest_gt[ua]
trees_target = []
for o in forest[ua]:
# convert format
if isinstance(o, WebTree):
tree = o.fruits()
trees_target.append(tree)
elif isinstance(o, list):
trees_target.append(o)
# evaluate
print "Target: %d, GT: %d" % (len(trees_target),len(trees_gt))
# Entity classified accuracies (in two modes):
# Trace level accuracy--------------------------
fms = []
for t1 in trees_gt:
mx = 0 # match percentage
for t2 in trees_target:
p = overlap_portion(t2, t1)
if p is not None and p > mx:
mx = p
fms.append(mx)
if len(fms) > 0:
m = numpy.mean(fms)
print m
res.append(m)
#-----------------------------------------------
# Activity level accuracy-----------------------
# fms = []
# for t1 in trees_gt:
# mx = 0
# for t2 in trees_target:
# p = overlap_portion(t2, t1)
# if p is not None and p > mx:
# mx = p
# fms.append(mx)
# print fms
# res.extend(fms)
#-----------------------------------------------
return res | gpl-2.0 | -1,255,218,300,794,877,700 | 22.955556 | 90 | 0.597309 | false |
mozilla/FlightDeck | apps/xpi/tests/test_building.py | 1 | 19311 | # coding=utf-8
import commonware
import os
import shutil
import simplejson
import tempfile
import time
from waffle.models import Switch
from mock import Mock
#from nose.tools import eq_
from utils.test import TestCase
from django.contrib.auth.models import User
from django.conf import settings
from jetpack.models import Module, Package, PackageRevision, SDK
from xpi import xpi_utils
from base.helpers import hashtag
log = commonware.log.getLogger('f.tests')
OLDXPIBUILD = xpi_utils.build
class XPIBuildTest(TestCase):
fixtures = ['mozilla', 'core_sdk', 'users', 'packages']
def setUp(self):
self.hashtag = hashtag()
self.author = User.objects.get(username='john')
self.addon = Package.objects.get(name='test-addon',
author__username='john')
self.library = Package.objects.get(name='test-library')
self.addonrev = self.addon.latest
self.librev = self.library.latest
mod = Module.objects.create(
filename='test_module',
code='// test module',
author=self.author
)
self.librev.module_add(mod)
self.SDKDIR = tempfile.mkdtemp()
self.attachment_file_name = os.path.join(
settings.UPLOAD_DIR, 'test_filename.txt')
handle = open(self.attachment_file_name, 'w')
handle.write('.')
handle.close()
# link core to the latest SDK
self.createCore()
settings.XPI_AMO_PREFIX = "file://%s" % os.path.join(
settings.ROOT, 'apps/xpi/tests/sample_addons/')
self.target_basename = os.path.join(
settings.XPI_TARGETDIR, self.hashtag)
self.backup_get_source_dir = SDK.get_source_dir
SDK.get_source_dir = Mock(return_value=os.path.join(
settings.ROOT, 'lib', settings.TEST_SDK))
def tearDown(self):
xpi_utils.build = OLDXPIBUILD
self.deleteCore()
if os.path.exists(self.SDKDIR):
shutil.rmtree(self.SDKDIR)
if os.path.exists(self.attachment_file_name):
os.remove(self.attachment_file_name)
if os.path.exists('%s.xpi' % self.target_basename):
os.remove('%s.xpi' % self.target_basename)
if os.path.exists('%s.json' % self.target_basename):
os.remove('%s.json' % self.target_basename)
SDK.get_source_dir = self.backup_get_source_dir
def makeSDKDir(self):
os.mkdir('%s/packages' % self.SDKDIR)
def test_package_dir_generation(self):
" test if all package dirs are created properly "
self.makeSDKDir()
package_dir = self.library.latest.make_dir('%s/packages' % self.SDKDIR)
self.failUnless(os.path.isdir(package_dir))
self.failUnless(os.path.isdir(
'%s/%s' % (package_dir, self.library.latest.get_lib_dir())))
def test_save_modules(self):
" test if module is saved "
self.makeSDKDir()
package_dir = self.library.latest.make_dir('%s/packages' % self.SDKDIR)
self.librev.export_modules(
'%s/%s' % (package_dir, self.library.latest.get_lib_dir()))
self.failUnless(os.path.isfile('%s/%s/%s.js' % (
package_dir,
self.library.latest.get_lib_dir(),
'test_module')))
def test_manifest_file_creation(self):
" test if manifest is created properly "
self.makeSDKDir()
package_dir = self.library.latest.make_dir('%s/packages' % self.SDKDIR)
self.librev.export_manifest(package_dir)
self.failUnless(os.path.isfile('%s/package.json' % package_dir))
handle = open('%s/package.json' % package_dir)
manifest_json = handle.read()
manifest = simplejson.loads(manifest_json)
self.assertEqual(manifest, self.librev.get_manifest())
def test_minimal_lib_export(self):
" test if all the files are in place "
self.makeSDKDir()
self.librev.export_files_with_dependencies('%s/packages' % self.SDKDIR)
package_dir = self.librev.get_dir_name('%s/packages' % self.SDKDIR)
self.failUnless(os.path.isdir(package_dir))
self.failUnless(os.path.isdir(
'%s/%s' % (package_dir, self.library.latest.get_lib_dir())))
self.failUnless(os.path.isfile('%s/package.json' % package_dir))
self.failUnless(os.path.isfile('%s/%s/%s.js' % (
package_dir,
self.library.latest.get_lib_dir(),
'test_module')))
def test_addon_export_with_dependency(self):
" test if lib and main.js are properly exported "
self.makeSDKDir()
addon_dir = self.addon.latest.get_dir_name('%s/packages' % self.SDKDIR)
lib_dir = self.library.latest.get_dir_name('%s/packages' % self.SDKDIR)
self.addonrev.dependency_add(self.librev)
self.addonrev.export_files_with_dependencies(
'%s/packages' % self.SDKDIR)
self.failUnless(os.path.isdir(
'%s/%s' % (addon_dir, self.addon.latest.get_lib_dir())))
self.failUnless(os.path.isdir(
'%s/%s' % (lib_dir, self.library.latest.get_lib_dir())))
self.failUnless(os.path.isfile(
'%s/%s/%s.js' % (
addon_dir,
self.addon.latest.get_lib_dir(),
self.addonrev.module_main)))
def test_addon_export_with_attachment(self):
"""Test if attachment file is copied."""
self.makeSDKDir()
# create attachment in upload dir
handle = open(self.attachment_file_name, 'w')
handle.write('unit test file')
handle.close()
attachment = self.addonrev.attachment_create(
filename='test_filename.txt',
author=self.author
)
attachment.create_path()
attachment.data = ''
attachment.write()
self.addonrev.export_files_with_dependencies(
'%s/packages' % self.SDKDIR)
self.failUnless(os.path.isfile(self.attachment_file_name))
def test_copying_sdk(self):
xpi_utils.sdk_copy(self.addonrev.sdk.get_source_dir(), self.SDKDIR)
self.failUnless(os.path.isdir(self.SDKDIR))
def test_minimal_xpi_creation(self):
" xpi build from an addon straight after creation "
tstart = time.time()
xpi_utils.sdk_copy(self.addonrev.sdk.get_source_dir(), self.SDKDIR)
self.addonrev.export_keys(self.SDKDIR)
self.addonrev.export_files_with_dependencies(
'%s/packages' % self.SDKDIR)
err = xpi_utils.build(
self.SDKDIR,
self.addon.latest.get_dir_name('%s/packages' % self.SDKDIR),
self.addon.name, self.hashtag, tstart=tstart)
# assert no error output
assert not err[1]
# assert xpi was created
assert os.path.isfile('%s.xpi' % self.target_basename)
assert os.path.isfile('%s.json' % self.target_basename)
def test_addon_with_other_modules(self):
" addon has now more modules "
self.addonrev.module_create(
filename='test_filename',
author=self.author
)
tstart = time.time()
xpi_utils.sdk_copy(self.addonrev.sdk.get_source_dir(), self.SDKDIR)
self.addonrev.export_keys(self.SDKDIR)
self.addonrev.export_files_with_dependencies(
'%s/packages' % self.SDKDIR)
err = xpi_utils.build(
self.SDKDIR,
self.addon.latest.get_dir_name('%s/packages' % self.SDKDIR),
self.addon.name, self.hashtag, tstart=tstart)
# assert no error output
assert not err[1]
# assert xpi was created
assert os.path.isfile('%s.xpi' % self.target_basename)
assert os.path.isfile('%s.json' % self.target_basename)
def test_xpi_with_empty_dependency(self):
" empty lib is created "
lib = Package.objects.create(
full_name='Test Library XPI',
author=self.author,
type='l'
)
librev = lib.latest
self.addonrev.dependency_add(librev)
tstart = time.time()
xpi_utils.sdk_copy(self.addonrev.sdk.get_source_dir(), self.SDKDIR)
self.addonrev.export_keys(self.SDKDIR)
self.addonrev.export_files_with_dependencies(
'%s/packages' % self.SDKDIR)
err = xpi_utils.build(
self.SDKDIR,
self.addon.latest.get_dir_name('%s/packages' % self.SDKDIR),
self.addon.name, self.hashtag, tstart=tstart)
# assert no error output
assert not err[1]
# assert xpi was created
assert os.path.isfile('%s.xpi' % self.target_basename)
assert os.path.isfile('%s.json' % self.target_basename)
def test_xpi_with_dependency(self):
" addon has one dependency with a file "
self.addonrev.dependency_add(self.librev)
tstart = time.time()
xpi_utils.sdk_copy(self.addonrev.sdk.get_source_dir(), self.SDKDIR)
self.addonrev.export_keys(self.SDKDIR)
self.addonrev.export_files_with_dependencies(
'%s/packages' % self.SDKDIR)
err = xpi_utils.build(
self.SDKDIR,
self.addon.latest.get_dir_name('%s/packages' % self.SDKDIR),
self.addon.name, self.hashtag, tstart=tstart)
# assert no error output
assert not err[1]
# assert xpi was created
assert os.path.isfile('%s.xpi' % self.target_basename)
assert os.path.isfile('%s.json' % self.target_basename)
def test_broken_dependency(self):
# A > B
# B > C
# C > D
# A requires via shortcut modules from api-libs, A, B and C
addon = Package.objects.create(
author=self.author,
full_name='A',
name='a',
type='a')
mod = addon.latest.modules.get()
mod.code += """
require('file');
require('addonAmodule');
require('libBmodule');
// this fails
require('libCmodule');
"""
addon.latest.update(mod)
addon.latest.module_create(
author=addon.author,
filename='addonAmodule',
code="// empty module")
# creating Library B
libB = Package.objects.create(
author=self.author,
full_name='B',
name='b',
type='l')
mod = libB.latest.modules.get()
mod.code = """
require('file');
require('libBmodule');
require('libCmodule');
"""
libB.latest.update(mod)
libB.latest.module_create(
author=addon.author,
filename='libBmodule',
code="// empty module")
# creating Library C
libC = Package.objects.create(
author=self.author,
full_name='C',
name='c',
type='l')
mod = libC.latest.modules.get()
mod.code = """
require('file');
require('libCmodule');
"""
libC.latest.update(mod)
libC.latest.module_create(
author=addon.author,
filename='libCmodule',
code="// empty module")
# adding dependencies
libB.latest.dependency_add(libC.latest)
addon.latest.dependency_add(libB.latest)
celery_eager = settings.CELERY_ALWAYS_EAGER
# if workaround is needed
# STDERR will be empty and XPI file doesn't exist
settings.CELERY_ALWAYS_EAGER = False
response = addon.latest.build_xpi(hashtag=self.hashtag)
assert response[0] and not response[1]
assert not os.path.isfile('%s.xpi' % self.target_basename)
# if workaround is working STDERR isn't empty and XPI is still
# not buillt
Switch.objects.create(name='SDKErrorInStdOutWorkaround',
active=True)
response = addon.latest.build_xpi(hashtag=self.hashtag)
assert response[1]
assert not os.path.isfile('%s.xpi' % self.target_basename)
settings.CELERY_ALWAYS_EAGER = celery_eager
def test_addon_with_deep_dependency(self):
# A > B, C
# B > C
# C > D
# A requires via shortcut modules from api-libs, A, B and C
# B requires via shortcut modules from api-libs, B and C
# C requires via shortcut modules from api-libs, C and D
addon = Package.objects.create(
author=self.author,
full_name='A',
name='a',
type='a')
mod = addon.latest.modules.get()
mod.code += """
require('file');
require('addonAmodule');
require('libBmodule');
require('libCmodule');
require('d/libDmodule');
"""
addon.latest.update(mod)
addon.latest.module_create(
author=addon.author,
filename='addonAmodule',
code="// empty module")
# creating Library B
libB = Package.objects.create(
author=self.author,
full_name='B',
name='b',
type='l')
mod = libB.latest.modules.get()
mod.code = """
require('file');
require('libBmodule');
require('libCmodule');
require('d/libDmodule');
"""
libB.latest.update(mod)
libB.latest.module_create(
author=addon.author,
filename='libBmodule',
code="// empty module")
# creating Library C
libC = Package.objects.create(
author=self.author,
full_name='C',
name='c',
type='l')
mod = libC.latest.modules.get()
mod.code = """
require('file');
require('libCmodule');
require('libDmodule');
"""
libC.latest.update(mod)
libC.latest.module_create(
author=addon.author,
filename='libCmodule',
code="// empty module")
# creating Library D
libD = Package.objects.create(
author=self.author,
full_name='D',
name='d',
type='l')
mod = libD.latest.modules.get()
mod.code = """
require('file');
require('libDmodule');
"""
libD.latest.update(mod)
libD.latest.module_create(
author=addon.author,
filename='libDmodule',
code="// empty module")
# now assigning dependencies
libC.latest.dependency_add(libD.latest)
libB.latest.dependency_add(libC.latest)
addon.latest.dependency_add(libC.latest)
addon.latest.dependency_add(libB.latest)
celery_eager = settings.CELERY_ALWAYS_EAGER
settings.CELERY_ALWAYS_EAGER = False
response = addon.latest.build_xpi(hashtag=self.hashtag)
settings.CELERY_ALWAYS_EAGER = celery_eager
assert not response[1]
assert os.path.isfile('%s.xpi' % self.target_basename)
def test_requiring_by_library_name(self):
# A depends on B
# so, you can do require('B'), and it should be B/lib/index.js
Switch.objects.create(name='LibDirInMainAttributeWorkaround',
active=True)
addon = Package.objects.create(
author=self.author,
full_name='A',
name='a',
type='a')
mod = addon.latest.modules.get()
mod.code += """
require('b');
"""
addon.latest.update(mod)
# creating Library B
libB = Package.objects.create(
author=self.author,
full_name='B',
name='b',
type='l')
# now assigning dependencies
addon.latest.dependency_add(libB.latest)
celery_eager = settings.CELERY_ALWAYS_EAGER
settings.CELERY_ALWAYS_EAGER = False
response = addon.latest.build_xpi(hashtag=self.hashtag)
settings.CELERY_ALWAYS_EAGER = celery_eager
assert not response[1]
assert os.path.isfile('%s.xpi' % self.target_basename)
def test_module_with_utf(self):
mod = Module.objects.create(
filename='test_utf',
            code='// Δ',
author=self.author
)
self.library.latest.module_add(mod)
self.makeSDKDir()
package_dir = self.library.latest.make_dir('%s/packages' % self.SDKDIR)
self.librev.export_modules(
'%s/%s' % (package_dir, self.library.latest.get_lib_dir()))
self.failUnless(os.path.isfile('%s/%s/%s.js' % (
package_dir,
self.library.latest.get_lib_dir(),
'test_module')))
def test_utf8_description(self):
        utf8string = 'utf8 Δ utf8'
self.addon.description = utf8string
self.addon.save()
response = self.addon.latest.build_xpi(hashtag=self.hashtag)
assert not response[1]
assert os.path.isfile('%s.xpi' % self.target_basename)
def test_package_included_multiple_times(self):
""" If separate dependencies require the same library, it shouldn't error """
pack = Package.objects.create(type='l', author=self.author)
packrev = pack.latest
self.librev.dependency_add(packrev)
self.addonrev.dependency_add(packrev)
self.addonrev.dependency_add(self.librev)
self.addonrev.build_xpi(hashtag=self.hashtag)
def test_pk_in_harness(self):
xpi_utils.build = Mock()
Switch.objects.create(name='AddRevisionPkToXPI',
active=True)
self.addonrev.build_xpi(hashtag=self.hashtag)
assert 'harness-option' in xpi_utils.build.call_args[1]['options']
def test_components_classes(self):
log.debug(self.addon.latest.modules.all())
mod = self.addon.latest.modules.all()[0]
mod.code = "Components.classes"
self.addon.latest.update(mod)
log.debug(self.addon.latest.modules.all()[0].code)
assert not os.path.isfile('%s.xpi' % self.target_basename)
response = self.addon.latest.build_xpi(hashtag=self.hashtag)
assert response[1]
assert not os.path.isfile('%s.xpi' % self.target_basename)
assert os.path.exists('%s.json' % self.target_basename)
def test_building_xpi_with_1_12(self):
sdk = SDK.objects.create(version='1.12', dir='addon-sdk-1.12')
package = Package.objects.create(author=self.author, type='a')
tstart = time.time()
xpi_utils.sdk_copy(self.addonrev.sdk.get_source_dir(), self.SDKDIR)
package.latest.export_keys(self.SDKDIR)
package.latest.export_files_with_dependencies(
'%s/packages' % self.SDKDIR)
err = xpi_utils.build(
self.SDKDIR,
package.latest.get_dir_name('%s/packages' % self.SDKDIR),
package.name, self.hashtag, tstart=tstart)
# assert no error output
assert not err[1]
# assert xpi was created
assert os.path.isfile('%s.xpi' % self.target_basename)
assert os.path.isfile('%s.json' % self.target_basename)
| bsd-3-clause | -1,400,908,416,646,009,600 | 36.860784 | 85 | 0.57916 | false |
jeremyosborne/python | general/csv2table/csv2sql.py | 1 | 10575 | """Convert a csv file to an sqlite table."""
import sys
import csv
import sqlite3
import os
import re
# pointer to our csv file descriptor
csvFile = None
columnNames = None
columnTypes = None
columnComments = None
validDataTypes = ["string", "number", "date"]
idColumnName = "_id"
outfileName = None
outfileExtension = ".sqlite3"
tableName = None
def confirm(default=True):
"""Waits for user input, and exits on anything other than a string
that begins with "Y" or "y".
@param [default=True] {Boolean} Default response displayed to the user.
Either "[Y/n]:" (if True) for a default affirmative or "[y/N]:" (if False)
for a default negative.
@return {Boolean} True if the user typed in an affirmative response,
False if not.
"""
if default == True:
print "[Y/n]: ",
else:
print "[n/Y]: ",
response = raw_input()
if len(response) == 0:
return default
elif len(response) and (response.lower()[0] == "y"):
return True
else:
return False
def createTable():
"""Create the sqllite3 table and insert data."""
global idColumnName, columnNames, columnTypes, outfileName
print "\033[1;43m--Building data table--\033[1;m"
print "SQL statements used will be output to the screen for inspection."
print ""
conn = sqlite3.connect(outfileName)
cursor = conn.cursor()
# TODO: confirm with user (default no) before dropping the table
cursor.execute("DROP TABLE IF EXISTS "+tableName)
statement = "CREATE TABLE "+tableName+" ("+idColumnName+" INTEGER PRIMARY KEY ASC\n"
for i in range(len(columnNames)):
statement += ", "+columnNames[i]
if columnTypes[i] == "String":
statement += " TEXT\n"
elif columnTypes[i] == "Number":
statement += " NUMERIC\n"
statement += ")"
print statement
print ""
cursor.execute(statement)
conn.commit()
# Insert Data
csvFile.seek(0)
dataReader = csv.reader(csvFile)
# skip the header rows
counter = 0
for row in dataReader:
if counter < 3:
counter += 1
continue
else:
statement = "INSERT INTO "+tableName+" ("
# skip the id column, let it auto-increment
firstColumn = True
for column in columnNames:
if firstColumn == True:
statement += column
firstColumn = False
else:
statement += ", "+column
statement += ") VALUES ("
firstValue = True
for columnNum in range(len(row)):
# Need to get access to the column types to determine if we
# should quote or not
                if firstValue:
                    firstValue = False
                else:
                    statement += ", "
                # Types are the lowercase names from validDataTypes: numbers are
                # inserted as-is, everything else ("string", "date") is quoted.
                if columnTypes[columnNum] == "number":
                    statement += row[columnNum]
                else:
                    statement += "'"+row[columnNum]+"'"
statement += ")"
print statement
cursor.execute(statement)
conn.commit()
# clean up
cursor.close()
conn.close()
def computeSchema():
"""Determines the table schema for our csv file."""
global csvFile, columnNames, columnTypes, columnComments
print "\033[1;43m--Computing schema--\033[1;m"
csvFile.seek(0)
schema = csv.reader(csvFile)
counter = 0
for row in schema:
if counter == 0:
columnNames = row
elif counter == 1:
columnTypes = row
elif counter == 2:
columnComments = row
break
counter += 1
print "We assume the first three rows in your csv file contain header info."
print "If the information looks incorrect, you will have an opportunity"
print "to exit and fix the csv file before creating the output table."
print "--------------------------------------------------------------------"
print "Your columns will be named (from the first row of data):"
for column in range(len(columnNames)):
print "{0:>5}: {1}".format(column, columnNames[column])
print "The data types for the columns (from the second row of data):"
for column in range(len(columnTypes)):
print "{0:>5}: {1}".format(column, columnTypes[column])
print "The descriptions of each column (from the third row of data):"
print "NOTE: Comments are ignored for sql table creation."
for column in range(len(columnComments)):
print "{0:>5}: {1}".format(column, columnComments[column])
print ""
def reportFileStats():
"""Report any stats about the csv file."""
# I think we need a new csv reader every time we want to view
# the file.
global csvFile, validDataTypes
print "\033[1;43m--Computing file stats, checking integrity--\033[1;m"
print "Number of columns in your table (determined from the first row):"
csvFile.seek(0)
columncount = 0
counter = csv.reader(csvFile)
for row in counter:
columncount = len(row)
break
print " {0}".format(columncount)
print "Number of rows in the csv file:"
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
for row in counter:
rowcount += 1
print " {0}".format(rowcount)
print "Check table integrity: expected number of columns per row?"
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
isBadTable = False
for row in counter:
if len(row) != columncount:
print "Error: row {0} has {1} columns, expected {2}".format(rowcount, len(row), columncount)
isBadTable = True
rowcount += 1
if isBadTable == False:
print "\033[1;32mTable integrity check PASS: expected dimensions.\033[1;m"
print ""
else:
print "\033[1;31mTable integrity check FAIL: unexpected dimensions.\033[1;m"
print ""
sys.exit(1)
print "Check table integrity: expected data types for each column?"
print "Valid datatypes are:"
for validType in validDataTypes:
print " {0}".format(validType)
csvFile.seek(0)
counter = csv.reader(csvFile)
rowcount = 0
isBadTable = False
for row in counter:
# data types are in the second row
if rowcount == 1:
columncount = 0
for column in row:
if column not in validDataTypes:
print "Error: column {0} has unexpected type {1}".format(columncount, column)
isBadTable = True
columncount += 1
# Only process the data type row
break
else:
rowcount += 1
if isBadTable == False:
print "\033[1;32mTable integrity check PASS: expected datatypes.\033[1;m"
print ""
else:
print "\033[1;31mTable integrity check FAIL: unexpected datatypes.\033[1;m"
print ""
sys.exit(1)
def init(filepath):
"""Kicks off the program by attempting to open the csv file."""
global csvFile, outfileName, tableName
# read stocks data, print status messages
try:
print "\033[1;43m--Opening csv file--\033[1;m"
csvFile = open(filepath, "rb")
print "\033[1;32mOpened csv file:", filepath,"\033[1;m"
# Figure out database name first
outfileMatches = re.match(r"([\w\S]*)(\.[^.]+$)", os.path.basename(filepath))
if outfileMatches == None:
# Handle the case where we don't have something that qualifies
# as an extension to the file
outfileName = filepath+outfileExtension
else:
outfileName = outfileMatches.group(1)+outfileExtension
# Figure out table name from the file name
tableName = re.match(r"([\w\S]*)(\.[^.]+$)", outfileName).group(1)
# Confirm the table and file names with the user
print "The sqlite3 table will be named:", tableName
print "NOTE: If this table already exists in the db file, the pre-existing"
print "data will be deleted (dropped) and lost."
print "Is", tableName, "the correct table name?"
if not confirm():
print "Please input a new table: "
tableName = raw_input()
print "Is", tableName, "the correct able name?"
if not confirm():
print "We must have a table name."
print ""
sys.exit()
print "The sqlite3 file will be named:", outfileName
print "Is this correct?"
if not confirm():
print "Please input the complete file and path to your sqlite3 db: "
outfileName = raw_input()
print "We will attempt to use the file at:", outfileName
print "Is this okay?"
if not confirm():
print "We need an output file."
print ""
sys.exit()
# TODO: choose a base table name, and inform the user that we will
# attempt to use this name as the table name in the database.
#
# TODO: prompt for okayness from the user, default yes
print ""
except IOError:
print "\033[1;31mFailed to open csv file:", sys.exc_info()[1],"\033[1;m"
print ""
sys.exit(1)
if __name__ == "__main__":
try:
if len(sys.argv) < 2:
print "Usage:"
print "python", sys.argv[0], "file2convert.csv"
sys.exit(1)
else:
# process the file
init(sys.argv[1])
reportFileStats()
computeSchema()
createTable()
# natural exit
sys.exit(0)
except SystemExit:
if csvFile:
# Make sure to close the file
csvFile.close()
print "Exiting program."
| mit | 2,478,496,740,230,532,000 | 33.847458 | 104 | 0.552246 | false |
teoliphant/scipy | scipy/sparse/csgraph/tests/test_connected_components.py | 2 | 1443 | import numpy as np
from numpy.testing import assert_, assert_array_almost_equal
from scipy.sparse import csgraph
def test_weak_connections():
Xde = np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
Xsp = csgraph.csgraph_from_dense(Xde, null_value=0)
for X in Xsp, Xde:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='weak')
assert_(n_components == 2)
assert_array_almost_equal(labels, [0, 0, 1])
def test_strong_connections():
X1de = np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
X2de = X1de + X1de.T
X1sp = csgraph.csgraph_from_dense(X1de, null_value=0)
X2sp = csgraph.csgraph_from_dense(X2de, null_value=0)
for X in X1sp, X1de:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_(n_components == 3)
labels.sort()
assert_array_almost_equal(labels, [0, 1, 2])
for X in X2sp, X2de:
n_components, labels =\
csgraph.connected_components(X, directed=True,
connection='strong')
assert_(n_components == 2)
labels.sort()
assert_array_almost_equal(labels, [0, 0, 1])
| bsd-3-clause | 2,786,668,272,811,028,500 | 29.702128 | 61 | 0.50797 | false |
jpypi/fifar | input_data.py | 1 | 6035 | # Based on scripts at https://github.com/tensorflow/tensorflow/contrib/learn/python/learn/datasets/
'''Dataset utilities'''
import pickle
import collections
from os import path
from tensorflow.python.framework import dtypes
import numpy as np
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
def load_cifar10(data_path):
"""Load the CIFAR10 dataset.
Args:
data_path: string, path to the folder containing the cifar10 dataset
Returns:
Datasets tuple containing the train, validation, and test datasets
"""
train1 = unpickle(path.join(data_path, 'data_batch_1'))
train_data = train1[b'data']
train_target = dense_to_one_hot(train1[b'labels'], 10)
train2 = unpickle(path.join(data_path, 'data_batch_2'))
train_data = np.concatenate((train_data, train2[b'data']), axis=0)
train_target = np.concatenate((train_target, dense_to_one_hot(train2[b'labels'], 10)), axis=0)
train3 = unpickle(path.join(data_path, 'data_batch_3'))
train_data = np.concatenate((train_data, train3[b'data']), axis=0)
train_target = np.concatenate((train_target, dense_to_one_hot(train3[b'labels'], 10)), axis=0)
train_data = train_data.reshape(-1, 32*32*3)
train = DataSet(train_data, train_target)
validate1 = unpickle(path.join(data_path, 'data_batch_4'))
valid_data = validate1[b'data']
valid_target = dense_to_one_hot(validate1[b'labels'], 10)
valid_data = valid_data.reshape(-1, 32*32*3)
validation = DataSet(valid_data, valid_target)
test1 = unpickle(path.join(data_path, 'test_batch'))
test_data = test1[b'data']
test_target = dense_to_one_hot(test1[b'labels'], 10)
test_data = test_data.reshape(-1, 32*32*3)
test = DataSet(test_data, test_target)
return Datasets(train=train, validation=validation, test=test)
def dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
if type(labels_dense) != np.ndarray:
labels_dense = np.asarray(labels_dense)
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
def unpickle(path):
with open(path, 'rb') as f:
return pickle.load(f, encoding='bytes')
#Dataset class taken shamelessly from tensorflow's MNIST tutorial files
class DataSet(object):
def __init__(self,
images,
labels,
dtype=dtypes.float32,
normalize=True,
reshape=True):
"""Construct a DataSet.
'dtype' can either be 'uint8' to leave the input as '[0, 255]', or 'float32'
to rescale into '[0, 1]'.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
dtype)
assert images.shape[0] == labels.shape[0], (
'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
self._num_examples = images.shape[0]
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0]
images = images.astype(np.float32)
images = np.multiply(images, 1.0 / 255.0)
if normalize:
images = self.preprocess(images)
# Convert shape from [num_examples, rows*columns*channels] to
# [num_examples, rows, columns, channels]
if reshape:
images = images.reshape(-1, 3, 32, 32).transpose(0,2,3,1)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def preprocess(self,images):
'''Normalize the data.'''
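        # Z-score normalization per feature: subtract the mean over all examples, then divide by the standard deviation.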
sub_mean = np.subtract(images, np.mean(images, axis=0))
div_std = np.divide(sub_mean, np.std(sub_mean, axis=0))
return div_std
def next_batch(self, batch_size, shuffle=True):
'''Return the next 'batch_size' examples from this data set.'''
start = self._index_in_epoch
#Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = np.arange(self._num_examples)
np.random.shuffle(perm0)
self._images = self.images[perm0]
self._labels = self.labels[perm0]
#Go to the next epoch
if start + batch_size > self._num_examples:
#Finished Epoch
self._epochs_completed += 1
            #Get the rest of the examples in this epoch
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start:self._num_examples]
labels_rest_part = self._labels[start:self._num_examples]
#Shuffle the data
if shuffle:
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self.images[perm]
self._labels = self.labels[perm]
#Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
return np.concatenate((images_rest_part, images_new_part), axis=0), \
np.concatenate((labels_rest_part, labels_new_part), axis=0)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
| mit | 8,557,386,500,900,142,000 | 33.683908 | 99 | 0.607125 | false |
viaict/viaduct | app/forms/util.py | 1 | 8858 | import inspect
import itertools
from wtforms import RadioField, SubmitField, SelectFieldBase
from app.forms.fields import CustomFormSelectField, \
OrderedSelectMultipleField, OrderedQuerySelectMultipleField
class FieldTabGroup:
"""Represents a group of fields divided into tabs."""
def __init__(self, tabs):
"""Tabs should be a list of FieldTabs."""
self.type = self.__class__.__name__
self.tabs = tabs
# Don't allow empty tabs
if len(tabs) == 0:
raise ValueError('Tabs are empty')
# Check if all tabs are FieldTab
if not all(isinstance(t, FieldTab) for t in tabs):
raise ValueError('Tabs should all be instances of FieldTab')
# First field is used to determine the place of the tab group
self._firstfield = tabs[0].field_names[0]
# Make a list of all fieldnames
self._fieldnames = []
for tab in self.tabs:
self._fieldnames.extend(tab.field_names)
def _set_form(self, form):
"""
Pass the form to the FieldTabGroup.
Internal method used by FormWrapper.
"""
self.form = form
# Build a list of (tabname, fieldlist) tuples,
# where fieldlist contains the field objects itself,
# which is why the form object is required
self._tab_fields = []
for tab in self.tabs:
fields = []
for field_name in tab.field_names:
fields.append(getattr(form, field_name))
self._tab_fields.append((tab.name, fields))
def __iter__(self):
if not hasattr(self, 'form'):
raise ValueError('_set_form should be called before iterating')
return iter(self._tab_fields)
@property
def hex_id(self):
"""Get the id of the object as hexadecimals. (used for rendering)."""
return hex(id(self))[2:]
class FieldTab:
"""
Represents a tab containing fields.
To be used in combination with FieldTabGroup.
"""
def __init__(self, name, field_names):
if len(field_names) == 0:
raise ValueError('Fields are empty')
self.name = name
self.field_names = field_names
def __repr__(self):
return "<{} '{}'>".format(self.__class__.__name, self.name)
class FieldVerticalSplit:
"""
Vertical field splits.
Represents a vertical split of fields,
i.e. fields next to each other.
"""
def __init__(self, field_names, large_spacing=False):
"""
field_names should be a list of list of fields to be splitted.
For example,
[['X1', 'X2'], ['Y1', 'Y2']]
will render as:
[ X1 ] [ Y1 ]
[ X2 ] [ Y2 ]
"""
self.amount_splits = len(field_names)
self.type = self.__class__.__name__
# Allowed amounts of splits which all can be divided evenly
allowed_split_amounts = [2, 3, 4]
if self.amount_splits not in allowed_split_amounts:
raise ValueError("Amount of splits should be equal to one of: {}",
", ".join(map(str, allowed_split_amounts)))
self.field_names_list = field_names
# Make a list of all fieldnames (i.e. flatten the field_names list)
self._fieldnames = []
for fields in self.field_names_list:
self._fieldnames.extend(fields)
# First field is used to determine the place of the vertical split
self._firstfield = field_names[0][0]
if large_spacing:
if self.amount_splits == 2:
self.column_sizes = [5, 5]
self.spacing_sizes = [0, 2]
elif self.amount_splits == 3:
self.column_sizes = [3, 4, 3]
self.spacing_sizes = [0, 1, 1]
elif self.amount_splits == 4:
self.column_sizes = [2, 2, 2, 2]
self.spacing_sizes = [0, 1, 2, 1]
else:
self.column_sizes = [12 // self.amount_splits] * self.amount_splits
self.spacing_sizes = [0] * self.amount_splits
def _set_form(self, form):
"""
Pass the form to the FieldVerticalSplit.
Internal method used by FormWrapper.
"""
self.form = form
self._fields = []
for field_names in self.field_names_list:
fields = []
for field_name in field_names:
fields.append(getattr(form, field_name))
self._fields.append(fields)
def __iter__(self):
if not hasattr(self, 'form'):
raise ValueError('_set_form should be called before iterating')
return iter(self._fields)
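# Illustrative usage sketch (hypothetical field names). A two-column split renders
# 'x1'/'x2' in the left column and 'y1'/'y2' in the right one:
#
#   split = FieldVerticalSplit([['x1', 'x2'], ['y1', 'y2']], large_spacing=True)
#
# With large_spacing=True and two splits this yields column_sizes [5, 5] and
# spacing_sizes [0, 2], i.e. two grid columns of width 5 with an offset of 2
# before the second one (presumably a Bootstrap-style 12-column grid).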
class FormWrapper:
"""Helper class for form rendering."""
def __init__(self, form):
self.form = form
self.groups = []
self.vsplits = []
self.ordered_multiselect_fields = []
self.csrf_token = form.csrf_token
self.has_ordered_multiselect_fields = False
self.has_select_fields = False
self.has_custom_form_fields = False
self.has_submit_field = False
for attrname, obj in inspect.getmembers(form):
# Collect the tab groups in the form
if isinstance(obj, FieldTabGroup):
obj.name = attrname
self.groups.append(obj)
# Collect the vertical splits in the form
elif isinstance(obj, FieldVerticalSplit):
obj.name = attrname
self.vsplits.append(obj)
# Check if the form has select fields
elif isinstance(obj, SelectFieldBase) \
and not isinstance(obj, OrderedSelectMultipleField) \
and not isinstance(obj, OrderedQuerySelectMultipleField) \
and not isinstance(obj, RadioField):
self.has_select_fields = True
# Check if the form has ordered multi-select fields
elif isinstance(obj, OrderedSelectMultipleField) \
or isinstance(obj, OrderedQuerySelectMultipleField):
self.has_ordered_multiselect_fields = True
self.ordered_multiselect_fields.append(obj)
# Check if the form has custom form select fields
elif isinstance(obj, CustomFormSelectField):
self.has_select_fields = True
self.has_custom_form_fields = True
# Check if the form has a submit field
elif isinstance(obj, SubmitField):
self.has_submit_field = True
try:
# Dictionary from first field object of a tab group
# to the group object itself
groups_firstfields = {
getattr(form, g._firstfield): g
for g in self.groups
}
# List of all fields belonging to a group
groups_fields = list(map(
lambda f: getattr(form, f), itertools.chain(
*map(lambda g: g._fieldnames, self.groups))))
except TypeError:
raise TypeError('Group field should be a string')
try:
# Dictionary from first field object of a vertical split
# to the vertical split object itself
vsplits_firstfields = {
getattr(form, v._firstfield): v
for v in self.vsplits
}
# List of all fields belonging to a vertical split
vsplit_fields = list(map(
lambda f: getattr(form, f), itertools.chain(
*map(lambda v: v._fieldnames, self.vsplits))))
except TypeError:
raise TypeError('Vertical split field should be a string')
self._fields = []
ignore_fields = []
if hasattr(form, '_RenderIgnoreFields'):
ignore_fields = form._RenderIgnoreFields
for field in form:
# Add the group when the first field occurs in the field list
if field in groups_firstfields:
self._fields.append(groups_firstfields[field])
# Add the vertical split when the first field
# occurs in the field list
elif field in vsplits_firstfields:
self._fields.append(vsplits_firstfields[field])
# Otherwise, add a field when it does not belong to a group
elif (field not in groups_fields and
field not in vsplit_fields and
field.name not in ignore_fields):
self._fields.append(field)
# Give every group and vsplit the form object to make them
# iterable over their tabs/fields
for g in self.groups + self.vsplits:
g._set_form(form)
def __iter__(self):
return iter(self._fields)
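# Illustrative usage sketch (SomeForm is a hypothetical form class using the
# helpers above). A template iterates the wrapper instead of the raw form, so
# tab groups and vertical splits appear at the position of their first field:
#
#   wrapped = FormWrapper(SomeForm())
#   for item in wrapped:
#       ...  # item is a plain field, a FieldTabGroup or a FieldVerticalSplit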
| mit | 6,605,752,003,280,129,000 | 32.80916 | 79 | 0.566606 | false |
ilogue/niprov | niprov/pictures.py | 1 | 1378 | from niprov.format import Format
import io, os
_CACHE = {}
class PictureCache(Format):
def __init__(self, dependencies):
cachedir = os.path.expanduser('~/.niprov-snapshots')
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
def new(self):
return io.BytesIO()
def keep(self, picture, for_):
imgId = for_.provenance['id']
if hasattr(picture, 'read'):
picture.seek(0)
bytes = picture.read()
else:
bytes = str(picture)
_CACHE[imgId] = bytes
def getBytes(self, for_):
imgId = for_.provenance['id']
if imgId in _CACHE:
return _CACHE[imgId]
return None
def getFilepath(self, for_):
return self.saveToDisk(for_)
def saveToDisk(self, for_):
imgId = for_.provenance['id']
fpath = os.path.expanduser('~/.niprov-snapshots/{}.png'.format(imgId))
if os.path.isfile(fpath):
return fpath
elif imgId in _CACHE:
with open(fpath, 'w') as picfile:
picfile.write(_CACHE[imgId])
return fpath
else:
return None
def serializeSingle(self, image):
"""Provides file path to picture of image.
This is part of the :class:`.Format` interface.
"""
return self.getFilepath(for_=image)
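# Illustrative usage sketch (`img` stands for any niprov image object whose
# provenance dict carries an 'id' key):
#
#   pictures = PictureCache(dependencies=None)
#   snapshot = pictures.new()              # BytesIO buffer to draw into
#   pictures.keep(snapshot, for_=img)      # cache the bytes under the image id
#   path = pictures.getFilepath(for_=img)  # ~/.niprov-snapshots/<id>.png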
| bsd-3-clause | 6,516,384,278,792,297,000 | 25.5 | 78 | 0.555878 | false |
jyundt/oval | migrations/versions/628b5fe65b72_rename_current_team_to_current_team_id.py | 1 | 1107 | """Rename current_team to current_team_id
Revision ID: 628b5fe65b72
Revises: a14e1ddd71e2
Create Date: 2016-04-21 11:06:27.786845
"""
# revision identifiers, used by Alembic.
revision = '628b5fe65b72'
down_revision = 'a14e1ddd71e2'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('racer', sa.Column('current_team_id', sa.Integer(), nullable=True))
op.drop_constraint(u'racer_current_team_fkey', 'racer', type_='foreignkey')
op.create_foreign_key(None, 'racer', 'team', ['current_team_id'], ['id'])
op.drop_column('racer', 'current_team')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('racer', sa.Column('current_team', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint(None, 'racer', type_='foreignkey')
op.create_foreign_key(u'racer_current_team_fkey', 'racer', 'team', ['current_team'], ['id'])
op.drop_column('racer', 'current_team_id')
### end Alembic commands ###
| gpl-2.0 | -8,020,372,367,204,022,000 | 33.59375 | 103 | 0.679313 | false |
PennNLP/SLURP | semantics/lexical_constants.py | 1 | 1815 | """Word-related constants used by semantics."""
ENTITY_ALIASES = {'me': 'Commander',
'i': 'Commander'}
# Primary verbnet senses for actions
SEARCH_ACTION = "search"
GO_ACTION = "go"
GET_ACTION = "retrieve"
FOLLOW_ACTION = "follow"
SEE_ACTION = "see"
TELL_ACTION = "tell"
BEGIN_ACTION = "begin"
ACTIVATE_ACTION = "activate"
DEACTIVATE_ACTION = "deactivate"
DEFUSE_ACTION = "defuse"
AVOID_ACTION = "avoid"
PATROL_ACTION = "patrol"
CARRY_ACTION = "carry"
STAY_ACTION = "stay"
# Mapping of other verbnet senses to the same actions.
# We include the identity entries just to make things easier on the talkback side
ACTION_ALIASES = {
'appear': GO_ACTION,
'get': GET_ACTION,
'obtain': GET_ACTION,
'meander': GO_ACTION,
'slide': GO_ACTION,
'nonvehicle': GO_ACTION,
'escape': GO_ACTION,
'rummage': SEARCH_ACTION,
'characterize': SEE_ACTION,
'chase': FOLLOW_ACTION,
'lodge': STAY_ACTION,
SEARCH_ACTION: SEARCH_ACTION,
GO_ACTION: GO_ACTION,
GET_ACTION: GET_ACTION,
FOLLOW_ACTION: FOLLOW_ACTION,
SEE_ACTION: SEE_ACTION,
TELL_ACTION: TELL_ACTION,
BEGIN_ACTION: BEGIN_ACTION,
ACTIVATE_ACTION: ACTIVATE_ACTION,
DEACTIVATE_ACTION: DEACTIVATE_ACTION,
AVOID_ACTION: AVOID_ACTION,
PATROL_ACTION: PATROL_ACTION,
CARRY_ACTION: CARRY_ACTION,
STAY_ACTION: STAY_ACTION,
DEFUSE_ACTION: DEFUSE_ACTION,
}
UNDERSTOOD_SENSES = set(ACTION_ALIASES.keys())
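# Illustrative demo of how callers normalize a VerbNet sense through the alias
# table (guarded so importing this module is unaffected):
if __name__ == "__main__":
    print(ACTION_ALIASES.get('chase'))     # -> 'follow'
    print('rummage' in UNDERSTOOD_SENSES)  # -> True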
| gpl-3.0 | -1,162,972,557,801,783,600 | 33.903846 | 81 | 0.539394 | false |
kpn-digital/py-timeexecution | time_execution/decorator.py | 1 | 2501 | """
Time Execution decorator
"""
import socket
import time
from fqn_decorators import Decorator
from fqn_decorators.asynchronous import AsyncDecorator
from pkgsettings import Settings
SHORT_HOSTNAME = socket.gethostname()
settings = Settings()
settings.configure(backends=[], hooks=[], duration_field="value")
def write_metric(name, **metric):
for backend in settings.backends:
backend.write(name, **metric)
def _apply_hooks(hooks, response, exception, metric, func, func_args, func_kwargs):
metadata = dict()
for hook in hooks:
hook_result = hook(
response=response,
exception=exception,
metric=metric,
func=func,
func_args=func_args,
func_kwargs=func_kwargs,
)
if hook_result:
metadata.update(hook_result)
return metadata
class time_execution(Decorator):
def __init__(self, func=None, **params):
self.start_time = None
super(time_execution, self).__init__(func, **params)
def before(self):
self.start_time = time.time()
def after(self):
duration = round(time.time() - self.start_time, 3) * 1000
metric = {"name": self.fqn, settings.duration_field: duration, "hostname": SHORT_HOSTNAME}
origin = getattr(settings, "origin", None)
if origin:
metric["origin"] = origin
hooks = self.params.get("extra_hooks", [])
disable_default_hooks = self.params.get("disable_default_hooks", False)
if not disable_default_hooks:
hooks = settings.hooks + hooks
# Apply the registered hooks, and collect the metadata they might
# return to be stored with the metrics
metadata = _apply_hooks(
hooks=hooks,
response=self.result,
exception=self.get_exception(),
metric=metric,
func=self.func,
func_args=self.args,
func_kwargs=self.kwargs,
)
metric.update(metadata)
write_metric(**metric)
def get_exception(self):
"""Retrieve the exception"""
if self.exc_info is None:
return
exc_type, exc_value, exc_tb = self.exc_info
if exc_value is None:
exc_value = exc_type()
if exc_value.__traceback__ is not exc_tb:
return exc_value.with_traceback(exc_tb)
return exc_value
class time_execution_async(AsyncDecorator, time_execution):
pass
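# Illustrative usage sketch (backend configuration is environment-specific and
# omitted here; only names defined in this module are used):
#
#   settings.configure(backends=[...])   # e.g. a configured metrics backend instance
#
#   @time_execution
#   def do_work():
#       return 42
#
#   do_work()  # writes a metric named '<module>.do_work' with its duration in
#              # milliseconds stored under the configured duration_field ('value')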
| apache-2.0 | -1,143,102,933,210,368,000 | 26.483516 | 98 | 0.606158 | false |
XENON1T/cax | cax/config.py | 1 | 11554 | """Configuration routines
"""
import datetime
import time
import json
import logging
import os
import pax
import socket
import subprocess
from zlib import adler32
import pymongo
# global variable to store the specified .json config file
CAX_CONFIGURE = ''
DATABASE_LOG = True
HOST = os.environ.get("HOSTNAME") if os.environ.get("HOSTNAME") else socket.gethostname().split('.')[0]
USER = os.environ.get("USER")
DATA_USER_PDC = 'bobau'
DATA_GROUP_PDC = 'xenon-users'
NCPU = 1
RUCIO_RSE = ''
RUCIO_SCOPE = ''
RUCIO_UPLOAD = None
RUCIO_CAMPAIGN = ''
PAX_DEPLOY_DIRS = {
'midway-login1' : '/project/lgrandi/deployHQ/pax',
'tegner-login-1': '/afs/pdc.kth.se/projects/xenon/software/pax'
}
RUCIO_RULE = ''
# URI for the mongo runs database
RUNDB_URI = 'mongodb://eb:%[email protected]:27017,copslx50.fysik.su.se:27017,zenigata.uchicago.edu:27017/run'
def mongo_password():
"""Fetch passsword for MongoDB
This is stored in an environmental variable MONGO_PASSWORD.
"""
mongo_pwd = os.environ.get('MONGO_PASSWORD')
if mongo_pwd is None:
raise EnvironmentError('Environmental variable MONGO_PASSWORD not set.'
' This is required for communicating with the '
'run database. To fix this problem, Do:'
'\n\n\texport MONGO_PASSWORD=xxx\n\n'
'Then rerun this command.')
return mongo_pwd
def get_user():
"""Get username of user running this
"""
global USER
return USER
def get_hostname():
"""Get hostname of the machine we're running on.
"""
global HOST
if '.' in HOST:
HOST = HOST.split('.')[0]
return HOST
def set_json(config):
"""Set the cax.json file at your own
"""
global CAX_CONFIGURE
CAX_CONFIGURE = config
def set_database_log(config):
"""Set the database update
"""
global DATABASE_LOG
DATABASE_LOG = config
def load():
# User-specified config file
if CAX_CONFIGURE:
filename = os.path.abspath(CAX_CONFIGURE)
# Default config file
else:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, 'cax.json')
logging.debug('Loading config file %s' % filename)
return json.loads(open(filename, 'r').read())
def purge_version(hostname=get_hostname()):
"""
You can select which pax version you want to purge
in the form "vX.x.x", where X is the main pax version
and x.x identifies the release, e.g. pax_v1.2.3
"""
return get_config(hostname).get('pax_version_purge',
None)
def purge_settings(hostname=get_hostname()):
return get_config(hostname).get('purge',
None)
def nstream_settings(hostname=get_hostname()):
return get_config(hostname).get('nstreams',
None)
def get_cert(hostname=get_hostname()):
return get_config(hostname).get('grid_cert',
None)
def get_config(hostname=get_hostname()):
"""Returns the cax configuration for a particular hostname
NB this currently reloads the cax.json file every time it is called!!
"""
for doc in load():
if doc['name'] == hostname:
return doc
elif hostname == "upload_tsm":
return hostname
raise LookupError("Unknown host %s" % hostname)
def get_transfer_options(transfer_kind='upload', transfer_method=None):
"""Returns hostnames that the current host can upload or download to.
transfer_kind: 'upload' or 'download'
transfer_method: is specified and not None, return only hosts with which
we can work using this method (e.g. scp)
"""
try:
transfer_options = get_config(get_hostname())[
'%s_options' % transfer_kind]
except LookupError:
logging.info("Host %s has no known transfer options.",
get_hostname())
return []
if transfer_method is not None:
transfer_options = [to for to in transfer_options
if get_config(to['host'])['method'] == 'method']
return transfer_options
def get_pax_options(option_type='versions'):
try:
options = get_config(get_hostname())['pax_%s' % option_type]
except LookupError as e:
logging.info("Pax versions not specified: %s", get_hostname())
return []
return options
def get_dataset_list():
try:
options = get_config(get_hostname())['dataset_list']
except LookupError as e:
logging.debug("dataset_list not specified, operating on entire DB")
return []
return options
def get_task_list():
try:
options = get_config(get_hostname())['task_list']
except LookupError as e:
logging.debug("task_list not specified, running all tasks")
return []
return options
def mongo_collection(collection_name='runs_new'):
# For the event builder to communicate with the gateway, we need to use the DAQ network address
# Otherwise, use the internet to find the runs database
if get_hostname().startswith('eb'):
c = pymongo.MongoClient('mongodb://eb:%s@gw:27017/run' % os.environ.get('MONGO_PASSWORD'))
else:
uri = RUNDB_URI
uri = uri % os.environ.get('MONGO_PASSWORD')
c = pymongo.MongoClient(uri,
replicaSet='runs',
readPreference='secondaryPreferred')
db = c['run']
collection = db[collection_name]
return collection
def data_availability(hostname=get_hostname()):
collection = mongo_collection()
results = []
for doc in collection.find({'detector': 'tpc'},
['name', 'data']):
for datum in doc['data']:
if datum['status'] != 'transferred':
continue
if 'host' in datum and datum['host'] != hostname:
continue
results.append(doc)
return results
def processing_script(args={}):
host = get_hostname()
if host not in ('midway-login1', 'tegner-login-1'):
raise ValueError
midway = (host == 'midway-login1')
default_args = dict(host=host,
use='cax',
number=333,
ncpus=1 if midway else 1,
mem_per_cpu=2000,
pax_version=(('v%s' % pax.__version__) if midway else 'head'),
partition='' if midway else '#SBATCH --partition=main',
# partition='xenon1t' if midway else 'main',
# partition='kicp' if midway else 'main',
base='/project/lgrandi/xenon1t' if midway else '/cfs/klemming/projects/xenon/xenon1t',
account='pi-lgrandi' if midway else 'xenon',
time='48:00:00',
anaconda='/project/lgrandi/anaconda3/bin' if midway else '/cfs/klemming/nobackup/b/bobau/ToolBox/TestEnv/Anaconda3/bin',
extra='',
# extra='#SBATCH --qos=xenon1t' if midway else '#SBATCH -t 72:00:00',
# extra='#SBATCH --qos=xenon1t-kicp' if midway else '#SBATCH -t 72:00:00',
stats='sacct -j $SLURM_JOB_ID --format="JobID,NodeList,Elapsed,AllocCPUS,CPUTime,MaxRSS"' if midway else ''
)
for key, value in default_args.items():
if key not in args:
args[key] = value
# Evaluate {variables} within strings in the arguments.
args = {k:v.format(**args) if isinstance(v, str) else v for k,v in args.items()}
os.makedirs(args['base']+"/"+args['use']+("/%s"%str(args['number']))+"_"+args['pax_version'], exist_ok=True)
# Script parts common to all sites
script_template = """#!/bin/bash
#SBATCH --job-name={use}_{number}_{pax_version}
#SBATCH --ntasks=1
#SBATCH --cpus-per-task={ncpus}
#SBATCH --mem-per-cpu={mem_per_cpu}
#SBATCH --time={time}
#SBATCH --output={base}/{use}/{number}_{pax_version}/{number}_{pax_version}_%J.log
#SBATCH --error={base}/{use}/{number}_{pax_version}/{number}_{pax_version}_%J.log
#SBATCH --account={account}
{partition}
{extra}
export PATH={anaconda}:$PATH
export JOB_WORKING_DIR={base}/{use}/{number}_{pax_version}
mkdir -p ${{JOB_WORKING_DIR}}
cd ${{JOB_WORKING_DIR}}/..
#ln -sf {base}/{use}/pax_* .
source activate pax_{pax_version}
HOSTNAME={host}
{command} --log-file ${{JOB_WORKING_DIR}}/cax.log
chgrp -R xenon1t-admins ${{JOB_WORKING_DIR}}
rm -f ${{JOB_WORKING_DIR}}/pax_event_class*
{stats}
""".format(**args)
return script_template
def get_base_dir(category, host):
destination_config = get_config(host)
# Determine where data should be copied to
return destination_config['dir_%s' % category]
def get_raw_base_dir(host=get_hostname()):
return get_base_dir('raw', host)
def get_processing_base_dir(host=get_hostname()):
return get_base_dir('processed', host)
def get_processing_dir(host, version):
return os.path.join(get_processing_base_dir(host),
'pax_%s' % version)
def get_minitrees_base_dir(host=get_hostname()):
return get_base_dir('minitrees', host)
def get_minitrees_dir(host, version):
return os.path.join(get_minitrees_base_dir(host),
'pax_%s' % version)
def adjust_permission_base_dir(base_dir, destination):
"""Set ownership and permissons for basic folder of processed data (pax_vX)"""
if destination=="tegner-login-1":
#Change group and set permissions for PDC Stockholm
user_group = DATA_USER_PDC + ":" + DATA_GROUP_PDC
subprocess.Popen( ["chown", "-R", user_group, base_dir],
stdout=subprocess.PIPE )
subprocess.Popen( ["setfacl", "-R", "-M", "/cfs/klemming/projects/xenon/misc/basic", base_dir],
stdout=subprocess.PIPE )
def get_adler32( fname ):
"""Calcualte an Adler32 checksum in python
Used for cross checks with Rucio
"""
BLOCKSIZE=256*1024*1024
asum = 1
with open(fname, "rb") as f:
while True:
data = f.read(BLOCKSIZE)
if not data:
break
asum = adler32(data, asum)
if asum < 0:
asum += 2**32
return hex(asum)[2:10].zfill(8).lower()
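# Illustrative usage sketch (placeholder path):
#
#   checksum = get_adler32('/path/to/raw_data_file')
#   # -> 8-character lowercase hex string, directly comparable to Rucio's adler32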
#Rucio stuff:
def set_rucio_rse( rucio_rse):
"""Set the rucio rse information manually
"""
global RUCIO_RSE
RUCIO_RSE = rucio_rse
def set_rucio_scope( rucio_scope):
"""Set the rucio scope information manually
"""
global RUCIO_SCOPE
RUCIO_SCOPE = rucio_scope
def set_rucio_upload( rucio_upload ):
global RUCIO_UPLOAD
RUCIO_UPLOAD = rucio_upload
def set_rucio_campaign( rucio_campaign ):
global RUCIO_CAMPAIGN
RUCIO_CAMPAIGN = rucio_campaign
def set_rucio_rules( config_rule ):
"""Set the according config file to define the rules for transfer"""
global RUCIO_RULE
RUCIO_RULE = config_rule
def get_science_run( timestamp ):
#Evaluate science run periods:
#1) Change from sc0 to sc1:
dt = datetime.datetime(2017, 2, 2, 17, 40)
time_sr0_to_sr1 = time.mktime(dt.timetuple())
science_run = RUCIO_CAMPAIGN
#Evaluate the according science run number:
if timestamp <= time_sr0_to_sr1:
science_run = "SR000"
elif timestamp > time_sr0_to_sr1:
science_run = "SR001"
return science_run
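# Illustrative usage sketch: timestamps at or before 2017-02-02 17:40 (local time)
# map to SR000, later ones to SR001.
#
#   get_science_run(time.mktime(datetime.datetime(2017, 1, 1).timetuple()))  # 'SR000'
#   get_science_run(time.mktime(datetime.datetime(2017, 6, 1).timetuple()))  # 'SR001'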
| isc | -8,276,867,518,926,652,000 | 29.246073 | 144 | 0.602648 | false |
gisce/markdown-i18n | tests/test_i18n.py | 1 | 9781 | # encoding=utf-8
from __future__ import unicode_literals
import unittest
import os
import tempfile
import shutil
import re
from markdown import markdown, Markdown
from babel.messages import pofile, mofile, catalog
def clean_xml(xml_string):
return re.sub('\s+<', '<', xml_string).strip()
class TempDir(object):
def __init__(self):
self.dir = tempfile.mkdtemp()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.dir)
class I18nTest(unittest.TestCase):
"""Basic test for i18n jinja
"""
def setUp(self):
self.dir = tempfile.mkdtemp()
locale = 'es_ES'
self.catalog = catalog.Catalog(locale=locale)
os.mkdir(os.path.join(self.dir, locale))
lc_messages = os.path.join(self.dir, locale, 'LC_MESSAGES')
os.mkdir(lc_messages)
self.po_file = os.path.join(lc_messages, 'messages.po')
def tearDown(self):
shutil.rmtree(self.dir)
def write_po(self):
with open(self.po_file, 'w') as f:
pofile.write_po(f, self.catalog)
def markdown(self, text, extensions=None, extension_configs=None):
if extensions is None:
extensions = []
extensions.append('markdown_i18n')
if extension_configs is None:
extension_configs = {}
if 'markdown_i18n' not in extension_configs:
extension_configs['markdown_i18n'] = {
'i18n_dir': self.dir,
'i18n_lang': 'es_ES'
}
return markdown(
text,
extensions=extensions,
extension_configs=extension_configs
)
def test_create_pot_file(self):
pot_file = os.path.join(self.dir, 'messages.pot')
text = "this is a simple text"
self.markdown(text)
self.assertTrue(os.path.exists(pot_file))
with open(pot_file, 'r') as f:
catalog = pofile.read_po(f)
self.assertIn(text, catalog)
self.assertEqual(len(catalog), 1)
def test_basic_text(self):
text = "this is a simple text"
expected = '<p>esto es un simple test</p>'
self.catalog.add("this is a simple text", "esto es un simple test")
self.write_po()
result = self.markdown(text)
self.assertEqual(expected, result)
def test_newline_text(self):
text = "this is a simple\ntext"
expected = '<p>esto es un simple\ntest</p>'
self.catalog.add("this is a simple\ntext", "esto es un simple\ntest")
self.write_po()
result = self.markdown(text)
self.assertEqual(expected, result)
def test_quoted_text(self):
text = 'this is a simple "text"'
expected = '<p>esto es un simple "test"</p>'
self.catalog.add('this is a simple "text"', 'esto es un simple "test"')
self.write_po()
result = self.markdown(text)
self.assertEqual(expected, result)
def test_multi_paragraph(self):
text = "paragraph 1\n\nparagraph 2"
expected = '<p>parrafo 1</p>'
expected += '<p>parrafo 2</p>'
self.catalog.add('paragraph 1', 'parrafo 1')
self.catalog.add('paragraph 2', 'parrafo 2')
self.write_po()
result = self.markdown(text)
self.assertEqual(expected, result)
def test_headers(self):
for x in range(1, 7):
text = "{0} This is a h{1}".format('#' * x, x)
expected = '<h{0} id="esto-es-un-h{0}">Esto es un h{0}</h{0}>'.format(x)
with TempDir() as d:
c = catalog.Catalog(locale='es_ES')
c.add('This is a h{0}'.format(x), 'Esto es un h{0}'.format(x))
os.mkdir(os.path.join(d.dir, 'es_ES'))
lc_messages = os.path.join(d.dir, 'es_ES', 'LC_MESSAGES')
os.mkdir(lc_messages)
mo_file = os.path.join(lc_messages, 'messages.mo')
with open(mo_file, 'w') as f:
mofile.write_mo(f, c)
result = self.markdown(
text,
extensions=['markdown.extensions.toc'],
extension_configs={
'markdown_i18n': {
'i18n_dir': d.dir,
'i18n_lang': 'es_ES'
}
}
)
self.assertEqual(expected, result)
def test_ulists(self):
text = "* First element.\n * Second element.\n"
expected = """<ul>
<li>Primer elemento.</li>
<li>Segundo elemento.</li>
</ul>"""
self.catalog.add("First element.", "Primer elemento.")
self.catalog.add("Second element.", "Segundo elemento.")
self.write_po()
result = self.markdown(text)
self.assertEqual(clean_xml(expected), clean_xml(result))
def test_nlists(self):
text = "1. First element.\n 2. Second element.\n"
expected = """<ol>
<li>Primer elemento.</li>
<li>Segundo elemento.</li>
</ol>"""
self.catalog.add("First element.", "Primer elemento.")
self.catalog.add("Second element.", "Segundo elemento.")
self.write_po()
result = self.markdown(text)
self.assertEqual(clean_xml(expected), clean_xml(result))
def test_merge_existing_pot(self):
pot_file = os.path.join(self.dir, 'messages.pot')
text1 = "this is a simple text"
self.markdown(text1)
self.assertTrue(os.path.exists(pot_file))
text2 = "another text"
self.markdown(text2)
with open(pot_file, 'r') as f:
po_file = pofile.read_po(f)
self.assertEqual(len(po_file), 2)
self.assertIn(text1, po_file)
self.assertIn(text2, po_file)
def test_no_translate_code(self):
text = ('```bash\n'
'$ python --version\n'
'Python 2.7.2\n'
'$ pip --version\n'
'pip 1.5.2\n'
'```')
expected = ('<pre><code class="bash">$ python --version\n'
'Python 2.7.2\n'
'$ pip --version\n'
'pip 1.5.2\n'
'</code></pre>')
result = self.markdown(
text,
extensions=['markdown.extensions.fenced_code']
)
self.assertEqual(clean_xml(expected), clean_xml(result))
def test_tables(self):
text = """
First Header | Second Header
------------- | -------------
Content 1 | Content 2
"""
expected = """<table>
<thead>
<tr>
<th>Encabezamiento primero</th>
<th>Encabezamiento segundo</th>
</tr>
</thead>
<tbody>
<tr>
<td>Contenido 1</td>
<td>Contenido 2</td>
</tr>
</tbody>
</table>"""
self.catalog.add("First Header", "Encabezamiento primero")
self.catalog.add("Second Header", "Encabezamiento segundo")
self.catalog.add("Content 1", "Contenido 1")
self.catalog.add("Content 2", "Contenido 2")
self.write_po()
result = self.markdown(
text,
extensions=['markdown.extensions.tables']
)
self.assertEqual(clean_xml(expected), clean_xml(result))
def test_admonition(self):
text = (
"!!!note\n"
" This is a note."
)
expected = (
'<div class="admonition note">'
' <p class="admonition-title">Note</p>'
' <p>Esto es una nota.</p>'
'</div>'
)
self.catalog.add("This is a note.", "Esto es una nota.")
self.write_po()
result = self.markdown(
text,
extensions=['markdown.extensions.admonition'],
)
self.assertEqual(clean_xml(expected), clean_xml(result))
def test_code_tag(self):
text = 'ports like: `"com1", "com2"`'
expected = '<p>puertos como: <code>"com1", "com2"</code></p>'
self.catalog.add(
'ports like: <code>"com1", "com2"</code>',
'puertos como:<code>"com1", "com2"</code>'
)
self.write_po()
result = self.markdown(text)
self.assertEqual(clean_xml(result), clean_xml(expected))
def test_i18n_always_after_toc(self):
text = '# This is h1'
expected_toc = (
'<div class="toc">'
' <ul>'
' <li><a href="#esto-es-h1">Esto es h1</a></li>'
' </ul>'
'</div>'
)
self.catalog.add(
'This is h1',
'Esto es h1'
)
self.write_po()
md = Markdown(
extensions=['markdown.extensions.toc', 'markdown_i18n'],
extension_configs={
'markdown_i18n': {
'i18n_dir': self.dir,
'i18n_lang': 'es_ES'
}
}
)
md.convert(text)
toc = getattr(md, 'toc', '')
self.assertEqual(clean_xml(toc), clean_xml(expected_toc))
def test_original_string_unescaped_entities(self):
text = '**Hello** Voilà'
expected_original = '<strong>Hello</strong> Voilà'
expect_text = '<p><strong>Hola</strong> Voilà</p>'
self.catalog.add(
'<strong>Hello</strong> Voilà',
'<strong>Hola</strong> Voilà'
)
self.write_po()
result = self.markdown(text)
pot_file = os.path.join(self.dir, 'messages.pot')
with open(pot_file, 'r') as f:
po = pofile.read_po(f)
self.assertEqual(
clean_xml(po._messages.keys()[0]), clean_xml(expected_original)
)
self.assertEqual(clean_xml(result), clean_xml(expect_text))
| mit | 666,288,264,446,547,800 | 27.668622 | 84 | 0.525266 | false |
oxnz/algorithms | leetcode/SimplifyPath.py | 1 | 1845 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===============================================================
#
# Filename: SimplifyPath.py
#
# Author: Oxnz
# Email: [email protected]
# Created: [2014-12-01 18:54:04 CST]
# Last-update: 2014-12-01 18:54:04 CST
# Description: ANCHOR
#
# Version: 0.0.1
# Revision: [None]
# Revision history: [None]
# Date Author Remarks: [None]
#
# License:
# Copyright (c) 2013 Oxnz
#
# Distributed under terms of the [LICENSE] license.
# [license]
#
# ===============================================================
#
class Solution:
# @param path, a string
# @return a string
def simplifyPath(self, path):
parts = path.split('/')
pathv = ['']
i = 0
while i < len(parts):
if parts[i] == '':
i += 1
elif parts[i] == '..':
i += 1
if len(pathv):
pathv.pop()
elif parts[i] == '.':
i += 1
else:
pathv.append(parts[i])
i += 1
path = '/'.join(pathv)
if path == '':
return '/'
elif path[0] != '/':
path = '/' + path
return path
import unittest
import os.path
class TestSolution(unittest.TestCase):
def setUp(self):
self._simpath = Solution().simplifyPath
def test_case(self):
for path in {
'/': '/',
'/../': '/',
'/home//foo': '/home/foo',
'/../../../': '/',
'/././../../././': '/',
'/a/./b///../c/../././../d/..//../e/./f/./g/././//.//h///././/..///': '/e/f/g',
}:
self.assertEqual(os.path.abspath(path), self._simpath(path))
if __name__ == '__main__':
unittest.main()
| mit | 1,549,973,420,774,528,500 | 23.276316 | 91 | 0.407588 | false |
charlyoleg/Cnc25D | cnc25d/outline_backends.py | 1 | 36658 | # outline_backends.py
# common interface to create lines, arcs and circles for the backends freecad, dxfwrite, svgwrite and Tkinter
# created by charlyoleg on 2013/06/21
#
# (C) Copyright 2013 charlyoleg
#
# This file is part of the Cnc25D Python package.
#
# Cnc25D is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cnc25D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnc25D. If not, see <http://www.gnu.org/licenses/>.
"""
outline_backends.py provides a common API to create lines, arcs and circles with freecad, dxfwrite, svgwrite and Tkinter (via display_backend.py)
"""
################################################################
# python behavior
################################################################
from __future__ import division # to get float division
################################################################
# header for Python / FreeCAD compatibility
################################################################
import importing_freecad
importing_freecad.importing_freecad()
#print("FreeCAD.Version:", FreeCAD.Version())
#FreeCAD.Console.PrintMessage("Hello from PrintMessage!\n") # avoid using this method because it is not printed in the FreeCAD GUI
################################################################
# import
################################################################
import Part
from FreeCAD import Base
import math
import sys, argparse
import svgwrite
from dxfwrite import DXFEngine
import Tkinter
import time # for time.sleep to help Tkinter to finish properly
import display_backend
import cnc_outline # just used in figure_simple_display() for cnc_outline.outline_rotate, closed(), check_outline_format() and ideal_outline()
import export_2d # just for test enhancement
import design_help # just for get_effective_args() and mkdir_p
################################################################
# global variable
################################################################
unit_circle_resolution = 12 #6
#default_dxf_layer_name = 'CNC25D'
global_epsilon_length = math.pi/1000
global_epsilon_angle = math.pi/10000
################################################################
# ******** sub-functions for the API ***********
################################################################
def complete_circle(ai_center, ai_radius, ai_resolution):
""" Generate a list of points that creates a circle with the resolution ai_resolution.
ai_resolution sets the maximum number of intermediate points to create
"""
r_points = []
# calculation of the angle resolution:
if(ai_resolution<3):
print("ERR821: The ai_resolution is smaller than 3. Current ai_resolution = {:d}".format(ai_resolution))
sys.exit(2)
#print("dbg424: ai_radius:", ai_radius)
circle_resolution = int(ai_resolution * ai_radius) # circle resolution increase with the radius
angle_resolution = 2*math.pi/circle_resolution
# create the list of points
for i in range(circle_resolution):
r_points.append([ai_center[0]+ai_radius*math.cos(i*angle_resolution), ai_center[1]+ai_radius*math.sin(i*angle_resolution)])
return(r_points)
def arc_3_points_to_radius_center_angles(ai_start, ai_middle, ai_end):
""" From three points (A,B,C: equivalent to 6 floats) computes the radius, the center (I) and the angles ((Ix,IA), (Ix,IB), (Ix,IC)) of the arc passing through A, B and C
"""
# interpretation of the three points
ptax = ai_start[0]
ptay = ai_start[1]
ptbx = ai_middle[0]
ptby = ai_middle[1]
ptcx = ai_end[0]
ptcy = ai_end[1]
#print("dbg501: pta: {:6.01f} {:6.01f}".format(ptax, ptay))
#print("dbg502: ptb: {:6.01f} {:6.01f}".format(ptbx, ptby))
#print("dbg503: ptc: {:6.01f} {:6.01f}".format(ptcx, ptcy))
# epsilon definition to be tolerant to calculation imprecision
#epsilon = math.pi/1000 # can be used to compare radian and sine
length_epsilon = global_epsilon_length # to speed up run time
angle_epsilon = global_epsilon_angle # to speed up run time
#print("dbg747: length_epsilon:", length_epsilon)
# check
if((ptax==ptbx)and(ptay==ptby)):
print("ERR807: Error, point_A and point_B are identical!")
sys.exit(2)
if((ptbx==ptcx)and(ptby==ptcy)):
print("ERR808: Error, point_B and point_C are identical!")
sys.exit(2)
if((ptax==ptcx)and(ptay==ptcy)):
print("ERR809: Error, point_A and point_C are identical!")
sys.exit(2)
## check the documentation for the explanation of the following calculation
# length of [AB] and [BC]
lab = math.sqrt((ptbx-ptax)**2+(ptby-ptay)**2)
lbc = math.sqrt((ptcx-ptbx)**2+(ptcy-ptby)**2)
if(lab<length_epsilon):
print("ERR811: Error, A and B are almost identical")
print("dbg559: pta={:0.2f} {:0.2f} ptb={:0.2f} {:0.2f} ptc={:0.2f} {:0.2f}".format(ptax, ptay, ptbx, ptby, ptcx, ptcy))
sys.exit(2)
if(lbc<length_epsilon):
print("ERR812: Error, B and C are almost identical")
sys.exit(2)
# calculation of cos(e), cos(f), sin(e) and sin(f)
cos_e = (ptbx-ptax)/lab
cos_f = (ptcx-ptbx)/lbc
sin_e = (ptby-ptay)/lab
sin_f = (ptcy-ptby)/lbc
#print("dbg304: cos_e: ", cos_e)
#print("dbg305: sin_e: ", sin_e)
#print("dbg306: cos_f: ", cos_f)
#print("dbg307: sin_f: ", sin_f)
is_colinear = (math.copysign(1, sin_e)*cos_e)-(math.copysign(1,sin_f)*cos_f)
#print("dbg556: is_colinear:", is_colinear)
if(abs(is_colinear)<angle_epsilon):
#print("ERR810: Error, A, B, C are colinear. Arc can not be created!")
#sys.exit(2)
if(lab>100*length_epsilon):
pass # to let comment the following warning
#print("WARN810: Arc ABC is replaced by the line AC, because A,B,C are colinear!")
#print("dbg559: A= {:0.2f} {:0.2f} B= {:0.2f} {:0.2f} C= {:0.2f} {:0.2f}".format(ptax, ptay, ptbx, ptby, ptcx, ptcy))
#print("dbg558: is_colinear:", is_colinear)
#print("dbg557: lab:", lab)
r_a3ptrca = (0, 0, 0, 0, 0, 0, 0, 0, 0)
return(r_a3ptrca)
# Calculation of M and N
ptmx = (ptax+ptbx)/2
ptmy = (ptay+ptby)/2
ptnx = (ptbx+ptcx)/2
ptny = (ptby+ptcy)/2
#print("dbg134: ptmx:", ptmx)
#print("dbg135: ptmy:", ptmy)
#print("dbg136: ptnx:", ptnx)
#print("dbg137: ptny:", ptny)
# calculation of I
lix = cos_e*sin_f-cos_f*sin_e
kix = sin_f*(cos_e*ptmx+sin_e*ptmy)-sin_e*(cos_f*ptnx+sin_f*ptny)
liy = sin_e*cos_f-sin_f*cos_e
kiy = cos_f*(cos_e*ptmx+sin_e*ptmy)-cos_e*(cos_f*ptnx+sin_f*ptny)
if(abs(lix)<angle_epsilon):
print("ERR813: Error, A, B and C are almost colinear. Arc can not be created!")
sys.exit(2)
if(abs(liy)<angle_epsilon):
print("ERR814: Error, A, B and C are almost colinear. Arc can not be created!")
sys.exit(2)
#print("dbg124: lix:", lix)
#print("dbg125: kix:", kix)
#print("dbg126: liy:", liy)
#print("dbg127: kiy:", kiy)
ptix = kix / lix
ptiy = kiy / liy
#print("dbg505: pti: {:6.02f} {:6.02f}".format(ptix, ptiy))
# length of [IA], [IB] and [IC]
lia = math.sqrt((ptax-ptix)**2+(ptay-ptiy)**2)
lib = math.sqrt((ptbx-ptix)**2+(ptby-ptiy)**2)
lic = math.sqrt((ptcx-ptix)**2+(ptcy-ptiy)**2)
if(abs(lib-lia)>length_epsilon):
#print("dbg404: lia:", lia)
#print("dbg405: lib:", lib)
print("ERR815: I is not equidistant from A and B!")
sys.exit(2)
if(abs(lic-lib)>length_epsilon):
#print("dbg402: lib:", lib)
#print("dbg403: lic:", lic)
print("ERR816: I is not equidistant from B and C!")
sys.exit(2)
# calculation of the angle u=(Ix, IA) , v=(Ix, IB) and w=(Ix, IC)
u = math.atan2(ptay-ptiy, ptax-ptix)
v = math.atan2(ptby-ptiy, ptbx-ptix)
w = math.atan2(ptcy-ptiy, ptcx-ptix)
# calculation of the angle uv=(IA, IB), uw=(IA, IC) vw=(IB, IC)
uv = math.fmod(v-u+4*math.pi, 2*math.pi)
uw = math.fmod(w-u+4*math.pi, 2*math.pi)
vw = math.fmod(w-v+4*math.pi, 2*math.pi)
# check arc direction
ccw_ncw = 1
if(uw>uv):
#print("dbg874: arc of circle direction: counter clock wise (CCW)")
ccw_ncw = 1
else:
#print("dbg875: arc of circle direction: clock wise (CW)")
ccw_ncw = 0
uv = uv - 2*math.pi
vw = vw - 2*math.pi
uw = uw - 2*math.pi
r_a3ptrca = (lia, ptix, ptiy, u, v, w, uv, vw, uw)
return(r_a3ptrca)
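# Illustrative worked example: three points on the circle of radius 10 centred at
# the origin give back that radius and centre, with counter-clockwise angles.
#
#   (r, ix, iy, u, v, w, uv, vw, uw) = \
#       arc_3_points_to_radius_center_angles((10, 0), (0, 10), (-10, 0))
#   # r = 10.0, (ix, iy) = (0.0, 0.0), u = 0, v = pi/2, w = pi, uw = pi (CCW)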
def arc_of_circle(ai_start, ai_middle, ai_end, ai_resolution):
""" From three points (list of 6 floats) creates a polyline (list of 2*n floats) representing the arc of circle defined by the three points
ai_resolution sets the maximum number of intermediate points to create
"""
### precision
#epsilon = math.pi/1000 # can be used to compare radian and sine
#length_epsilon = global_epsilon_length # to speed up run time
#angle_epsilon = global_epsilon_angle # to speed up run time
### get radius, center and angles
(lia, ptix, ptiy, u, v, w, uv, vw, uw) = arc_3_points_to_radius_center_angles(ai_start, ai_middle, ai_end)
### colinear case
if(lia==0):
r_polyline = (ai_start, ai_end)
return(r_polyline)
### real arc case
# calculation of the angle resolution:
if(ai_resolution<3):
print("ERR821: The ai_resolution is smaller than 3. Current ai_resolution = {:d}".format(ai_resolution))
sys.exit(2)
#print("dbg414: arc radius: lia:", lia)
circle_resolution = ai_resolution * lia # angle resolution increase with the radius
ar = 2*math.pi/circle_resolution
# number of intermediate point between A and B and step angle
abip = int(abs(uv)/ar)
absa = uv/(abip+1)
#print("dbg741: uv angle resolution: absa:", absa)
# number of intermediate point between B and C and step angle
bcip = int(abs(vw)/ar)
bcsa = vw/(bcip+1)
#print("dbg742: vw angle resolution: bcsa:", bcsa)
# polyline construction
r_polyline = []
r_polyline.append(ai_start)
for i in range(abip):
r_polyline.append([ptix+lia*math.cos(u+(i+1)*absa), ptiy+lia*math.sin(u+(i+1)*absa)])
r_polyline.append(ai_middle)
for i in range(bcip):
r_polyline.append([ptix+lia*math.cos(v+(i+1)*bcsa), ptiy+lia*math.sin(v+(i+1)*bcsa)])
r_polyline.append(ai_end)
return(r_polyline)
def outline_arc_line_with_freecad(ai_segments, ai_outline_closed):
""" Generates the arcs and lines outline with the FreeCAD Part API
"""
# precision
length_epsilon = global_epsilon_length
#angle_epsilon = global_epsilon_angle
#
constant_z = 0 # FreeCAD.Part works in 3D. So we fix z=0 and just use the XY surface
fc_vectors = [Base.Vector(ai_segments[0][0], ai_segments[0][1], constant_z)]
segment_nb = len(ai_segments)-1
fc_outline = []
for i in range(segment_nb-1):
if((abs(ai_segments[i][-2]-ai_segments[i+1][-2])<length_epsilon)and(abs(ai_segments[i][-1]-ai_segments[i+1][-1])<length_epsilon)):
print("ERR264: Error, point of index {:d} and {:d} are identical. x {:0.5f} y {:0.5f}".format(i,i+1, ai_segments[i][-2], ai_segments[i][-1]))
#for j in range(segment_nb):
# print("dbg269: pt {:d} x {:0.3f} y {:0.3f}".format(j, ai_segments[j][-2], ai_segments[j][-1]))
sys.exit(2)
for i in range(segment_nb):
segment_type = 'line'
fc_vectors.append(Base.Vector(ai_segments[i+1][0], ai_segments[i+1][1], constant_z))
point_start = fc_vectors[-2]
point_end = fc_vectors[-1]
if(len(ai_segments[i+1])==4):
segment_type = 'arc'
fc_vectors.append(Base.Vector(ai_segments[i+1][2], ai_segments[i+1][3], constant_z))
point_start = fc_vectors[-3]
point_mid = fc_vectors[-2]
point_end = fc_vectors[-1]
if(i==segment_nb-1):
#print("dbg306: last segment")
if(ai_outline_closed):
#print("dbg307: close")
point_end = fc_vectors[0]
#print("dbg563: i: {:d} segment: {:s}".format(i, segment_type))
if(segment_type=='line'):
fc_outline.append(Part.Line(point_start, point_end))
elif(segment_type=='arc'):
fc_outline.append(Part.Arc(point_start, point_mid, point_end))
r_outline = Part.Shape(fc_outline)
return(r_outline)
def outline_arc_line_with_svgwrite(ai_segments, ai_outline_closed):
""" Generates the arcs and lines outline with the mozman svgwrite
"""
svg_points = [tuple((ai_segments[0][0], ai_segments[0][1]))]
segment_nb = len(ai_segments)-1
svg_outline = []
for i in range(segment_nb):
segment_type = 'line'
svg_points.append(tuple((ai_segments[i+1][0], ai_segments[i+1][1])))
point_start = svg_points[-2]
point_end = svg_points[-1]
if(len(ai_segments[i+1])==4):
segment_type = 'arc'
svg_points.append(tuple((ai_segments[i+1][2], ai_segments[i+1][3])))
point_start = svg_points[-3]
point_mid = svg_points[-2]
point_end = svg_points[-1]
if(i==segment_nb-1):
#print("dbg306: last segment")
if(ai_outline_closed):
#print("dbg307: close")
point_end = svg_points[0]
#print("dbg563: i: {:d} segment: {:s}".format(i, segment_type))
if(segment_type=='line'):
svg_line = svgwrite.shapes.Line(start=point_start, end=point_end)
svg_line.fill('green', opacity=0.25).stroke('black', width=1)
#svg_line.fill('green', opacity=0.0).stroke('black', width=1) # opacity=0.0 doesn't work!
#svg_line.stroke('black', width=1)
svg_outline.append(svg_line)
elif(segment_type=='arc'):
(lia, ptix, ptiy, u, v, w, uv, vw, uw) = arc_3_points_to_radius_center_angles(point_start, point_mid, point_end)
large_arc_flag = 0
if(abs(uw)>math.pi):
large_arc_flag = 1
sweep_flag = 1
if(uw<0):
sweep_flag = 0
target_x = point_end[0] - point_start[0]
target_y = point_end[1] - point_start[1]
if(lia==0):
svg_arc_path = svgwrite.shapes.Line(start=point_start, end=point_end)
else:
svg_arc_path = svgwrite.path.Path(d="M{:0.2f},{:0.2f} a{:0.2f},{:0.2f} 0 {:d},{:d} {:0.2f},{:0.2f}".format(point_start[0], point_start[1], lia, lia, large_arc_flag, sweep_flag, target_x, target_y))
svg_arc_path.fill('green', opacity=0.1).stroke('black', width=1)
svg_outline.append(svg_arc_path)
#arc_polyline = arc_of_circle(point_start, point_mid, point_end, unit_circle_resolution)
#arc_polyline_svg = []
#for i in arc_polyline:
# arc_polyline_svg.append(tuple(i))
#svg_polyline = svgwrite.shapes.Polyline(arc_polyline_svg)
#svg_polyline.fill('green', opacity=0.25).stroke('black', width=1)
##svg_polyline.fill('green', opacity=0.0).stroke('black', width=1) # opacity=0.0 doesn't work!
##svg_polyline.stroke('black', width=1)
#svg_outline.append(svg_polyline)
r_outline = svg_outline
return(r_outline)
def outline_arc_line_with_dxfwrite(ai_segments, ai_outline_closed):
""" Generates the arcs and lines outline with the mozman dxfwrite
"""
dxf_points = [tuple((ai_segments[0][0], ai_segments[0][1]))]
segment_nb = len(ai_segments)-1
dxf_outline = []
for i in range(segment_nb):
segment_type = 'line'
dxf_points.append(tuple((ai_segments[i+1][0], ai_segments[i+1][1])))
point_start = dxf_points[-2]
point_end = dxf_points[-1]
if(len(ai_segments[i+1])==4):
segment_type = 'arc'
dxf_points.append(tuple((ai_segments[i+1][2], ai_segments[i+1][3])))
point_start = dxf_points[-3]
point_mid = dxf_points[-2]
point_end = dxf_points[-1]
if(i==segment_nb-1):
#print("dbg306: last segment")
if(ai_outline_closed):
#print("dbg307: close")
point_end = dxf_points[0]
#print("dbg563: i: {:d} segment: {:s}".format(i, segment_type))
if(segment_type=='line'):
#dxf_line = DXFEngine.line(start=point_start, end=point_end, color=7, layer=default_dxf_layer_name)
dxf_line = DXFEngine.line(start=point_start, end=point_end)
dxf_outline.append(dxf_line)
elif(segment_type=='arc'):
(lia, ptix, ptiy, u, v, w, uv, vw, uw) = arc_3_points_to_radius_center_angles(point_start, point_mid, point_end)
u2 = u
w2 = u + uw
if(uw<0):
#w2 = u + uw + 2*math.pi
u2 = w
w2 = u
#print("dbg384: lia {:0.3f} ptix {:0.3f} ptiy {:0.3f} u2 {:0.3f} w2 {:0.3f}".format(lia, ptix, ptiy, u2*180/math.pi, w2*180/math.pi))
if(lia==0): # when arc_3_points_to_radius_center_angles found that the 3 points are too colinear
dxf_arc = DXFEngine.line(start=point_start, end=point_end)
else:
dxf_arc = DXFEngine.arc(lia, (ptix, ptiy), u2*180/math.pi, w2*180/math.pi)
dxf_outline.append(dxf_arc)
#arc_polyline = arc_of_circle(point_start, point_mid, point_end, unit_circle_resolution)
#arc_polyline_dxf = []
#for i in arc_polyline:
# arc_polyline_dxf.append(tuple(i))
##dxf_polyline = DXFEngine.polyline(arc_polyline_dxf, color=7, layer=default_dxf_layer_name)
##dxf_polyline = DXFEngine.polyline(arc_polyline_dxf, flags=DXFEngine.POLYLINE_3D_POLYLINE)
#dxf_polyline = DXFEngine.polyline(arc_polyline_dxf)
#dxf_outline.append(dxf_polyline)
r_outline = dxf_outline
return(r_outline)
def outline_arc_line_with_tkinter(ai_segments, ai_outline_closed):
""" Transform the arcs and lines outlines into tkinter lines
"""
tkline_points = [tuple((ai_segments[0][0], ai_segments[0][1]))]
segment_nb = len(ai_segments)-1
tkline_outline = []
for i in range(segment_nb):
segment_type = 'line'
tkline_points.append(tuple((ai_segments[i+1][0], ai_segments[i+1][1])))
point_start = tkline_points[-2]
point_end = tkline_points[-1]
if(len(ai_segments[i+1])==4):
segment_type = 'arc'
tkline_points.append(tuple((ai_segments[i+1][2], ai_segments[i+1][3])))
point_start = tkline_points[-3]
point_mid = tkline_points[-2]
point_end = tkline_points[-1]
if(i==segment_nb-1):
#print("dbg306: last segment")
if(ai_outline_closed):
#print("dbg307: close")
point_end = tkline_points[0]
#print("dbg563: i: {:d} segment: {:s}".format(i, segment_type))
if(segment_type=='line'):
tkinter_line = (point_start[0], point_start[1], point_end[0], point_end[1])
tkline_outline.append(tkinter_line)
elif(segment_type=='arc'):
arc_polyline = arc_of_circle(point_start, point_mid, point_end, unit_circle_resolution)
arc_polyline_tk = []
for i in range(len(arc_polyline)-1):
arc_polyline_tk.append((arc_polyline[i][0], arc_polyline[i][1], arc_polyline[i+1][0], arc_polyline[i+1][1]))
tkline_outline.extend(arc_polyline_tk)
r_outline = tuple(tkline_outline)
return(r_outline)
def outline_circle_with_tkinter(ai_center, ai_radius):
""" Transform the circle outline into tkinter lines
"""
circle_points = complete_circle(ai_center, ai_radius, unit_circle_resolution)
circle_polyline_tk = []
for i in range(len(circle_points)-1):
circle_polyline_tk.append((circle_points[i][0], circle_points[i][1], circle_points[i+1][0], circle_points[i+1][1]))
circle_polyline_tk.append((circle_points[-1][0], circle_points[-1][1], circle_points[0][0], circle_points[0][1]))
r_outline = tuple(circle_polyline_tk)
return(r_outline)
def outline_circle(ai_center, ai_radius, ai_backend):
""" Generates a circle according to the selected backend.
Possible backend: freecad, mozman dxfwrite, mozman svgwrite, Tkinter.
"""
#r_outline = ''
# check the radius
if(ai_radius<=0):
print("ERR409: Error, the radius {:0.3f} is negative or null!".format(ai_radius))
sys.exit(2)
# select backend
if(ai_backend=='freecad'):
r_outline = Part.Circle(Base.Vector(ai_center[0], ai_center[1], 0), Base.Vector(0,0,1), ai_radius).toShape()
elif(ai_backend=='svgwrite'):
svg_circle = svgwrite.shapes.Circle(center=(ai_center[0], ai_center[1]), r=ai_radius)
svg_circle.fill('green', opacity=0.25).stroke('black', width=1)
r_outline = [svg_circle] # circle wrapped in list to help the integration in the function write_figure_in_svg()
elif(ai_backend=='dxfwrite'):
dxf_circle = DXFEngine.circle(radius=ai_radius, center=(ai_center[0], ai_center[1]))
r_outline = [dxf_circle] # circle wrapped in list to help the integration in the function write_figure_in_dxf()
elif(ai_backend=='tkinter'):
r_outline = outline_circle_with_tkinter(ai_center, ai_radius)
return(r_outline)
################################################################
# ******** outline creation API ***************
################################################################
### outline level function
def outline_arc_line(ai_segments, ai_backend):
""" Generates the arcs and lines outline according to the selected backend
Possible backend: freecad, mozman dxfwrite, mozman svgwrite, Tkinter.
If ai_segments is a list/tuple of list/tuple, it's a list of segments (i.e. lines or arcs);
a segment starts from the last point of the previous segment.
a line is defined by a list of two floats [x-end, y-end]
an arc is defined by a list of four floats [x-mid, y-mid, x-end, y-end]
The first element of ai_segments is the starting point, i.e. a list of two floats [x-start, y-start]
If the last point [x-end, y-end] of the last segment is equal to [x-start, y-start] the outline is closed.
ai_segments can be made with lists or tuples or a mix of both.
From a programming point of view, ai_segments is a tuple of 2-tulpes and/or 4-tuples.
eg: ai_segments = [ [x1,y1], .. [x2,y2], .. [x3,y3,x4,y4], .. ]
If ai_segments is a list/tuple of three int/float, it's a circle
"""
r_outline = ''
#print("dbg204: len(ai_segments):", len(ai_segments))
#print("dbg205: ai_backend:", ai_backend)
# check is ai_segments is a list or a tuple
if(not isinstance(ai_segments, (tuple, list))):
print("ERR337: Error, ai_segments must be a list or a tuple")
sys.exit(2)
# check if the outline is a circle or a general outline
if(isinstance(ai_segments[0], (tuple, list))): # general outline
# checks on ai_segments for general outline
if(len(ai_segments)<2):
print("ERR509: Error, the segment list must contain at least 2 elements. Currently, len(ai_segments) = {:d}".format(len(ai_segments)))
sys.exit(2)
# convert any format into format-B
if(len(ai_segments[0])==3): # format-A or format-C
print("WARN231: warning, format-A or format-C used in outline_arc_line() and must be converted in format-B with ideal_outline()")
outline_B = cnc_outline.ideal_outline(ai_segments, "outline_arc_line")
else:
outline_B = ai_segments
if(len(outline_B[0])!=2):
print("ERR403: Error, the first element of the segment list must have 2 elements. Currently, len(outline_B[0]) = {:d}".format(len(outline_B[0])))
sys.exit(2)
for i in range(len(outline_B)):
if((len(outline_B[i])!=2)and(len(outline_B[i])!=4)):
print("ERR405: Error, the length of the segment {:d} must be 2 or 4. Currently len(outline_B[i]) = {:d}".format(i, len(outline_B[i])))
sys.exit(2)
# check if the outline is closed
outline_closed = False
if((outline_B[0][0]==outline_B[-1][-2])and(outline_B[0][1]==outline_B[-1][-1])):
#print("dbg207: the outline is closed.")
outline_closed = True
# select backend
if(ai_backend=='freecad'):
r_outline = outline_arc_line_with_freecad(outline_B, outline_closed)
elif(ai_backend=='svgwrite'):
r_outline = outline_arc_line_with_svgwrite(outline_B, outline_closed)
elif(ai_backend=='dxfwrite'):
r_outline = outline_arc_line_with_dxfwrite(outline_B, outline_closed)
elif(ai_backend=='tkinter'):
r_outline = outline_arc_line_with_tkinter(outline_B, outline_closed)
else: # circle outline
if(len(ai_segments)!=3):
print("ERR658: Error, circle outline must be a list of 3 floats (or int)! Current len: {:d}".format(len(ai_segments)))
print("dbg368: ai_segments:", ai_segments)
sys.exit(2)
r_outline = outline_circle((ai_segments[0], ai_segments[1]), ai_segments[2], ai_backend)
return(r_outline)
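# Illustrative usage sketch (placeholder coordinates): a closed format-B outline
# made of four lines and one arc, converted for two of the supported backends.
#
#   rounded_corner_outline = [
#       [0, 0],              # start point
#       [40, 0],             # line to (40, 0)
#       [40, 30],            # line to (40, 30)
#       [37, 37, 30, 40],    # arc through (37, 37) ending at (30, 40)
#       [0, 40],             # line to (0, 40)
#       [0, 0]]              # line back to the start => closed outline
#   tk_lines = outline_arc_line(rounded_corner_outline, 'tkinter')
#   fc_edges = outline_arc_line(rounded_corner_outline, 'freecad')  # needs FreeCAD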
# inherited from display_backend
Two_Canvas = display_backend.Two_Canvas
### figure level functions
def figure_simple_display(ai_figure, ai_overlay_figure=[], ai_parameter_info=""):
""" Display the figure with red lines in the Tkinter Two_Canvas GUI
If you want a finer control on the way outlines are displayed (color, width, overlay), you need to work at the outline level (not figure level)
"""
print("Figure simple display with Tkinter")
# convert all outlines in format-B
# graphic layer
graphic_figure = []
i = 0
for i_outline in ai_figure:
if(cnc_outline.check_outline_format(i_outline)==2):
print("WARN441: Warning, the outline {:d} must be converted in format-B with ideal_outline()!".format(i))
graphic_figure.append(cnc_outline.ideal_outline(i_outline, "figure_simple_display"))
else:
graphic_figure.append(i_outline)
i += 1
# overlay layer
overlay_figure = []
i = 0
for i_outline in ai_overlay_figure:
if(cnc_outline.check_outline_format(i_outline)==2):
print("WARN442: Warning, the overlay outline {:d} must be converted in format-B with ideal_outline()!".format(i))
overlay_figure.append(cnc_outline.ideal_outline(i_outline, "figure_simple_display_overlay"))
else:
overlay_figure.append(i_outline)
i += 1
# start GUI
tk_root = Tkinter.Tk()
fsd_canvas = Two_Canvas(tk_root)
# callback function for display_backend
def sub_fsd_canvas_graphics(ai_rotation_direction, ai_angle_position):
# angle position
l_angle_position = float(ai_angle_position)/100
#
r_canvas_graphics = []
for ol in overlay_figure:
rotated_ol = cnc_outline.outline_rotate(ol, 0, 0, l_angle_position) # rotation of center (0,0) and angle l_angle_position
r_canvas_graphics.append(('overlay_lines', outline_arc_line(rotated_ol, 'tkinter'), 'orange', 2))
for ol in graphic_figure:
rotated_ol = cnc_outline.outline_rotate(ol, 0, 0, l_angle_position) # rotation of center (0,0) and angle l_angle_position
r_canvas_graphics.append(('graphic_lines', outline_arc_line(rotated_ol, 'tkinter'), 'red', 1))
return(r_canvas_graphics)
# end of callback function
fsd_canvas.add_canvas_graphic_function(sub_fsd_canvas_graphics)
fsd_canvas.add_parameter_info(ai_parameter_info)
tk_root.mainloop()
del (tk_root, fsd_canvas)
time.sleep(1.0)
return(0)
def write_figure_in_svg(ai_figure, ai_filename):
""" Generate the SVG file ai_filename from the figure ai_figure (list of format B outline)
"""
print("Generate with mozman svgwrite the SVG file {:s}".format(ai_filename))
object_svg = svgwrite.Drawing(filename = ai_filename)
for i_ol in ai_figure:
svg_outline = outline_arc_line(i_ol, 'svgwrite')
for one_line_or_arc in svg_outline:
object_svg.add(one_line_or_arc)
object_svg.save()
return(0)
def write_figure_in_dxf(ai_figure, ai_filename):
""" Generate the DXF file ai_filename from the figure ai_figure (list of format B outline)
"""
print("Generate with mozman dxfwrite the DXF file {:s}".format(ai_filename))
object_dxf = DXFEngine.drawing(ai_filename)
#object_dxf.add_layer("my_dxf_layer")
for i_ol in ai_figure:
dxf_outline = outline_arc_line(i_ol, 'dxfwrite')
for one_line_or_arc in dxf_outline:
object_dxf.add(one_line_or_arc)
object_dxf.save()
return(0)
def figure_to_freecad_25d_part(ai_figure, ai_extrude_height):
""" the first outline of the figure ai_figure is the outer line of the part
the other outlines are holes in the part
If one outline is not closed, only wire (not face) are extruded
the height of the extrusion is ai_extrude_height
It returns a FreeCAD Part object
If you want to make more complex fuse and cut combinations, you need to work at the outline level (not figure level)
"""
# extra length to remove skin during 3D cut operation
remove_skin_extra = 10.0
# check the number of outlines
outline_nb = len(ai_figure)
if(outline_nb<1):
print("ERR876: Error, the figure doesn't contain any outlines!")
sys.exit(2)
# check if one outline is not closed
face_nwire = True
for oli in range(len(ai_figure)):
ol = ai_figure[oli]
if(isinstance(ol[0], (list,tuple))): # it's a general outline (not a circle)
#print("dbg663: outline with {:d} segments".format(len(ol)-1))
if((ol[0][0]!=ol[-1][-2])or(ol[0][1]!=ol[-1][-1])):
face_nwire = False
print("WARN504: Warning, the outline {:d} is not closed! Only wire can be extruded.".format(oli+1))
# create the FreeCAD part
if(face_nwire): # generate a real solid part
outer_face = Part.Face(Part.Wire(outline_arc_line(ai_figure[0], 'freecad').Edges))
outer_solid = outer_face.extrude(Base.Vector(0,0,ai_extrude_height)) # straight linear extrusion
if(outline_nb>1): # holes need to be cut from outer_solid
inner_solid = []
for i in range(outline_nb-1):
inner_face = Part.Face(Part.Wire(outline_arc_line(ai_figure[i+1], 'freecad').Edges))
inner_solid.append(inner_face.extrude(Base.Vector(0,0,ai_extrude_height+2*remove_skin_extra))) # straight linear extrusion
#inner_hole = Part.makeCompound(inner_solid) # not satisfying result with overlap holes
inner_hole = inner_solid[0]
for i in range(outline_nb-2):
inner_hole = inner_hole.fuse(inner_solid[i+1])
inner_hole.translate(Base.Vector(0,0,-remove_skin_extra))
r_part = outer_solid.cut(inner_hole)
else:
r_part = outer_solid
else: # generate a simple extrusion of wires
wire_part = []
for i in range(outline_nb):
wire = Part.Wire(outline_arc_line(ai_figure[i], 'freecad').Edges)
wire_part.append(wire.extrude(Base.Vector(0,0,ai_extrude_height)))
r_part = Part.makeCompound(wire_part)
# return
return(r_part)
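# Illustrative usage sketch for figure_to_freecad_25d_part (added comment, not part of the original
# module; it mirrors the wfl_figure test further below). The first outline is the outer contour,
# the remaining outlines are holes:
#   example_outer = [[-60, -40], [60, -40], [60, 40], [-60, 40], [-60, -40]]
#   example_hole = [30, 0, 15]   # a circular hole given as [center_x, center_y, radius]
#   example_part = figure_to_freecad_25d_part([example_outer, example_hole], 20.0)
#   # example_part is a FreeCAD Part shape, e.g. example_part.exportBrep("example_part.brep")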
################################################################
# ******** test API ***********
################################################################
def outline_arc_line_test1():
""" test the functions outline_arc_line and outline_circle.
"""
l_ol1 = [
[0,0],
[20,0],
[20,20],
[0,20],
[0,0]]
l_ol2 = [
[110,0],
[120,0],
[130,0, 130,10],
[130,20],
[130,30, 120,30],
[110,30],
[100,30, 100,20],
[100,10],
[100,0, 110,0]]
l_ol3 = [
[210,0],
[220,0],
[230,0, 230,10],
[230,20],
[230,30, 220,30],
[210,30],
[200,30, 200,20],
[200,10]]
#[200,0, 210,0]]
  # check CW (clockwise)
l_ol4 = [
[300,10],
[300, 20],
[300,30, 310,30],
[320,30],
[330,30, 330,20],
[330,10],
[330,0, 320,0],
[310,0]]
l_ol5 = [
[0,100],
[100,150],
[110,155, 120, 150],
[150,110],
[160,100, 170, 105],
[200,200],
[0,200],
[0,100]]
l_ols = [l_ol1, l_ol2, l_ol3, l_ol4, l_ol5]
#l_ols = [l_ol2]
# circle
l_circle_center = [200,200]
l_circle_radius = 150
# backend freecad
print("dbg701: test1 backend freecad")
for i_ol in l_ols:
r_ol = outline_arc_line(i_ol, 'freecad')
#Part.show(r_ol)
l_test_face = Part.Face(Part.Wire(r_ol.Edges))
r_test_solid = l_test_face.extrude(Base.Vector(0,0,1)) # straight linear extrusion
Part.show(r_test_solid)
r_ol = outline_circle(l_circle_center, l_circle_radius, 'freecad')
l_test_face = Part.Face(Part.Wire(r_ol.Edges))
r_test_solid = l_test_face.extrude(Base.Vector(0,0,1)) # straight linear extrusion
Part.show(r_test_solid)
# create the output directory
l_output_dir = "test_output"
print("Create the output directory: {:s}".format(l_output_dir))
design_help.mkdir_p(l_output_dir)
# backend svgwrite
print("dbg702: test1 backend svgwrite")
output_svg_file_name = "{:s}/outline_arc_line_test1_00.svg".format(l_output_dir)
object_svg = svgwrite.Drawing(filename = output_svg_file_name)
#output_file_idx = 0
for i_ol in l_ols:
#output_file_idx += 1
#output_svg_file_name = "outline_arc_line_test1_{:02d}.svg".format(output_file_idx)
#object_svg = svgwrite.Drawing(filename = output_svg_file_name)
svg_outline = outline_arc_line(i_ol, 'svgwrite')
for one_line_or_arc in svg_outline:
object_svg.add(one_line_or_arc)
#object_svg.save()
one_circle = outline_circle(l_circle_center, l_circle_radius, 'svgwrite')
object_svg.add(one_circle[0])
object_svg.save()
# backend dxfwrite
print("dbg703: test1 backend dxfwrite")
output_dxf_file_name = "{:s}/outline_arc_line_test1_00.dxf".format(l_output_dir)
object_dxf = DXFEngine.drawing(output_dxf_file_name)
#object_dxf.add_layer(default_dxf_layer_name)
for i_ol in l_ols:
dxf_outline = outline_arc_line(i_ol, 'dxfwrite')
for one_line_or_arc in dxf_outline:
object_dxf.add(one_line_or_arc)
one_circle = outline_circle(l_circle_center, l_circle_radius, 'dxfwrite')
object_dxf.add(one_circle[0])
object_dxf.save()
# backend tkinter
print("dbg704: test1 backend tkinter")
tk_root = Tkinter.Tk()
#my_canvas = display_backend.Two_Canvas(tk_root)
my_canvas = Two_Canvas(tk_root)
# callback function for display_backend
def sub_canvas_graphics(ai_rotation_direction, ai_angle_position):
# angle position
l_angle_position = float(ai_angle_position)/100
#
r_canvas_graphics = []
for i_ol in l_ols:
r_canvas_graphics.append(('graphic_lines', outline_arc_line(i_ol, 'tkinter'), 'red', 2))
r_canvas_graphics.append(('graphic_lines', outline_circle(l_circle_center, l_circle_radius, 'tkinter'), 'blue', 2))
return(r_canvas_graphics)
# end of callback function
my_canvas.add_canvas_graphic_function(sub_canvas_graphics)
tk_root.mainloop()
del (my_canvas, tk_root) # because Tkinter will be used again later in this script
#time.sleep(0.3)
### test the figure-level functions
wfl_outer_rectangle_B = [
[-60, -40],
[ 60, -40],
[ 60, 40],
[-60, 40],
[-60, -40]]
wfl_inner_square_B = [
[-10, -10],
[ 10, -10],
[ 10, 10],
[-10, 10],
[-10, -10]]
wfl_inner_circle1 = [30,0, 15]
wfl_inner_circle2 = [40,0, 10]
wfl_figure = [wfl_outer_rectangle_B, wfl_inner_square_B, wfl_inner_circle1, wfl_inner_circle2]
# display the figure
figure_simple_display(wfl_figure)
wfl_extrude_height = 20.0
# create a FreeCAD part
wfl_part = figure_to_freecad_25d_part(wfl_figure, wfl_extrude_height)
# output file with mozman
print("Generate {:s}/obt1_with_mozman.svg".format(l_output_dir))
write_figure_in_svg(wfl_figure, "{:s}/obt1_with_mozman.svg".format(l_output_dir))
print("Generate {:s}/obt1_with_mozman.dxf".format(l_output_dir))
write_figure_in_dxf(wfl_figure, "{:s}/obt1_with_mozman.dxf".format(l_output_dir))
# wfl_part in 3D BRep
print("Generate {:s}/obt1_part.brep".format(l_output_dir))
wfl_part.exportBrep("{:s}/obt1_part.brep".format(l_output_dir))
# wfl_part in 2D DXF
print("Generate {:s}/obt1_part.dxf".format(l_output_dir))
  export_2d.export_to_dxf(wfl_part, Base.Vector(0,0,1), wfl_extrude_height/2, "{:s}/obt1_part.dxf".format(l_output_dir)) # slice wfl_part in the XY plane at a height of wfl_extrude_height/2
#
r_test = 1
return(r_test)
################################################################
# ******** command line interface ***********
################################################################
def outline_backends_cli(ai_args=""):
""" command line interface to run this script in standalone
"""
ob_parser = argparse.ArgumentParser(description='Test the outline_backends API.')
ob_parser.add_argument('--test1','--t1', action='store_true', default=False, dest='sw_test1',
help='Run outline_arc_line_test1()')
effective_args = design_help.get_effective_args(ai_args)
ob_args = ob_parser.parse_args(effective_args)
r_obc = 0
print("dbg111: start testing outline_backends.py")
if(ob_args.sw_test1):
r_obc = outline_arc_line_test1()
print("dbg999: end of script")
return(r_obc)
################################################################
# main
################################################################
if __name__ == "__main__":
FreeCAD.Console.PrintMessage("outline_backends.py says hello!\n")
# select your script behavior
#outline_backends_cli()
outline_backends_cli("--test1")
| gpl-3.0 | 2,421,617,700,390,228,500 | 40.894857 | 205 | 0.632931 | false |
UKPLab/sentence-transformers | examples/training/quora_duplicate_questions/training_MultipleNegativesRankingLoss.py | 1 | 8798 | """
This script demonstrates how to train a sentence embedding model for Information Retrieval.
As dataset, we use Quora Duplicate Questions, where we have pairs of duplicate questions.
As loss function, we use MultipleNegativesRankingLoss. Here, we only need positive pairs, i.e., pairs of sentences/texts that are considered to be relevant. Our dataset looks like this: (a_1, b_1), (a_2, b_2), ..., where a_i and b_i are texts and each pair (a_i, b_i) is relevant (e.g. the two are duplicates).
MultipleNegativesRankingLoss takes a random subset of these, for example (a_1, b_1), ..., (a_n, b_n). a_i and b_i are considered to be relevant and should be close in vector space. All other b_j (for i != j) are negative examples and the distance between a_i and b_j should be maximized. Note: MultipleNegativesRankingLoss only works if a random b_j is likely not to be relevant for a_i. This is the case for our duplicate questions dataset: if we sample a random b_j, it is unlikely to be a duplicate of a_i.
The model we get works well for duplicate questions mining and for duplicate questions information retrieval. For question pair classification, other losses (like OnlineContrastiveLoss) work better.
"""
from torch.utils.data import DataLoader
from sentence_transformers import losses, util
from sentence_transformers import LoggingHandler, SentenceTransformer, evaluation
from sentence_transformers.readers import InputExample
import logging
from datetime import datetime
import csv
import os
from zipfile import ZipFile
import random
#### Just some code to print debug information to stdout
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler()])
logger = logging.getLogger(__name__)
#### /print debug information to stdout
#As base model, we use DistilBERT-base that was pre-trained on NLI and STSb data
model = SentenceTransformer('stsb-distilbert-base')
#Training for multiple epochs can be beneficial, as in each epoch a mini-batch is sampled differently
#hence, we get different negatives for each positive
num_epochs = 10
#Increasing the batch size improves the performance for MultipleNegativesRankingLoss. Choose it as large as possible
#I achieved good results with a batch size of 300-350 (requires about 30 GB of GPU memory)
train_batch_size = 64
dataset_path = 'quora-IR-dataset'
model_save_path = 'output/training_MultipleNegativesRankingLoss-'+datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
os.makedirs(model_save_path, exist_ok=True)
# Check if the dataset exists. If not, download and extract
if not os.path.exists(dataset_path):
logger.info("Dataset not found. Download")
zip_save_path = 'quora-IR-dataset.zip'
util.http_get(url='https://sbert.net/datasets/quora-IR-dataset.zip', path=zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(dataset_path)
######### Read train data ##########
train_samples = []
with open(os.path.join(dataset_path, "classification/train_pairs.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['is_duplicate'] == '1':
train_samples.append(InputExample(texts=[row['question1'], row['question2']], label=1))
train_samples.append(InputExample(texts=[row['question2'], row['question1']], label=1)) #if A is a duplicate of B, then B is a duplicate of A
# After reading the train_samples, we create a DataLoader
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
train_loss = losses.MultipleNegativesRankingLoss(model)
################### Development Evaluators ##################
# We add 3 evaluators that evaluate the model on Duplicate Questions pair classification,
# Duplicate Questions Mining, and Duplicate Questions Information Retrieval
evaluators = []
###### Classification ######
# Given (question1, question2), is this a duplicate or not?
# The evaluator will compute the embeddings for both questions and then compute
# a cosine similarity. If the similarity is above a threshold, we have a duplicate.
dev_sentences1 = []
dev_sentences2 = []
dev_labels = []
with open(os.path.join(dataset_path, "classification/dev_pairs.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
dev_sentences1.append(row['question1'])
dev_sentences2.append(row['question2'])
dev_labels.append(int(row['is_duplicate']))
binary_acc_evaluator = evaluation.BinaryClassificationEvaluator(dev_sentences1, dev_sentences2, dev_labels)
evaluators.append(binary_acc_evaluator)
###### Duplicate Questions Mining ######
# Given a large corpus of questions, identify all duplicates in that corpus.
# For faster processing, we limit the development corpus to only 10,000 sentences.
max_dev_samples = 10000
dev_sentences = {}
dev_duplicates = []
with open(os.path.join(dataset_path, "duplicate-mining/dev_corpus.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
dev_sentences[row['qid']] = row['question']
if len(dev_sentences) >= max_dev_samples:
break
with open(os.path.join(dataset_path, "duplicate-mining/dev_duplicates.tsv"), encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
if row['qid1'] in dev_sentences and row['qid2'] in dev_sentences:
dev_duplicates.append([row['qid1'], row['qid2']])
# The ParaphraseMiningEvaluator computes the cosine similarity between all sentences and
# extracts a list with the pairs that have the highest similarity. Given the duplicate
# information in dev_duplicates, it then computes an F1 score for how well our duplicate mining worked
paraphrase_mining_evaluator = evaluation.ParaphraseMiningEvaluator(dev_sentences, dev_duplicates, name='dev')
evaluators.append(paraphrase_mining_evaluator)
###### Duplicate Questions Information Retrieval ######
# Given a question and a large corpus of thousands of questions, find the most relevant (i.e. duplicate) question
# in that corpus.
# For faster processing, we limit the development corpus to only 10,000 sentences.
max_corpus_size = 10000
ir_queries = {} #Our queries (qid => question)
ir_needed_qids = set() #QIDs we need in the corpus
ir_corpus = {} #Our corpus (qid => question)
ir_relevant_docs = {} #Mapping of relevant documents for a given query (qid => set([relevant_question_ids]))
with open(os.path.join(dataset_path, 'information-retrieval/dev-queries.tsv'), encoding='utf8') as fIn:
next(fIn) #Skip header
for line in fIn:
qid, query, duplicate_ids = line.strip().split('\t')
duplicate_ids = duplicate_ids.split(',')
ir_queries[qid] = query
ir_relevant_docs[qid] = set(duplicate_ids)
for qid in duplicate_ids:
ir_needed_qids.add(qid)
# First get all needed relevant documents (i.e., we must ensure that the relevant questions are actually in the corpus)
distraction_questions = {}
with open(os.path.join(dataset_path, 'information-retrieval/corpus.tsv'), encoding='utf8') as fIn:
next(fIn) #Skip header
for line in fIn:
qid, question = line.strip().split('\t')
if qid in ir_needed_qids:
ir_corpus[qid] = question
else:
distraction_questions[qid] = question
# Now, also add some irrelevant questions to fill our corpus
other_qid_list = list(distraction_questions.keys())
random.shuffle(other_qid_list)
for qid in other_qid_list[0:max(0, max_corpus_size-len(ir_corpus))]:
ir_corpus[qid] = distraction_questions[qid]
#Given queries, a corpus and a mapping with relevant documents, the InformationRetrievalEvaluator computes different IR
# metrics. For our use case MRR@k and Accuracy@k are relevant.
ir_evaluator = evaluation.InformationRetrievalEvaluator(ir_queries, ir_corpus, ir_relevant_docs)
evaluators.append(ir_evaluator)
# Create a SequentialEvaluator. This SequentialEvaluator runs all three evaluators in a sequential order.
# We optimize the model with respect to the score from the last evaluator (scores[-1])
seq_evaluator = evaluation.SequentialEvaluator(evaluators, main_score_function=lambda scores: scores[-1])
logger.info("Evaluate model without training")
seq_evaluator(model, epoch=0, steps=0, output_path=model_save_path)
# Train the model
model.fit(train_objectives=[(train_dataloader, train_loss)],
evaluator=seq_evaluator,
epochs=num_epochs,
warmup_steps=1000,
output_path=model_save_path
)
| apache-2.0 | -5,897,717,325,137,696,000 | 45.062827 | 508 | 0.720732 | false |
BD2KGenomics/slugflow | src/toil/test/src/autoDeploymentTest.py | 1 | 23946 | import logging
import subprocess
import time
from contextlib import contextmanager
from toil.lib.iterables import concat
from toil.test import ApplianceTestSupport, needs_local_appliance, needs_mesos, slow
from toil.version import exactPython
logger = logging.getLogger(__name__)
@needs_mesos
@needs_local_appliance
@slow
class AutoDeploymentTest(ApplianceTestSupport):
"""
Tests various auto-deployment scenarios. Using the appliance, i.e. a docker container,
for these tests allows for running worker processes on the same node as the leader process
while keeping their file systems separate from each other and the leader process. Separate
file systems are crucial to prove that auto-deployment does its job.
"""
def setUp(self):
logging.basicConfig(level=logging.INFO)
super(AutoDeploymentTest, self).setUp()
@contextmanager
def _venvApplianceCluster(self):
"""
Creates an appliance cluster with a virtualenv at './venv' on the leader and a temporary
directory on the host mounted at /data in the leader and worker containers.
"""
dataDirPath = self._createTempDir(purpose='data')
with self._applianceCluster(mounts={dataDirPath: '/data'}) as (leader, worker):
leader.runOnAppliance('virtualenv',
'--system-site-packages',
'--never-download', # prevent silent upgrades to pip etc
'--python', exactPython,
'venv')
leader.runOnAppliance('venv/bin/pip', 'list') # For diagnostic purposes
yield leader, worker
    # TODO: Are we sure the python in the appliance we are testing is the same
    # as the one we are testing from? If not, how can we find out which version it has?
sitePackages = 'venv/lib/{}/site-packages'.format(exactPython)
def testRestart(self):
"""
Test whether auto-deployment works on restart.
"""
with self._venvApplianceCluster() as (leader, worker):
def userScript():
from toil.common import Toil
from toil.job import Job
# noinspection PyUnusedLocal
def job(job, disk='10M', cores=1, memory='10M'):
assert False
if __name__ == '__main__':
options = Job.Runner.getDefaultArgumentParser().parse_args()
with Toil(options) as toil:
if toil.config.restart:
toil.restart()
else:
toil.start(Job.wrapJobFn(job))
userScript = self._getScriptSource(userScript)
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
pythonArgs = ['venv/bin/python', '-m', 'foo.bar']
toilArgs = ['--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--defaultMemory=10M',
'/data/jobstore']
command = concat(pythonArgs, toilArgs)
self.assertRaises(subprocess.CalledProcessError, leader.runOnAppliance, *command)
# Deploy an updated version of the script ...
userScript = userScript.replace('assert False', 'assert True')
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
# ... and restart Toil.
command = concat(pythonArgs, '--restart', toilArgs)
leader.runOnAppliance(*command)
def testSplitRootPackages(self):
"""
Test whether auto-deployment works with a virtualenv in which jobs are defined in
completely separate branches of the package hierarchy. Initially, auto-deployment did
deploy the entire virtualenv but jobs could only be defined in one branch of the package
hierarchy. We define a branch as the maximum set of fully qualified package paths that
share the same first component. IOW, a.b and a.c are in the same branch, while a.b and
d.c are not.
"""
with self._venvApplianceCluster() as (leader, worker):
# Deploy the library module with job definitions
def libraryModule():
# noinspection PyUnusedLocal
def libraryJob(job):
open('/data/foo.txt', 'w').close()
leader.deployScript(path=self.sitePackages,
packagePath='toil_lib.foo',
script=libraryModule)
# Deploy the user script
def userScript():
# noinspection PyUnresolvedReferences
from toil_lib.foo import libraryJob
from toil.common import Toil
from toil.job import Job
# noinspection PyUnusedLocal
def job(job, disk='10M', cores=1, memory='10M'):
# Double the requirements to prevent chaining as chaining might hide problems
# in auto-deployment code.
job.addChildJobFn(libraryJob, disk='20M', cores=cores, memory=memory)
if __name__ == '__main__':
options = Job.Runner.getDefaultArgumentParser().parse_args()
with Toil(options) as toil:
if toil.config.restart:
toil.restart()
else:
toil.start(Job.wrapJobFn(job))
leader.deployScript(path=self.sitePackages,
packagePath='toil_script.bar',
script=userScript)
# Assert that output file isn't there
worker.runOnAppliance('test', '!', '-f', '/data/foo.txt')
# Just being paranoid
self.assertRaises(subprocess.CalledProcessError,
worker.runOnAppliance, 'test', '-f', '/data/foo.txt')
leader.runOnAppliance('venv/bin/python',
'-m', 'toil_script.bar',
'--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--defaultMemory=10M',
'/data/jobstore')
            # Assert that our output file is there
worker.runOnAppliance('test', '-f', '/data/foo.txt')
def testUserTypesInJobFunctionArgs(self):
"""
Test encapsulated, function-wrapping jobs where the function arguments reference
user-defined types.
Mainly written to cover https://github.com/BD2KGenomics/toil/issues/1259 but then also
revealed https://github.com/BD2KGenomics/toil/issues/1278.
"""
with self._venvApplianceCluster() as (leader, worker):
def userScript():
from toil.common import Toil
from toil.job import Job
# A user-defined type, i.e. a type defined in the user script
class X(object):
pass
# noinspection PyUnusedLocal
def job(job, x, disk='10M', cores=1, memory='10M'):
return x
if __name__ == '__main__':
options = Job.Runner.getDefaultArgumentParser().parse_args()
x = X()
with Toil(options) as toil:
r = toil.start(Job.wrapJobFn(job, x).encapsulate())
# Assert that the return value is of type X, but not X from the __main__
# module but X from foo.bar, the canonical name for the user module. The
# translation from __main__ to foo.bar is a side effect of auto-deployment.
assert r.__class__ is not X
import foo.bar
assert r.__class__ is foo.bar.X
# Assert that a copy was made. This is a side effect of pickling/unpickling.
assert x is not r
userScript = self._getScriptSource(userScript)
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
leader.runOnAppliance('venv/bin/python', '-m', 'foo.bar',
'--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--defaultMemory=10M',
'--defaultDisk=10M',
'/data/jobstore')
def testDeferralWithConcurrentEncapsulation(self):
"""
        Ensure that the following DAG succeeds:

                                  ┌───────────┐
                                  │ Root (W1) │
                                  └─────┬─────┘
                                        │
                            ┌───────────┴───────────┐
                            ▼                       ▼
            ┌────────────────┐   ┌────────────────────┐
            │ Deferring (W2) │   │ Encapsulating (W3) │══════════════╗
            └────────────────┘   └─────────┬──────────┘              ║
                                           │                         ║
                                           ▼                         ▼
                                 ┌───────────────────┐      ┌────────────────┐
                                 │ Encapsulated (W3) │      │ Follow-on (W6) │
                                 └─────────┬─────────┘      └────────┬───────┘
                                           │                         │
                                   ┌───────┴───────┐                 │
                                   ▼               ▼                 ▼
                           ┌──────────────┐ ┌──────────────┐ ┌──────────────┐
                           │ Dummy 1 (W4) │ │ Dummy 2 (W5) │ │   Last (W6)  │
                           └──────────────┘ └──────────────┘ └──────────────┘
The Wn numbers denote the worker processes that a particular job is run in. `Deferring`
adds a deferred function and then runs for a long time. The deferred function will be
present in the cache state for the duration of `Deferred`. `Follow-on` is the generic Job
instance that's added by encapsulating a job. It runs on the same worker node but in a
        separate worker process, as the first job in that worker. Because …
1) it is the first job in its worker process (the user script has not been made available
on the sys.path by a previous job in that worker) and
2) it shares the cache state with the `Deferring` job and
        3) it is an instance of Job (and so does not introduce the user script to sys.path itself),
        … it might cause problems with deserializing a deferred function defined in the user script.
`Encapsulated` has two children to ensure that `Follow-on` is run in a separate worker.
"""
with self._venvApplianceCluster() as (leader, worker):
def userScript():
from toil.common import Toil
from toil.job import Job
def root(rootJob):
def nullFile():
return rootJob.fileStore.jobStore.importFile('file:///dev/null')
startFile = nullFile()
endFile = nullFile()
rootJob.addChildJobFn(deferring, startFile, endFile)
encapsulatedJob = Job.wrapJobFn(encapsulated, startFile)
encapsulatedJob.addChildFn(dummy)
encapsulatedJob.addChildFn(dummy)
encapsulatingJob = encapsulatedJob.encapsulate()
rootJob.addChild(encapsulatingJob)
encapsulatingJob.addChildJobFn(last, endFile)
def dummy():
pass
def deferred():
pass
# noinspection PyUnusedLocal
def deferring(job, startFile, endFile):
job.defer(deferred)
job.fileStore.jobStore.deleteFile(startFile)
timeout = time.time() + 10
while job.fileStore.jobStore.fileExists(endFile):
assert time.time() < timeout
time.sleep(1)
def encapsulated(job, startFile):
timeout = time.time() + 10
while job.fileStore.jobStore.fileExists(startFile):
assert time.time() < timeout
time.sleep(1)
def last(job, endFile):
job.fileStore.jobStore.deleteFile(endFile)
if __name__ == '__main__':
options = Job.Runner.getDefaultArgumentParser().parse_args()
with Toil(options) as toil:
rootJob = Job.wrapJobFn(root)
toil.start(rootJob)
userScript = self._getScriptSource(userScript)
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
leader.runOnAppliance('venv/bin/python', '-m', 'foo.bar',
'--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--retryCount=0',
'--defaultMemory=10M',
'--defaultDisk=10M',
'/data/jobstore')
def testDeferralWithFailureAndEncapsulation(self):
"""
        Ensure that the following DAG succeeds:

                                  ┌───────────┐
                                  │ Root (W1) │
                                  └─────┬─────┘
                                        │
                            ┌───────────┴───────────┐
                            ▼                       ▼
            ┌────────────────┐   ┌────────────────────┐
            │ Deferring (W2) │   │ Encapsulating (W3) │═══════════════════════╗
            └────────────────┘   └─────────┬──────────┘                       ║
                                           │                                  ║
                                           ▼                                  ▼
                                 ┌───────────────────┐                ┌────────────────┐
                                 │ Encapsulated (W3) │════════════╗   │ Follow-on (W7) │
                                 └─────────┬─────────┘            ║   └────────────────┘
                                           │                      ║
                                   ┌───────┴───────┐              ║
                                   ▼               ▼              ▼
                            ┌────────────┐  ┌────────────┐ ┌──────────────┐
                            │ Dummy (W4) │  │ Dummy (W5) │ │ Trigger (W6) │
                            └────────────┘  └────────────┘ └──────────────┘
`Trigger` causes `Deferring` to crash. `Follow-on` runs next, detects `Deferring`'s
left-overs and runs the deferred function. `Follow-on` is an instance of `Job` and the
first job in its worker process. This test ensures that despite these circumstances,
the user script is loaded before the deferred functions defined in it are being run.
`Encapsulated` has two children to ensure that `Follow-on` is run in a new worker. That's
the only way to guarantee that the user script has not been loaded yet, which would cause
the test to succeed coincidentally. We want to test that auto-deploying and loading of the
user script are done properly *before* deferred functions are being run and before any
jobs have been executed by that worker.
"""
with self._venvApplianceCluster() as (leader, worker):
def userScript():
import os
import time
from toil.common import Toil
from toil.job import Job
from toil.leader import FailedJobsException
TIMEOUT = 10
def root(rootJob):
def nullFile():
return rootJob.fileStore.jobStore.importFile('file:///dev/null')
startFile = nullFile()
endFile = nullFile()
rootJob.addChildJobFn(deferring, startFile, endFile)
encapsulatedJob = Job.wrapJobFn(encapsulated, startFile)
encapsulatedJob.addChildFn(dummy)
encapsulatedJob.addChildFn(dummy)
encapsulatedJob.addFollowOnJobFn(trigger, endFile)
encapsulatingJob = encapsulatedJob.encapsulate()
rootJob.addChild(encapsulatingJob)
def dummy():
pass
def deferredFile(config):
"""
Return path to a file at the root of the job store, exploiting the fact that
the job store is shared between leader and worker container.
"""
prefix = 'file:'
locator = config.jobStore
assert locator.startswith(prefix)
return os.path.join(locator[len(prefix):], 'testDeferredFile')
def deferred(deferredFilePath):
"""
The deferred function that is supposed to run.
"""
os.unlink(deferredFilePath)
# noinspection PyUnusedLocal
def deferring(job, startFile, endFile):
"""
A job that adds the deferred function and then crashes once the `trigger` job
tells it to.
"""
job.defer(deferred, deferredFile(job._config))
jobStore = job.fileStore.jobStore
jobStore.deleteFile(startFile)
with jobStore.updateFileStream(endFile) as fH:
fH.write(str(os.getpid()))
timeout = time.time() + TIMEOUT
while jobStore.fileExists(endFile):
assert time.time() < timeout
time.sleep(1)
os.kill(os.getpid(), 9)
def encapsulated(job, startFile):
"""
A job that waits until the `deferring` job is running and waiting to be crashed.
"""
timeout = time.time() + TIMEOUT
while job.fileStore.jobStore.fileExists(startFile):
assert time.time() < timeout
time.sleep(1)
def trigger(job, endFile):
"""
A job that determines the PID of the worker running the `deferring` job,
tells the `deferring` job to crash and then waits for the corresponding
worker process to end. By waiting we can be sure that the `follow-on` job
finds the left-overs of the `deferring` job.
"""
import errno
jobStore = job.fileStore.jobStore
with jobStore.readFileStream(endFile) as fH:
pid = int(fH.read())
os.kill(pid, 0)
jobStore.deleteFile(endFile)
timeout = time.time() + TIMEOUT
while True:
try:
os.kill(pid, 0)
except OSError as e:
if e.errno == errno.ESRCH:
break
else:
raise
else:
assert time.time() < timeout
time.sleep(1)
def tryUnlink(deferredFilePath):
try:
os.unlink(deferredFilePath)
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
if __name__ == '__main__':
import errno
options = Job.Runner.getDefaultArgumentParser().parse_args()
with Toil(options) as toil:
deferredFilePath = deferredFile(toil.config)
open(deferredFilePath, 'w').close()
try:
assert os.path.exists(deferredFilePath)
try:
toil.start(Job.wrapJobFn(root))
except FailedJobsException as e:
assert e.numberOfFailedJobs == 2 # `root` and `deferring`
assert not os.path.exists(deferredFilePath), \
'Apparently, the deferred function did not run.'
else:
assert False, 'Workflow should not have succeeded.'
finally:
tryUnlink(deferredFilePath)
userScript = self._getScriptSource(userScript)
leader.deployScript(path=self.sitePackages,
packagePath='foo.bar',
script=userScript)
leader.runOnAppliance('venv/bin/python', '-m', 'foo.bar',
'--logDebug',
'--batchSystem=mesos',
'--mesosMaster=localhost:5050',
'--retryCount=0',
'--defaultMemory=10M',
'--defaultDisk=10M',
'/data/jobstore')
| apache-2.0 | 5,329,596,328,372,900,000 | 45.396694 | 100 | 0.462326 | false |
pjgeng/Contour-Labels | create_contour_labels.py | 1 | 10693 | ##Create Contour Labels=name
##input_contours=vector
##input_label_guides=vector
##output_contours=output vector
##output_labels=output vector
##create_clipped_contours=boolean True
##smooth_contours=boolean False
##invert_labels=boolean False
##index_contour_modal=number 25
##contour_step=number 5
##start_buffer=number 20
##buffer_increment=number 10
##elevation_field_name=String elev
import math
import qgis
from qgis.core import *
from PyQt4.QtCore import *
def calcDist(p1x,p1y,p2x,p2y):
dist = math.sqrt((p2x - p1x)**2 + (p2y - p1y)**2)
return dist
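# Worked example for calcDist (added comment, not part of the original script):
# calcDist(0, 0, 3, 4) returns 5.0, i.e. the Euclidean distance between the two points.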
version = qgis.utils.QGis.QGIS_VERSION.split('-')[0].split('.',2)
progress.setText("Running Contour Label creation for QGIS version "+str(qgis.utils.QGis.QGIS_VERSION.split('-')[0]))
if (smooth_contours):
progress.setText("Smoothing contours")
outputs_GRASSGENERALIZE_1=processing.runalg('grass7:v.generalize',input_contours,9,20,7,50,0.5,3,0,0,0,1,1,1,False,True,None,-1,0.0001,0,None)
use_contours=outputs_GRASSGENERALIZE_1['output']
else:
progress.setText("Using existing contours")
use_contours=input_contours
progress.setText("Creating contour intersections")
outputs_QGISLINEINTERSECTIONS_1=processing.runalg('qgis:lineintersections',use_contours,input_label_guides,'ID','id',None)
progress.setText("Processing elevations")
outputs_QGISJOINATTRIBUTESTABLE_1=processing.runalg('qgis:joinattributestable', outputs_QGISLINEINTERSECTIONS_1['OUTPUT'],input_contours,'ID','ID',None)
outputs_QGISFIELDCALCULATOR_10=processing.runalg('qgis:fieldcalculator', outputs_QGISJOINATTRIBUTESTABLE_1['OUTPUT_LAYER'],'elevation',1,1.0,0.0,True,'"'+str(elevation_field_name)+'"',None)
outputs_QGISDELETECOLUMN_1=processing.runalg('qgis:deletecolumn',outputs_QGISFIELDCALCULATOR_10['OUTPUT_LAYER'],str(elevation_field_name),None)
outputs_QGISFIELDCALCULATOR_11=processing.runalg('qgis:fieldcalculator', outputs_QGISDELETECOLUMN_1['OUTPUT'],'elev',1,1.0,0.0,True,'"elevation"',None)
outputs_QGISDELETECOLUMN_2=processing.runalg('qgis:deletecolumn',outputs_QGISFIELDCALCULATOR_11['OUTPUT_LAYER'],'elevation',None)
outputs_QGISDELETECOLUMN_3=processing.runalg('qgis:deletecolumn',outputs_QGISDELETECOLUMN_2['OUTPUT'],'ID_2',None)
outputs_QGISFIELDCALCULATOR_7=processing.runalg('qgis:fieldcalculator', outputs_QGISDELETECOLUMN_3['OUTPUT'],'key',2,128.0,0.0,True,'concat("id_1",\'_\',"elev")',None)
progress.setText("Determining index contours")
outputs_QGISFIELDCALCULATOR_1=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_7['OUTPUT_LAYER'],'index',1,1.0,0.0,True,'"elev" % '+str(index_contour_modal)+' = 0',None)
progress.setText("Calculating label rotation")
outputs_QGISFIELDCALCULATOR_12=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_1['OUTPUT_LAYER'],'rot',0,6.0,3.0,True,'0',None)
outputs_QGISFIXEDDISTANCEBUFFER_3=processing.runalg('qgis:fixeddistancebuffer', outputs_QGISFIELDCALCULATOR_1['OUTPUT_LAYER'],2.0,5.0,False,None)
outputs_QGISINTERSECTION_2=processing.runalg('qgis:intersection', use_contours,outputs_QGISFIXEDDISTANCEBUFFER_3['OUTPUT'],None)
outputs_QGISFIELDCALCULATOR_2=processing.runalg('qgis:fieldcalculator', outputs_QGISINTERSECTION_2['OUTPUT'],'sint',2,128.0,0.0,True,'geom_to_wkt(start_point($geometry))',None)
outputs_QGISFIELDCALCULATOR_3=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_2['OUTPUT_LAYER'],'eint',2,128.0,0.0,True,'geom_to_wkt(end_point($geometry))',None)
if (invert_labels):
deg = 270
else:
deg = 90
outputs_QGISFIELDCALCULATOR_5=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_3['OUTPUT_LAYER'],'rot',0,6.0,3.0,True,str(deg)+'-((atan((x(geom_from_wkt("sint"))-x(geom_from_wkt("eint")))/(y(geom_from_wkt("sint"))-y(geom_from_wkt("eint")))))*180/3.14159+(180*(((y(geom_from_wkt("sint"))-y(geom_from_wkt("eint")))<0)+(((x(geom_from_wkt("sint"))-x(geom_from_wkt("eint")))<0 AND (y(geom_from_wkt("sint"))-y(geom_from_wkt("eint")))>0)*2))))',None)
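# Hand-worked example of the rotation expression above (added comment, not part of the original
# script), assuming a guide segment whose stored start point "sint" is (0, 0) and end point "eint"
# is (10, 10):
#   dx = 0 - 10 = -10, dy = 0 - 10 = -10
#   atan(dx/dy) * 180/3.14159 = atan(1) * 180/pi ~= 45
#   180 * ((dy < 0) + ((dx < 0 AND dy > 0) * 2)) = 180 * (1 + 0) = 180
#   rot = 90 - (45 + 180) = -135   (with invert_labels checked the leading constant is 270, giving 45)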
progress.setText("Determining contours to label")
rlayer = QgsVectorLayer(outputs_QGISFIELDCALCULATOR_5['OUTPUT_LAYER'], 'rlayer', 'ogr')
tlayer = QgsVectorLayer(outputs_QGISFIELDCALCULATOR_12['OUTPUT_LAYER'], 'tlayer', 'ogr')
dshort =start_buffer
dmid =start_buffer*2
dlong = start_buffer*3
if not tlayer.isValid():
progress.setText("Layer failed to load!")
exit(0)
if not rlayer.isValid():
progress.setText("Layer failed to load!")
exit(0)
tlayer.dataProvider().addAttributes([QgsField("label", QVariant.Int)])
tlayer.updateFields()
new_field_index = tlayer.fieldNameIndex('label')
rot_index = tlayer.fieldNameIndex('rot')
tlayer.startEditing()
for f in processing.features(tlayer):
tlayer.changeAttributeValue(f.id(), new_field_index, 0)
for t in processing.features(rlayer):
if (f['key'] == t['key']):
tlayer.changeAttributeValue(f.id(), rot_index, t['rot'])
tlayer.commitChanges()
tlayer.startEditing()
for f in processing.features(tlayer):
t = None
for t in processing.features(tlayer):
if (t['key'] == str(f['id_1'])+'_'+str(f['elev']+contour_step)):
fup = t
break
else:
fup = -99
t = None
for t in processing.features(tlayer):
if (t['key'] == str(f['id_1'])+'_'+str(f['elev']-contour_step)):
fdown = t
break
else:
fdown = -99
change = 0
if (f['index'] == 1):
change = 1
else:
if (fdown != -99):
distd = calcDist(f.geometry().asPoint().x(),f.geometry().asPoint().y(),fdown.geometry().asPoint().x(),fdown.geometry().asPoint().y())
fdl = fdown['label']
fdi = fdown['index']
else:
distd = 0
fdl = 0
fdi = 0
if (fup != -99):
distu = calcDist(f.geometry().asPoint().x(),f.geometry().asPoint().y(),fup.geometry().asPoint().x(),fup.geometry().asPoint().y())
ful = fup['label']
fui = fup['index']
else:
distu = 0
ful = 0
fui = 0
if ((distu >= dlong and distd >= dlong) or (distu >= dlong and fdown == -99) or (distd >= dlong and fup == -99)):
change = 1
elif ((distu >= dmid and fui == 0 and distd >= dmid and fdi == 0) or (distu >= dmid and fui == 0 and fdown == -99) or (distd >= dmid and fdi == 0 and fup == -99)):
change = 1
tlayer.changeAttributeValue(f.id(), new_field_index, change)
tlayer.commitChanges()
tlayer.startEditing()
for f in processing.features(tlayer):
t = None
for t in processing.features(tlayer):
if (t['key'] == str(f['id_1'])+'_'+str(f['elev']+contour_step)):
fup = t
break
else:
fup = -99
t = None
for t in processing.features(tlayer):
if (t['key'] == str(f['id_1'])+'_'+str(f['elev']-contour_step)):
fdown = t
break
else:
fdown = -99
if (f['label'] == 1):
continue
else:
change = 0
if (fdown != -99):
distd = calcDist(f.geometry().asPoint().x(),f.geometry().asPoint().y(),fdown.geometry().asPoint().x(),fdown.geometry().asPoint().y())
fdl = fdown['label']
fdi = fdown['index']
else:
distd = 0
fdl = 0
fdi = 0
if (fup != -99):
distu = calcDist(f.geometry().asPoint().x(),f.geometry().asPoint().y(),fup.geometry().asPoint().x(),fup.geometry().asPoint().y())
ful = fup['label']
fui = fup['index']
else:
distu = 0
ful = 0
fui = 0
if (distu > dshort and ful == 0 and distd > dshort and fdl == 0):
change = 1
elif (distu > dshort and ful == 0 and distd >= dlong):
change = 1
elif (distd > dshort and fdl == 0 and distu >= dlong):
change = 1
tlayer.changeAttributeValue(f.id(), new_field_index, change)
tlayer.commitChanges()
outputs_QGISFIELDCALCULATOR_8=processing.runalg('qgis:fieldcalculator', outputs_QGISFIELDCALCULATOR_12['OUTPUT_LAYER'],'buffer',1,3.0,0.0,True,'('+str(start_buffer)+' + ((length(to_string( "elev"))-1) * '+str(buffer_increment)+'))',None)
if (create_clipped_contours):
progress.setText("Creating clipped contours")
outputs_QGISEXTRACTBYATTRIBUTE_1=processing.runalg('qgis:extractbyattribute', outputs_QGISFIELDCALCULATOR_8['OUTPUT_LAYER'],'label',0,'1',None)
outputs_QGISFIXEDDISTANCEBUFFER_1=processing.runalg('qgis:fixeddistancebuffer', outputs_QGISEXTRACTBYATTRIBUTE_1['OUTPUT'],2.0,5.0,False,None)
outputs_QGISVARIABLEDISTANCEBUFFER_1=processing.runalg('qgis:variabledistancebuffer', outputs_QGISEXTRACTBYATTRIBUTE_1['OUTPUT'],'buffer',5.0,False,None)
outputs_QGISINTERSECTION_1=processing.runalg('qgis:intersection', use_contours,outputs_QGISVARIABLEDISTANCEBUFFER_1['OUTPUT'],None)
outputs_QGISMULTIPARTTOSINGLEPARTS_1=processing.runalg('qgis:multiparttosingleparts', outputs_QGISINTERSECTION_1['OUTPUT'],None)
if (int(version[0]) == 2 and int(version[1]) == 14):
outputs_QGISEXTRACTBYLOCATION_1=processing.runalg('qgis:extractbylocation', outputs_QGISMULTIPARTTOSINGLEPARTS_1['OUTPUT'],outputs_QGISFIXEDDISTANCEBUFFER_1['OUTPUT'],['intersects','crosses'],None)
elif (int(version[0]) == 2 and int(version[1]) == 16):
outputs_QGISEXTRACTBYLOCATION_1=processing.runalg('qgis:extractbylocation', outputs_QGISMULTIPARTTOSINGLEPARTS_1['OUTPUT'],outputs_QGISFIXEDDISTANCEBUFFER_1['OUTPUT'],['intersects','crosses'],1.0,None)
outputs_QGISFIXEDDISTANCEBUFFER_2=processing.runalg('qgis:fixeddistancebuffer', outputs_QGISEXTRACTBYLOCATION_1['OUTPUT'],2.0,5.0,False,None)
progress.setText("Returning final clipped contours")
if (int(version[0]) == 2 and int(version[1]) == 14):
outputs_QGISDIFFERENCE_1=processing.runalg('qgis:difference',use_contours,outputs_QGISFIXEDDISTANCEBUFFER_2['OUTPUT'],output_contours)
elif (int(version[0]) == 2 and int(version[1]) == 16):
outputs_QGISDIFFERENCE_1=processing.runalg('qgis:difference',use_contours,outputs_QGISFIXEDDISTANCEBUFFER_2['OUTPUT'],False,output_contours)
else:
output_contours = input_contours
progress.setText("Cleaning output layers.")
progress.setText("Returning labels")
outputs_QGISDELETECOLUMN_4=processing.runalg('qgis:deletecolumn',outputs_QGISFIELDCALCULATOR_8['OUTPUT_LAYER'],'buffer',None)
outputs_QGISDELETECOLUMN_5=processing.runalg('qgis:deletecolumn',outputs_QGISDELETECOLUMN_4['OUTPUT'],'ID',None)
outputs_QGISDELETECOLUMN_6=processing.runalg('qgis:deletecolumn',outputs_QGISDELETECOLUMN_5['OUTPUT'],'ID_1',output_labels)
progress.setText("All done.")
| mit | -4,487,152,315,475,190,000 | 56.181818 | 464 | 0.678668 | false |