code (string, length 2 to 1.05M) | repo_name (string, length 5 to 104) | path (string, length 4 to 251) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int32, 2 to 1.05M)
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# ###
# Copyright (c) 2013, Rice University
# This software is subject to the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
# ###
import uuid
from sqlalchemy.types import TypeDecorator, CHAR
from sqlalchemy.dialects.postgresql import UUID
# Derived from:
# http://docs.sqlalchemy.org/en/latest/core/types.html#backend-agnostic-guid-type
class GUID(TypeDecorator):
"""Platform-independent GUID type.
Uses Postgresql's UUID type, otherwise uses
CHAR(32), storing as stringified hex values.
"""
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(UUID())
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return "%.32x" % uuid.UUID(value).int
else:
# hexstring
return "%.32x" % value.int
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(value)
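# Illustrative usage sketch (not part of the original module): a declarative
# model could use the GUID type above as a primary key. The Base, table and
# column names here are hypothetical, for demonstration only.
#
# from sqlalchemy import Column
# from sqlalchemy.ext.declarative import declarative_base
#
# Base = declarative_base()
#
# class Document(Base):
#     __tablename__ = 'document'
#     id = Column(GUID(), primary_key=True, default=uuid.uuid4)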
| Connexions/cnx-user | cnxuser/_sqlalchemy.py | Python | agpl-3.0 | 1,342 |
from ..models import Post, Category, Tag
from django.db.models.aggregates import Count
from django import template
register = template.Library()
# Recent posts
@register.simple_tag
def get_recent_posts(num=9):
return Post.objects.all().order_by('-modified_time')[:num]
# Archive by month
@register.simple_tag
def archives():
return Post.objects.dates('created_time', 'month', order='DESC')
# Archive by category
@register.simple_tag
def get_categories():
return Category.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
# Tag cloud
@register.simple_tag
def get_tags():
return Tag.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
| RewrZ/RewrZ | rewrz/blog/templatetags/blog_tags.py | Python | agpl-3.0 | 698 |
"""
Views for the verification flow
"""
import json
import logging
import decimal
from mitxmako.shortcuts import render_to_response
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.http import urlencode
from django.contrib.auth.decorators import login_required
from course_modes.models import CourseMode
from student.models import CourseEnrollment
from student.views import course_from_id
from shoppingcart.models import Order, CertificateItem
from shoppingcart.processors.CyberSource import (
get_signed_purchase_params, get_purchase_endpoint
)
from verify_student.models import SoftwareSecurePhotoVerification
import ssencrypt
log = logging.getLogger(__name__)
class VerifyView(View):
@method_decorator(login_required)
def get(self, request, course_id):
"""
Displays the main verification view, which contains three separate steps:
- Taking the standard face photo
- Taking the id photo
- Confirming that the photos and payment price are correct
before proceeding to payment
"""
upgrade = request.GET.get('upgrade', False)
# If the user has already been verified within the given time period,
# redirect straight to the payment -- no need to verify again.
if SoftwareSecurePhotoVerification.user_has_valid_or_pending(request.user):
return redirect(
reverse('verify_student_verified',
kwargs={'course_id': course_id}) + "?upgrade={}".format(upgrade)
)
elif CourseEnrollment.enrollment_mode_for_user(request.user, course_id) == 'verified':
return redirect(reverse('dashboard'))
else:
# If they haven't completed a verification attempt, we have to
# restart with a new one. We can't reuse an older one because we
# won't be able to show them their encrypted photo_id -- it's easier
# bookkeeping-wise just to start over.
progress_state = "start"
verify_mode = CourseMode.mode_for_course(course_id, "verified")
# if the course doesn't have a verified mode, we want to kick them
# from the flow
if not verify_mode:
return redirect(reverse('dashboard'))
if course_id in request.session.get("donation_for_course", {}):
chosen_price = request.session["donation_for_course"][course_id]
else:
chosen_price = verify_mode.min_price
course = course_from_id(course_id)
context = {
"progress_state": progress_state,
"user_full_name": request.user.profile.name,
"course_id": course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"purchase_endpoint": get_purchase_endpoint(),
"suggested_prices": [
decimal.Decimal(price)
for price in verify_mode.suggested_prices.split(",")
],
"currency": verify_mode.currency.upper(),
"chosen_price": chosen_price,
"min_price": verify_mode.min_price,
"upgrade": upgrade,
}
return render_to_response('verify_student/photo_verification.html', context)
class VerifiedView(View):
"""
View that gets shown once the user has already gone through the
verification flow
"""
@method_decorator(login_required)
def get(self, request, course_id):
"""
Handle the case where we have a get request
"""
upgrade = request.GET.get('upgrade', False)
if CourseEnrollment.enrollment_mode_for_user(request.user, course_id) == 'verified':
return redirect(reverse('dashboard'))
verify_mode = CourseMode.mode_for_course(course_id, "verified")
if course_id in request.session.get("donation_for_course", {}):
chosen_price = request.session["donation_for_course"][course_id]
else:
chosen_price = "{:g}".format(verify_mode.min_price)
course = course_from_id(course_id)
context = {
"course_id": course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"purchase_endpoint": get_purchase_endpoint(),
"currency": verify_mode.currency.upper(),
"chosen_price": chosen_price,
"upgrade": upgrade,
}
return render_to_response('verify_student/verified.html', context)
@login_required
def create_order(request):
"""
Submit PhotoVerification and create a new Order for this verified cert
"""
if not SoftwareSecurePhotoVerification.user_has_valid_or_pending(request.user):
attempt = SoftwareSecurePhotoVerification(user=request.user)
b64_face_image = request.POST['face_image'].split(",")[1]
b64_photo_id_image = request.POST['photo_id_image'].split(",")[1]
attempt.upload_face_image(b64_face_image.decode('base64'))
attempt.upload_photo_id_image(b64_photo_id_image.decode('base64'))
attempt.mark_ready()
attempt.save()
course_id = request.POST['course_id']
donation_for_course = request.session.get('donation_for_course', {})
current_donation = donation_for_course.get(course_id, decimal.Decimal(0))
contribution = request.POST.get("contribution", donation_for_course.get(course_id, 0))
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not a valid number."))
if amount != current_donation:
donation_for_course[course_id] = amount
request.session['donation_for_course'] = donation_for_course
verified_mode = CourseMode.modes_for_course_dict(course_id).get('verified', None)
# make sure this course has a verified mode
if not verified_mode:
return HttpResponseBadRequest(_("This course doesn't support verified certificates"))
if amount < verified_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
# I know, we should check this is valid. All kinds of stuff missing here
cart = Order.get_cart_for_user(request.user)
cart.clear()
CertificateItem.add_to_order(cart, course_id, amount, 'verified')
params = get_signed_purchase_params(cart)
return HttpResponse(json.dumps(params), content_type="text/json")
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body)
except ValueError:
log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))
if not isinstance(body_dict, dict):
log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
sig_valid = ssencrypt.has_valid_signature(
"POST",
headers,
body_dict,
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
)
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
#if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id {}, but not found".format(receipt_id))
return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))
if result == "PASS":
log.debug("Approving verification for {}".format(receipt_id))
attempt.approve()
elif result == "FAIL":
log.debug("Denying verification for {}".format(receipt_id))
attempt.deny(json.dumps(reason), error_code=error_code)
elif result == "SYSTEM FAIL":
log.debug("System failure for {} -- resetting to must_retry".format(receipt_id))
attempt.system_error(json.dumps(reason), error_code=error_code)
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result {}".format(result))
return HttpResponseBadRequest(
"Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
)
return HttpResponse("OK!")
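# For illustration only: the fields read from the callback body above suggest
# a payload shaped roughly like the following (field values are hypothetical):
#
# {
#     "EdX-ID": "<receipt_id>",
#     "Result": "PASS",
#     "Reason": "",
#     "MessageType": ""
# }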
@login_required
def show_requirements(request, course_id):
"""
Show the requirements necessary for the verification flow.
"""
if CourseEnrollment.enrollment_mode_for_user(request.user, course_id) == 'verified':
return redirect(reverse('dashboard'))
upgrade = request.GET.get('upgrade', False)
course = course_from_id(course_id)
context = {
"course_id": course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"is_not_active": not request.user.is_active,
"upgrade": upgrade,
}
return render_to_response("verify_student/show_requirements.html", context)
class ReverifyView(View):
"""
The main reverification view. Under similar constraints as the main verification view.
Has to perform these functions:
- take new face photo
- take new id photo
- submit photos to photo verification service
Does not need to be attached to a particular course.
Does not need to worry about pricing
"""
@method_decorator(login_required)
def get(self, request):
"""
display this view
"""
context = {
"user_full_name": request.user.profile.name,
"error": False,
}
return render_to_response("verify_student/photo_reverification.html", context)
@method_decorator(login_required)
def post(self, request):
"""
submits the reverification to SoftwareSecure
"""
try:
attempt = SoftwareSecurePhotoVerification(user=request.user)
b64_face_image = request.POST['face_image'].split(",")[1]
b64_photo_id_image = request.POST['photo_id_image'].split(",")[1]
attempt.upload_face_image(b64_face_image.decode('base64'))
attempt.upload_photo_id_image(b64_photo_id_image.decode('base64'))
attempt.mark_ready()
# save this attempt
attempt.save()
# then submit it across
attempt.submit()
return HttpResponseRedirect(reverse('verify_student_reverification_confirmation'))
except Exception:
log.exception(
"Could not submit verification attempt for user {}".format(request.user.id)
)
context = {
"user_full_name": request.user.profile.name,
"error": True,
}
return render_to_response("verify_student/photo_reverification.html", context)
@login_required
def reverification_submission_confirmation(_request):
"""
Shows the user a confirmation page if the submission to SoftwareSecure was successful
"""
return render_to_response("verify_student/reverification_confirmation.html")
| TsinghuaX/edx-platform | lms/djangoapps/verify_student/views.py | Python | agpl-3.0 | 12,961 |
# -*- coding: utf-8 -*-
# Copyright 2018 Simone Rubino - Agile Business Group
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, models
from odoo.tools import safe_eval
class DeliveryCarrier(models.Model):
_inherit = 'delivery.carrier'
@api.multi
def get_price_available(self, order):
self.ensure_one()
category_price = 0.0
price_dict = self.get_price_dict(order)
for line in self.price_rule_ids:
if line.product_category_id:
products = order.mapped('order_line.product_id')
test = any(product.categ_id == line.product_category_id
for product in products)
if test:
category_price = line.product_category_price
break
else:
test = safe_eval(
line.variable + line.operator + str(line.max_value),
price_dict)
if test:
break
if category_price:
return category_price
# Note that this will evaluate all the price_rule_ids again and
# our category rules might interfere with the correct computation
return super(DeliveryCarrier, self).get_price_available(order)
def get_price_dict(self, order):
weight = volume = quantity = 0
total_delivery = 0.0
for line in order.order_line:
if line.state == 'cancel':
continue
if line.is_delivery:
total_delivery += line.price_total
if not line.product_id or line.is_delivery:
continue
qty = line.product_uom._compute_quantity(
line.product_uom_qty, line.product_id.uom_id)
weight += (line.product_id.weight or 0.0) * qty
volume += (line.product_id.volume or 0.0) * qty
quantity += qty
total = (order.amount_total or 0.0) - total_delivery
total = order.currency_id.with_context(date=order.date_order) \
.compute(total, order.company_id.currency_id)
return {'price': total, 'volume': volume, 'weight': weight,
'wv': volume * weight, 'quantity': quantity}
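# Illustrative note (not part of the original module): with a price rule where
# variable='weight', operator='<=' and max_value=20.0, the expression built in
# get_price_available() becomes safe_eval("weight<=20.0", price_dict), i.e. the
# rule is checked against the totals returned by get_price_dict() above.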
| OCA/carrier-delivery | delivery_price_by_category/models/delivery_carrier.py | Python | agpl-3.0 | 2,275 |
import time
import uuid
from splinter.browser import Browser
from django.contrib.auth.models import User
from webparticipation.apps.ureporter.models import Ureporter
from webparticipation.apps.ureport_auth.models import PasswordReset
def before_all(context):
context.browser = Browser('chrome')
time.sleep(5)
def before_scenario(context, scenario):
email = '[email protected]'
username = 'user999999999'
password = 'password'
email1 = '[email protected]'
username1 = 'user999999991'
uid = uuid.uuid4()
uid1 = uuid.uuid4()
Ureporter.objects.create(uuid=uid,
user=User.objects.create_user(username=username, email=email, password=password))
Ureporter.objects.create(uuid=uid1,
user=User.objects.create_user(username=username1, email=email1, password=password))
def after_scenario(context, scenario):
User.objects.all().delete()
Ureporter.objects.all().delete()
PasswordReset.objects.all().delete()
def after_all(context):
context.browser.quit()
context.browser = None
context.server = None
| rapidpro/ureport-web-participation | features/environment.py | Python | agpl-3.0 | 1,127 |
# -*- coding: utf-8 -*-
from nose.tools import raises
from openfisca_core import periods
from openfisca_core.columns import IntCol
from openfisca_core.formulas import CycleError, SimpleFormulaColumn
from openfisca_core.tests import dummy_country
from openfisca_core.tests.dummy_country import Individus, reference_formula
from openfisca_core.tools import assert_near
# 1 <--> 2 with same period
@reference_formula
class variable1(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable2', period)
@reference_formula
class variable2(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable1', period)
# 3 <--> 4 with a period offset, but without explicit cycle allowed
@reference_formula
class variable3(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable4', period.last_year)
@reference_formula
class variable4(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
return period, simulation.calculate('variable3', period)
# 5 -f-> 6 with a period offset, with cycle flagged but not allowed
# <---
@reference_formula
class variable5(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable6 = simulation.calculate('variable6', period.last_year, max_nb_cycles = 0)
return period, 5 + variable6
@reference_formula
class variable6(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable5 = simulation.calculate('variable5', period)
return period, 6 + variable5
# december cotisation depending on november value
@reference_formula
class cotisation(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
period = period.this_month
if period.start.month == 12:
return period, 2 * simulation.calculate('cotisation', period.last_month, max_nb_cycles = 1)
else:
return period, self.zeros() + 1
# 7 -f-> 8 with a period offset, with explicit cycle allowed (1 level)
# <---
@reference_formula
class variable7(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable8 = simulation.calculate('variable8', period.last_year, max_nb_cycles = 1)
return period, 7 + variable8
@reference_formula
class variable8(SimpleFormulaColumn):
column = IntCol
entity_class = Individus
def function(self, simulation, period):
variable7 = simulation.calculate('variable7', period)
return period, 8 + variable7
# TaxBenefitSystem instance declared after formulas
tax_benefit_system = dummy_country.init_tax_benefit_system()
reference_period = periods.period(u'2013')
@raises(AssertionError)
def test_pure_cycle():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
simulation.calculate('variable1')
@raises(CycleError)
def test_cycle_time_offset():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
simulation.calculate('variable3')
def test_allowed_cycle():
"""
Calculate variable5 then variable6, then in the other order, to verify that the first calculated variable
has no effect on the result.
"""
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable6 = simulation.calculate('variable6')
variable5 = simulation.calculate('variable5')
variable6_last_year = simulation.calculate('variable6', reference_period.last_year)
assert_near(variable5, [5])
assert_near(variable6, [11])
assert_near(variable6_last_year, [0])
def test_allowed_cycle_different_order():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable5 = simulation.calculate('variable5')
variable6 = simulation.calculate('variable6')
variable6_last_year = simulation.calculate('variable6', reference_period.last_year)
assert_near(variable5, [5])
assert_near(variable6, [11])
assert_near(variable6_last_year, [0])
def test_cotisation_1_level():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period.last_month, # December
parent1 = dict(),
).new_simulation(debug = True)
cotisation = simulation.calculate('cotisation')
assert_near(cotisation, [2])
def test_cycle_1_level():
simulation = tax_benefit_system.new_scenario().init_single_entity(
period = reference_period,
parent1 = dict(),
).new_simulation(debug = True)
variable7 = simulation.calculate('variable7')
# variable8 = simulation.calculate('variable8')
assert_near(variable7, [22])
| adrienpacifico/openfisca-core | openfisca_core/tests/test_cycles.py | Python | agpl-3.0 | 5,495 |
from django.test import TestCase
from django.views.generic import TemplateView
from django.contrib.contenttypes.models import ContentType
from jsonattrs import models, mixins
from . import factories
class XLangLabelsTest(TestCase):
def test_dict(self):
res = mixins.template_xlang_labels({'en': 'Field 1', 'de': 'Feld 1'})
assert 'data-label-en="Field 1"' in res
assert 'data-label-de="Feld 1"' in res
def test_string(self):
assert mixins.template_xlang_labels('Field 1') == ''
def test_none(self):
assert mixins.template_xlang_labels(None) == ''
class JsonAttrsView(mixins.JsonAttrsMixin, TemplateView):
attributes_field = 'attrs'
class JsonAttrsMixinTest(TestCase):
def test_get_context(self):
models.create_attribute_types()
org = factories.OrganizationFactory.create()
project = factories.ProjectFactory.create(organization=org)
content_type = ContentType.objects.get(
app_label='tests', model='party')
schema1 = models.Schema.objects.create(
content_type=content_type,
selectors=(org.id, project.id))
models.Attribute.objects.create(
schema=schema1,
name='field_1',
long_name='Field 1',
attr_type=models.AttributeType.objects.get(name='text'),
index=0
)
models.Attribute.objects.create(
schema=schema1,
name='field_2',
long_name='Field 2',
attr_type=models.AttributeType.objects.get(name='text'),
index=1
)
models.Attribute.objects.create(
schema=schema1,
name='field_3',
long_name='Field 3',
attr_type=models.AttributeType.objects.get(name='select_multiple'),
choices=['one', 'two', 'three'],
choice_labels=['Choice 1', 'Choice 2', 'Choice 3'],
index=2,
)
models.Attribute.objects.create(
schema=schema1,
name='field_4',
long_name='Field 4',
attr_type=models.AttributeType.objects.get(name='select_one'),
choices=['one', 'two', 'three'],
choice_labels=['Choice 1', 'Choice 2', 'Choice 3'],
index=3,
)
party = factories.PartyFactory.create(
project=project,
attrs={'field_1': 'Some value',
'field_3': ['one', 'three'],
'field_4': 'two'}
)
view = JsonAttrsView()
view.object = party
context = view.get_context_data()
assert len(context['attrs']) == 4
assert context['attrs'][0] == ('Field 1', 'Some value', '', '')
assert context['attrs'][1] == ('Field 2', '—', '', '')
assert context['attrs'][2] == ('Field 3', 'Choice 1, Choice 3', '', '')
assert context['attrs'][3] == ('Field 4', 'Choice 2', '', '')
def test_get_context_xlang(self):
models.create_attribute_types()
org = factories.OrganizationFactory.create()
project = factories.ProjectFactory.create(organization=org)
content_type = ContentType.objects.get(
app_label='tests', model='party')
schema1 = models.Schema.objects.create(
content_type=content_type,
selectors=(org.id, project.id),
default_language='en')
models.Attribute.objects.create(
schema=schema1,
name='field_1',
long_name={'en': 'Field 1', 'de': 'Feld 1'},
attr_type=models.AttributeType.objects.get(name='text'),
index=0
)
models.Attribute.objects.create(
schema=schema1,
name='field_2',
long_name={'en': 'Field 2', 'de': 'Feld 2'},
attr_type=models.AttributeType.objects.get(name='text'),
index=1
)
models.Attribute.objects.create(
schema=schema1,
name='field_3',
long_name={'en': 'Field 3', 'de': 'Feld 3'},
attr_type=models.AttributeType.objects.get(name='select_multiple'),
choices=['one', 'two', 'three'],
choice_labels=[{'en': 'Choice 1', 'de': 'Wahl 1'},
{'en': 'Choice 2', 'de': 'Wahl 2'},
{'en': 'Choice 3', 'de': 'Wahl 3'}],
index=2,
)
models.Attribute.objects.create(
schema=schema1,
name='field_4',
long_name={'en': 'Field 4', 'de': 'Feld 4'},
attr_type=models.AttributeType.objects.get(name='select_one'),
choices=['one', 'two', 'three'],
choice_labels=[{'en': 'Choice 1', 'de': 'Wahl 1'},
{'en': 'Choice 2', 'de': 'Wahl 2'},
{'en': 'Choice 3', 'de': 'Wahl 3'}],
index=3,
)
party = factories.PartyFactory.create(
project=project,
attrs={'field_1': 'Some value',
'field_3': ['one', 'three'],
'field_4': 'two'}
)
view = JsonAttrsView()
view.object = party
context = view.get_context_data()
assert len(context['attrs']) == 4
field_1 = context['attrs'][0]
assert field_1[0] == 'Field 1'
assert field_1[1] == 'Some value'
assert 'data-label-en="Field 1"' in field_1[2]
assert 'data-label-de="Feld 1"' in field_1[2]
field_2 = context['attrs'][1]
assert field_2[0] == 'Field 2'
assert field_2[1] == '—'
assert 'data-label-en="Field 2"' in field_2[2]
assert 'data-label-de="Feld 2"' in field_2[2]
field_3 = context['attrs'][2]
assert field_3[0] == 'Field 3'
assert field_3[1] == 'Choice 1, Choice 3'
assert 'data-label-en="Field 3"' in field_3[2]
assert 'data-label-de="Feld 3"' in field_3[2]
assert 'data-label-en="Choice 1, Choice 3"' in field_3[3]
assert 'data-label-de="Wahl 1, Wahl 3"' in field_3[3]
field_4 = context['attrs'][3]
assert field_4[0] == 'Field 4'
assert field_4[1] == 'Choice 2'
assert 'data-label-en="Field 4"' in field_4[2]
assert 'data-label-de="Feld 4"' in field_4[2]
assert 'data-label-en="Choice 2"' in field_4[3]
assert 'data-label-de="Wahl 2"' in field_4[3]
| Cadasta/django-jsonattrs | tests/test_mixins.py | Python | agpl-3.0 | 6,431 |
import json as json_
# Template for code 200 responses so data can easily be added
def ok(d=None, *, json=True):
code = {'code': 200, 'status': 'OK', 'data': d}
if json:
code = json_.dumps(code)
return code
# The 400 codes shouldn't require any special arguments.
def invalid_request(*, json=True):
code = {'code': 400, 'status': 'MALFORMED_REQUEST'}
if json:
code = json_.dumps(code)
return code
def unknown_request(*, json=True):
code = {'code': 400, 'status': 'UNKNOWN_REQUEST'}
if json:
code = json_.dumps(code)
return code
# You can assign the internal server error a number for debugging purposes.
def internal_server_error(n=None, *, json=True):
status_string = 'INTERNAL_SERVER_ERROR'
if n is not None:
status_string += '_{}'.format(n)
code = {'code': 500, 'status': status_string}
if json:
code = json_.dumps(code)
return code
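# Illustrative usage (hypothetical values, not part of the original module):
#
# ok({'user': 'alice'})
# # -> '{"code": 200, "status": "OK", "data": {"user": "alice"}}'
#
# internal_server_error(3, json=False)
# # -> {'code': 500, 'status': 'INTERNAL_SERVER_ERROR_3'}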
| TacticAlpha/basic-lan-webserver | server/status.py | Python | agpl-3.0 | 938 |
"""URLs to run the tests."""
try:
from django.urls import include
except ImportError:
from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
admin.autodiscover()
urlpatterns = (
url(r'^admin/', admin.site.urls),
url(r'^status', include('server_status.urls')),
)
| mitodl/django-server-status | server_status/tests/urls.py | Python | agpl-3.0 | 328 |
from django.conf import settings
def posthog_configurations(request):
return {
'POSTHOG_API_KEY': settings.POSTHOG_API_KEY,
'POSTHOG_API_URL': settings.POSTHOG_API_URL,
}
| pythonprobr/pythonpro-website | pythonpro/analytics/context_processors.py | Python | agpl-3.0 | 197 |
"""Add replies column
Revision ID: 3b0d1321079e
Revises: 1e2d77a2f0c4
Create Date: 2021-11-03 23:32:15.720557
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "3b0d1321079e"
down_revision = "1e2d77a2f0c4"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"comment",
sa.Column(
"replies", postgresql.JSONB(astext_type=sa.Text()), nullable=True
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("comment", "replies")
# ### end Alembic commands ###
| cgwire/zou | zou/migrations/versions/3b0d1321079e_.py | Python | agpl-3.0 | 799 |
"""Monitor the transaction log for changes that should be synced back to the
account backend.
TODO(emfree):
* Track syncback failure/success state, and implement retries
(syncback actions may be lost if the service restarts while actions are
still pending).
* Add better logging.
"""
import gevent
from sqlalchemy import asc, func
from inbox.util.concurrency import retry_with_logging
from inbox.log import get_logger
from inbox.models.session import session_scope
from inbox.models import ActionLog, Namespace
from inbox.actions import (mark_read, mark_unread, archive, unarchive, star,
unstar, save_draft, delete_draft, mark_spam,
unmark_spam, mark_trash, unmark_trash, send_draft)
ACTION_FUNCTION_MAP = {
'archive': archive,
'unarchive': unarchive,
'mark_read': mark_read,
'mark_unread': mark_unread,
'star': star,
'unstar': unstar,
'mark_spam': mark_spam,
'unmark_spam': unmark_spam,
'mark_trash': mark_trash,
'unmark_trash': unmark_trash,
'send_draft': send_draft,
'save_draft': save_draft,
'delete_draft': delete_draft
}
class SyncbackService(gevent.Greenlet):
"""Asynchronously consumes the action log and executes syncback actions."""
def __init__(self, poll_interval=1, chunk_size=22, max_pool_size=22):
self.log = get_logger()
self.worker_pool = gevent.pool.Pool(max_pool_size)
self.poll_interval = poll_interval
self.chunk_size = chunk_size
with session_scope() as db_session:
# Just start working from the head of the log.
# TODO(emfree): once we can do retry, persist a pointer into the
# transaction log and advance it only on syncback success.
self.minimum_id, = db_session.query(
func.max(ActionLog.id)).one()
if self.minimum_id is None:
self.minimum_id = -1
gevent.Greenlet.__init__(self)
def _process_log(self):
# TODO(emfree) handle the case that message/thread objects may have
# been deleted in the interim
with session_scope() as db_session:
query = db_session.query(ActionLog). \
filter(ActionLog.id > self.minimum_id). \
order_by(asc(ActionLog.id)).yield_per(self.chunk_size)
for log_entry in query:
self.minimum_id = log_entry.id
action_function = ACTION_FUNCTION_MAP[log_entry.action]
namespace = db_session.query(Namespace). \
get(log_entry.namespace_id)
self._execute_async_action(action_function,
namespace.account_id,
log_entry.record_id)
def _execute_async_action(self, func, *args):
self.log.info('Scheduling syncback action', func=func, args=args)
g = gevent.Greenlet(retry_with_logging, lambda: func(*args),
logger=self.log)
g.link_value(lambda _: self.log.info('Syncback action completed',
func=func, args=args))
self.worker_pool.start(g)
def _run_impl(self):
self.log.info('Starting action service')
while True:
self._process_log()
gevent.sleep(self.poll_interval)
def _run(self):
retry_with_logging(self._run_impl, self.log)
| rmasters/inbox | inbox/transactions/actions.py | Python | agpl-3.0 | 3,454 |
# -*- coding: utf-8 -*-
#
# 2016-05-07 Cornelius Kölbel <[email protected]>
# Add realm dropdown
# 2016-04-06 Cornelius Kölbel <[email protected]>
# Add time dependency in policy
# 2016-02-22 Cornelius Kölbel <[email protected]>
# Add RADIUS passthru policy
# 2016-02-05 Cornelius Kölbel <[email protected]>
# Add tokenwizard in scope UI
# 2015-12-30 Cornelius Kölbel <[email protected]>
# Add password reset policy
# 2015-12-28 Cornelius Kölbel <[email protected]>
# Add registration policy
# 2015-12-16 Cornelius Kölbel <[email protected]>
# Add tokenissuer policy
# 2015-11-29 Cornelius Kölbel <[email protected]>
# Add getchallenges policy
# 2015-10-31 Cornelius Kölbel <[email protected]>
# Add last_auth policy.
# 2015-10-30 Cornelius Kölbel <[email protected]>
# Display user details in token list
# 2015-10-26 Cornelius Kölbel <[email protected]>
# Add default token type for enrollment
# 2015-10-14 Cornelius Kölbel <[email protected]>
# Add auth_max_success and auth_max_fail actions to
# scope authorization
# 2015-10-09 Cornelius Kölbel <[email protected]>
# Add token_page_size and user_page_size policy
# 2015-09-06 Cornelius Kölbel <[email protected]>
# Add challenge_response authentication policy
# 2015-06-30 Cornelius Kölbel <[email protected]>
# Add the OTP PIN handling
# 2015-06-29 Cornelius Kölbel <[email protected]>
# Add the mangle policy
# 2015-04-03 Cornelius Kölbel <[email protected]>
# Add WebUI logout time.
# 2015-03-27 Cornelius Kölbel <[email protected]>
# Add PIN policies in USER scope
# 2015-02-06 Cornelius Kölbel <[email protected]>
# Rewrite for flask migration.
# Policies are now handled by decorators as
# 1. precondition for API calls
# 2. internal modifications of LIB-functions
# 3. postcondition for API calls
#
# Jul 07, 2014 add check_machine_policy, Cornelius Kölbel
# May 08, 2014 Cornelius Kölbel
#
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# privacyIDEA is a fork of LinOTP
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# [email protected]
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Base function to handle the policy entries in the database.
This module only depends on the db/models.py
The functions of this module are tested in tests/test_lib_policy.py
A policy has the attributes
* name
* scope
* action
* realm
* resolver
* user
* client
* active
``name`` is the unique identifier of a policy. ``scope`` is the area,
where this policy is meant for. This can be values like admin, selfservice,
authentication...
``scope`` takes only one value.
``active`` is bool and indicates, whether a policy is active or not.
``action``, ``realm``, ``resolver``, ``user`` and ``client`` can take a comma
separated list of values.
realm and resolver
------------------
If these are empty '*', this policy matches each requested realm.
user
----
If the user is empty or '*', this policy matches each user.
You can exclude users from matching this policy, by prepending a '-' or a '!'.
``*, -admin`` will match for all users except the admin.
client
------
The client is identified by its IP address. A policy can contain a list of
IP addresses or subnets.
You can exclude clients from subnets by prepending the client with a '-' or
a '!'.
``172.16.0.0/24, -172.16.0.17`` will match each client in the subnet except
the 172.16.0.17.
time
----
You can specify a time in which the policy should be active.
Time formats are
<dow>-<dow>:<hh>:<mm>-<hh>:<mm>, ...
<dow>:<hh>:<mm>-<hh>:<mm>
<dow>:<hh>-<hh>
and any combination of it. "dow" being day of week Mon, Tue, Wed, Thu, Fri,
Sat, Sun.
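For example, a policy that should only be active on weekdays during office
hours could use the time value "Mon-Fri:08:00-17:30", and
"Mon-Fri:08:00-17:30, Sat:08:00-12:00" would additionally cover Saturday
mornings.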
"""
from .log import log_with
from configobj import ConfigObj
from netaddr import IPAddress
from netaddr import IPNetwork
from gettext import gettext as _
import logging
from ..models import (Policy, db)
from privacyidea.lib.config import (get_token_classes, get_token_types)
from privacyidea.lib.error import ParameterError, PolicyError
from privacyidea.lib.realm import get_realms
from privacyidea.lib.resolver import get_resolver_list
from privacyidea.lib.smtpserver import get_smtpservers
from privacyidea.lib.radiusserver import get_radiusservers
from privacyidea.lib.utils import check_time_in_range
log = logging.getLogger(__name__)
optional = True
required = False
class SCOPE(object):
__doc__ = """This is the list of the allowed scopes that can be used in
policy definitions.
"""
AUTHZ = "authorization"
ADMIN = "admin"
AUTH = "authentication"
AUDIT = "audit"
USER = "user" # was selfservice
ENROLL = "enrollment"
GETTOKEN = "gettoken"
WEBUI = "webui"
REGISTER = "register"
class ACTION(object):
__doc__ = """This is the list of usual actions."""
ASSIGN = "assign"
AUDIT = "auditlog"
AUTHITEMS = "fetch_authentication_items"
AUTHMAXSUCCESS = "auth_max_success"
AUTHMAXFAIL = "auth_max_fail"
AUTOASSIGN = "autoassignment"
CACONNECTORREAD = "caconnectorread"
CACONNECTORWRITE = "caconnectorwrite"
CACONNECTORDELETE = "caconnectordelete"
CHALLENGERESPONSE = "challenge_response"
GETCHALLENGES = "getchallenges"
COPYTOKENPIN = "copytokenpin"
COPYTOKENUSER = "copytokenuser"
DEFAULT_TOKENTYPE = "default_tokentype"
DELETE = "delete"
DISABLE = "disable"
EMAILCONFIG = "smtpconfig"
ENABLE = "enable"
ENCRYPTPIN = "encrypt_pin"
GETSERIAL = "getserial"
GETRANDOM = "getrandom"
IMPORT = "importtokens"
LASTAUTH = "last_auth"
LOGINMODE = "login_mode"
LOGOUTTIME = "logout_time"
LOSTTOKEN = 'losttoken'
LOSTTOKENPWLEN = "losttoken_PW_length"
LOSTTOKENPWCONTENTS = "losttoken_PW_contents"
LOSTTOKENVALID = "losttoken_valid"
MACHINERESOLVERWRITE = "mresolverwrite"
MACHINERESOLVERDELETE = "mresolverdelete"
MACHINELIST = "machinelist"
MACHINETOKENS = "manage_machine_tokens"
MANGLE = "mangle"
MAXTOKENREALM = "max_token_per_realm"
MAXTOKENUSER = "max_token_per_user"
NODETAILSUCCESS = "no_detail_on_success"
NODETAILFAIL = "no_detail_on_fail"
OTPPIN = "otppin"
OTPPINRANDOM = "otp_pin_random"
OTPPINMAXLEN = 'otp_pin_maxlength'
OTPPINMINLEN = 'otp_pin_minlength'
OTPPINCONTENTS = 'otp_pin_contents'
PASSNOTOKEN = "passOnNoToken"
PASSNOUSER = "passOnNoUser"
PASSTHRU = "passthru"
PASSWORDRESET = "password_reset"
PINHANDLING = "pinhandling"
POLICYDELETE = "policydelete"
POLICYWRITE = "policywrite"
POLICYTEMPLATEURL = "policy_template_url"
REALM = "realm"
REMOTE_USER = "remote_user"
REQUIREDEMAIL = "requiredemail"
RESET = "reset"
RESOLVERDELETE = "resolverdelete"
RESOLVERWRITE = "resolverwrite"
RESOLVER = "resolver"
RESYNC = "resync"
REVOKE = "revoke"
SET = "set"
SETPIN = "setpin"
SETREALM = "setrealm"
SERIAL = "serial"
SYSTEMDELETE = "configdelete"
SYSTEMWRITE = "configwrite"
CONFIGDOCUMENTATION = "system_documentation"
TOKENISSUER = "tokenissuer"
TOKENLABEL = "tokenlabel"
TOKENPAGESIZE = "token_page_size"
TOKENREALMS = "tokenrealms"
TOKENTYPE = "tokentype"
TOKENWIZARD = "tokenwizard"
TOKENWIZARD2ND = "tokenwizard_2nd_token"
UNASSIGN = "unassign"
USERLIST = "userlist"
USERPAGESIZE = "user_page_size"
ADDUSER = "adduser"
DELETEUSER = "deleteuser"
UPDATEUSER = "updateuser"
USERDETAILS = "user_details"
APIKEY = "api_key_required"
SETHSM = "set_hsm_password"
SMTPSERVERWRITE = "smtpserver_write"
RADIUSSERVERWRITE = "radiusserver_write"
REALMDROPDOWN = "realm_dropdown"
EVENTHANDLINGWRITE = "eventhandling_write"
class LOGINMODE(object):
__doc__ = """This is the list of possible values for the login mode."""
USERSTORE = "userstore"
PRIVACYIDEA = "privacyIDEA"
DISABLE = "disable"
class REMOTE_USER(object):
__doc__ = """The list of possible values for the remote_user policy."""
DISABLE = "disable"
ACTIVE = "allowed"
class ACTIONVALUE(object):
__doc__ = """This is a list of usual action values for e.g. policy
action-values like otppin."""
TOKENPIN = "tokenpin"
USERSTORE = "userstore"
DISABLE = "disable"
NONE = "none"
class AUTOASSIGNVALUE(object):
__doc__ = """This is the possible values for autoassign"""
USERSTORE = "userstore"
NONE = "any_pin"
class PolicyClass(object):
"""
The Policy_Object will contain all database policy entries for easy
filtering and mangling.
It will be created at the beginning of the request and is supposed to stay
alive unchanged during the request.
"""
def __init__(self):
"""
Create the Policy_Object from the database table
"""
self.policies = []
# read the policies from the database and store it in the object
policies = Policy.query.all()
for pol in policies:
# read each policy
self.policies.append(pol.get())
@log_with(log)
def get_policies(self, name=None, scope=None, realm=None, active=None,
resolver=None, user=None, client=None, action=None,
adminrealm=None, time=None, all_times=False):
"""
Return the policies of the given filter values
:param name:
:param scope:
:param realm:
:param active:
:param resolver:
:param user:
:param client:
:param action:
:param adminrealm: This is the realm of the admin. This is only
evaluated in the scope admin.
:param time: The optional time, for which the policies should be
fetched. The default time is now()
:type time: datetime
:param all_times: If True the time restriction of the policies is
ignored. Policies of all time ranges will be returned.
:type all_times: bool
:return: list of policies
:rtype: list of dicts
"""
reduced_policies = self.policies
# filter policies by time. If no time is set, or a time is set and
# it matches the time_range, then we add this policy
if not all_times:
reduced_policies = [policy for policy in reduced_policies if
(policy.get("time") and
check_time_in_range(policy.get("time"), time))
or not policy.get("time")]
log.debug("Policies after matching time: {0!s}".format(
reduced_policies))
# Do exact matches for "name", "active" and "scope", as these fields
# can only contain one entry
p = [("name", name), ("active", active), ("scope", scope)]
for searchkey, searchvalue in p:
if searchvalue is not None:
reduced_policies = [policy for policy in reduced_policies if
policy.get(searchkey) == searchvalue]
log.debug("Policies after matching {1!s}: {0!s}".format(
reduced_policies, searchkey))
p = [("action", action), ("user", user), ("resolver", resolver),
("realm", realm)]
# If this is an admin-policy, we also do check the adminrealm
if scope == "admin":
p.append(("adminrealm", adminrealm))
for searchkey, searchvalue in p:
if searchvalue is not None:
new_policies = []
# first we find policies, that really match!
# Either with the real value or with a "*"
# values can be excluded by a leading "!" or "-"
for policy in reduced_policies:
value_found = False
value_excluded = False
# iterate through the list of values:
for value in policy.get(searchkey):
if value and value[0] in ["!", "-"] and \
searchvalue == value[1:]:
value_excluded = True
elif type(searchvalue) == list and value in \
searchvalue + ["*"]:
value_found = True
elif value in [searchvalue, "*"]:
value_found = True
if value_found and not value_excluded:
new_policies.append(policy)
# We also find the policies with no distinct information
# about the request value
for policy in reduced_policies:
if not policy.get(searchkey):
new_policies.append(policy)
reduced_policies = new_policies
log.debug("Policies after matching {1!s}: {0!s}".format(
reduced_policies, searchkey))
# Match the client IP.
# Client IPs may be direct match, may be located in subnets or may
# be excluded by a leading "-" or "!" sign.
# The client definition in the policy may be a comma separated list.
# It may start with a "-" or a "!" to exclude the client
# from a subnet.
# Thus a client 10.0.0.2 matches a policy "10.0.0.0/8, -10.0.0.1" but
# the client 10.0.0.1 does not match the policy "10.0.0.0/8, -10.0.0.1".
# An empty client definition in the policy matches all clients.
if client is not None:
new_policies = []
for policy in reduced_policies:
client_found = False
client_excluded = False
for polclient in policy.get("client"):
if polclient[0] in ['-', '!']:
# exclude the client?
if IPAddress(client) in IPNetwork(polclient[1:]):
log.debug("the client %s is excluded by %s in "
"policy %s" % (client, polclient, policy))
client_excluded = True
elif IPAddress(client) in IPNetwork(polclient):
client_found = True
if client_found and not client_excluded:
# The client was contained in the defined subnets and was
# not excluded
new_policies.append(policy)
# If there is a policy without any client, we also add it to the
# accepted list.
for policy in reduced_policies:
if not policy.get("client"):
new_policies.append(policy)
reduced_policies = new_policies
log.debug("Policies after matching client: {0!s}".format(
reduced_policies))
return reduced_policies
@log_with(log)
def get_action_values(self, action, scope=SCOPE.AUTHZ, realm=None,
resolver=None, user=None, client=None, unique=False,
allow_white_space_in_action=False):
"""
Get the defined action values for a certain action like
scope: authorization
action: tokentype
would return a list of the tokentypes
scope: authorization
action: serial
would return a list of allowed serials
:param unique: if set, the function will raise an exception if more
than one value is returned
:param allow_white_space_in_action: Some policies like emailtext
would allow entering text with whitespaces. These whitespaces
must not be used to separate action values!
:type allow_white_space_in_action: bool
:return: A list of the allowed tokentypes
:rtype: list
"""
action_values = []
policies = self.get_policies(scope=scope,
action=action, active=True,
realm=realm, resolver=resolver, user=user,
client=client)
for pol in policies:
action_dict = pol.get("action", {})
action_value = action_dict.get(action, "")
"""
We must distinguish actions like:
tokentype=totp hotp motp,
where the string represents a list divided by spaces, and
smstext='your otp is <otp>'
where the spaces are part of the string.
"""
if action_value.startswith("'") and action_value.endswith("'"):
action_values.append(action_dict.get(action)[1:-1])
elif allow_white_space_in_action:
action_values.append(action_dict.get(action))
else:
action_values.extend(action_dict.get(action, "").split())
# reduce the entries to unique entries
action_values = list(set(action_values))
if unique:
if len(action_values) > 1:
raise PolicyError("There are conflicting %s"
" definitions!" % action)
return action_values
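# Illustrative call (realm, user and client values are hypothetical):
# P = PolicyClass()
# tokentypes = P.get_action_values(ACTION.TOKENTYPE, scope=SCOPE.AUTHZ,
#                                  realm="realm1", user="alice",
#                                  client="10.0.0.1")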
@log_with(log)
def ui_get_rights(self, scope, realm, username, client=None):
"""
Get the rights derived from the policies for the given realm and user.
Works for admins and normal users.
It fetches all policies for this user and compiles a maximum list of
allowed rights, that can be used to hide certain UI elements.
:param scope: Can be SCOPE.ADMIN or SCOPE.USER
:param realm: Is either user users realm or the adminrealm
:param username: The loginname of the user
:param client: The HTTP client IP
:return: A list of actions
"""
from privacyidea.lib.auth import ROLE
from privacyidea.lib.token import get_dynamic_policy_definitions
rights = []
userealm = None
adminrealm = None
logged_in_user = {"username": username,
"realm": realm}
if scope == SCOPE.ADMIN:
adminrealm = realm
logged_in_user["role"] = ROLE.ADMIN
elif scope == SCOPE.USER:
userealm = realm
logged_in_user["role"] = ROLE.USER
pols = self.get_policies(scope=scope,
adminrealm=adminrealm,
realm=userealm,
user=username, active=True,
client=client)
for pol in pols:
for action, action_value in pol.get("action").items():
if action_value:
rights.append(action)
# check if we have policies at all:
pols = self.get_policies(scope=scope, active=True)
if not pols:
# We do not have any policies in this scope, so we return all
# possible actions in this scope.
log.debug("No policies defined, so we set all rights.")
static_rights = get_static_policy_definitions(scope).keys()
enroll_rights = get_dynamic_policy_definitions(scope).keys()
rights = static_rights + enroll_rights
# reduce the list
rights = list(set(rights))
log.debug("returning the admin rights: {0!s}".format(rights))
return rights
@log_with(log)
def ui_get_enroll_tokentypes(self, client, logged_in_user):
"""
Return a dictionary of the allowed tokentypes for the logged in user.
This used for the token enrollment UI.
It looks like this:
{"hotp": "HOTP: event based One Time Passwords",
"totp": "TOTP: time based One Time Passwords",
"spass": "SPass: Simple Pass token. Static passwords",
"motp": "mOTP: classical mobile One Time Passwords",
"sshkey": "SSH Public Key: The public SSH key",
"yubikey": "Yubikey AES mode: One Time Passwords with Yubikey",
"remote": "Remote Token: Forward authentication request to another server",
"yubico": "Yubikey Cloud mode: Forward authentication request to YubiCloud",
"radius": "RADIUS: Forward authentication request to a RADIUS server",
"email": "EMail: Send a One Time Passwort to the users email address",
"sms": "SMS: Send a One Time Password to the users mobile phone",
"certificate": "Certificate: Enroll an x509 Certificate Token."}
:param client: Client IP address
:type client: basestring
:param logged_in_user: The Dict of the logged in user
:type logged_in_user: dict
:return: list of token types, the user may enroll
"""
from privacyidea.lib.auth import ROLE
enroll_types = {}
role = logged_in_user.get("role")
if role == ROLE.ADMIN:
admin_realm = logged_in_user.get("realm")
user_realm = None
else:
admin_realm = None
user_realm = logged_in_user.get("realm")
# check, if we have a policy definition at all.
pols = self.get_policies(scope=role, active=True)
tokenclasses = get_token_classes()
for tokenclass in tokenclasses:
# Check if the tokenclass is ui enrollable for "user" or "admin"
if role in tokenclass.get_class_info("ui_enroll"):
enroll_types[tokenclass.get_class_type()] = \
tokenclass.get_class_info("description")
if pols:
# admin policies or user policies are set, so we need to
# test, which tokens are allowed to be enrolled for this user
for tokentype in enroll_types.keys():
# determine, if there is a enrollment policy for this very type
typepols = self.get_policies(scope=role, client=client,
user=logged_in_user.get("username"),
realm=user_realm,
active=True,
action="enroll"+tokentype.upper(),
adminrealm=admin_realm)
if not typepols:
# If there is no policy allowing the enrollment of this
# tokentype, it is deleted.
del(enroll_types[tokentype])
return enroll_types
# --------------------------------------------------------------------------
#
# NEW STUFF
#
#
@log_with(log)
def set_policy(name=None, scope=None, action=None, realm=None, resolver=None,
user=None, time=None, client=None, active=True, adminrealm=None):
"""
Function to set a policy.
If the policy with this name already exists, it updates the policy.
It expects a dict of with the following keys:
:param name: The name of the policy
:param scope: The scope of the policy. Something like "admin", "system",
"authentication"
:param action: A scope specific action or a comma separated list of actions
:type action: basestring
:param realm: A realm, for which this policy is valid
:param resolver: A resolver, for which this policy is valid
:param user: A username or a list of usernames
:param time: A time range for which this policy is valid, in the format described in the module docstring
:param client: A client IP with optionally a subnet like 172.16.0.0/16
:param active: If the policy is active or not
:type active: bool
:return: The database ID of the policy
:rtype: int
"""
if type(action) == dict:
action_list = []
for k, v in action.items():
if v is not True:
# value key
action_list.append("{0!s}={1!s}".format(k, v))
else:
# simple boolean value
action_list.append(k)
action = ", ".join(action_list)
if type(action) == list:
action = ", ".join(action)
if type(realm) == list:
realm = ", ".join(realm)
if type(adminrealm) == list:
adminrealm = ", ".join(adminrealm)
if type(user) == list:
user = ", ".join(user)
if type(resolver) == list:
resolver = ", ".join(resolver)
if type(client) == list:
client = ", ".join(client)
p = Policy(name, action=action, scope=scope, realm=realm,
user=user, time=time, client=client, active=active,
resolver=resolver, adminrealm=adminrealm).save()
return p
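# Illustrative call (policy name, realm and action string are hypothetical):
# set_policy(name="pol_authz_tokentype", scope=SCOPE.AUTHZ,
#            action="tokentype=hotp totp", realm="realm1")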
@log_with(log)
def enable_policy(name, enable=True):
"""
Enable or disable the policy with the given name
:param name:
:return: ID of the policy
"""
if not Policy.query.filter(Policy.name == name).first():
raise ParameterError("The policy with name '{0!s}' does not exist".format(name))
# Update the policy
p = set_policy(name=name, active=enable)
return p
@log_with(log)
def delete_policy(name):
"""
Function to delete one named policy
:param name: the name of the policy to be deleted
:return: the count of the deleted policies.
:rtype: int
"""
p = Policy.query.filter_by(name=name)
res = p.delete()
db.session.commit()
return res
@log_with(log)
def export_policies(policies):
"""
This function takes a policy list and creates an export file from it
:param policies: a policy definition
:type policies: list of policy dictionaries
:return: the contents of the file
:rtype: string
"""
file_contents = ""
if policies:
for policy in policies:
file_contents += "[{0!s}]\n".format(policy.get("name"))
for key, value in policy.items():
file_contents += "{0!s} = {1!s}\n".format(key, value)
file_contents += "\n"
return file_contents
@log_with(log)
def import_policies(file_contents):
"""
This function imports policies from a file.
The file has a config_object format, i.e. the text file has a header
[<policy_name>]
key = value
and key value pairs.
:param file_contents: The contents of the file
:type file_contents: basestring
:return: number of imported policies
:rtype: int
"""
policies = ConfigObj(file_contents.split('\n'), encoding="UTF-8")
res = 0
for policy_name, policy in policies.iteritems():
ret = set_policy(name=policy_name,
action=eval(policy.get("action")),
scope=policy.get("scope"),
realm=eval(policy.get("realm", "[]")),
user=eval(policy.get("user", "[]")),
resolver=eval(policy.get("resolver", "[]")),
client=eval(policy.get("client", "[]")),
time=policy.get("time", "")
)
if ret > 0:
log.debug("import policy {0!s}: {1!s}".format(policy_name, ret))
res += 1
return res
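# A minimal sketch of the expected file contents (values are hypothetical and
# follow the eval() calls above, i.e. realm/user/resolver/client are Python
# list literals and action is a dict or string literal):
#
# [pol_authz_tokentype]
# action = {'tokentype': 'hotp totp'}
# scope = authorization
# realm = ['realm1']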
@log_with(log)
def get_static_policy_definitions(scope=None):
"""
These are the static hard coded policy definitions.
They can be enhanced by token based policy definitions, that can be found
in lib.token.get_dynamic_policy_definitions.
:param scope: Optional the scope of the policies
:type scope: basestring
:return: allowed scopes with allowed actions, the type of action and a
description.
:rtype: dict
"""
resolvers = get_resolver_list().keys()
realms = get_realms().keys()
smtpconfigs = [server.config.identifier for server in get_smtpservers()]
radiusconfigs = [radius.config.identifier for radius in
get_radiusservers()]
radiusconfigs.insert(0, "userstore")
pol = {
SCOPE.REGISTER: {
ACTION.RESOLVER: {'type': 'str',
'value': resolvers,
'desc': _('Define in which resolver the user '
'should be registered.')},
ACTION.REALM: {'type': 'str',
'value': realms,
'desc': _('Define in which realm the user should '
'be registered.')},
ACTION.EMAILCONFIG: {'type': 'str',
'value': smtpconfigs,
'desc': _('The SMTP server configuration, '
'that should be used to send the '
'registration email.')},
ACTION.REQUIREDEMAIL: {'type': 'str',
'desc': _('Only users with this email '
'address are allowed to '
'register. This is a regular '
'expression.')}
},
SCOPE.ADMIN: {
ACTION.ENABLE: {'type': 'bool',
'desc': _('Admin is allowed to enable tokens.')},
ACTION.DISABLE: {'type': 'bool',
'desc': _('Admin is allowed to disable tokens.')},
ACTION.SET: {'type': 'bool',
'desc': _(
'Admin is allowed to set token properties.')},
ACTION.SETPIN: {'type': 'bool',
'desc': _(
'Admin is allowed to set the OTP PIN of '
'tokens.')},
ACTION.RESYNC: {'type': 'bool',
'desc': _('Admin is allowed to resync tokens.')},
ACTION.RESET: {'type': 'bool',
'desc': _(
'Admin is allowed to reset the Failcounter of '
'a token.')},
ACTION.REVOKE: {'type': 'bool',
'desc': _("Admin is allowed to revoke a token")},
ACTION.ASSIGN: {'type': 'bool',
'desc': _(
'Admin is allowed to assign a token to a '
'user.')},
ACTION.UNASSIGN: {'type': 'bool',
'desc': _(
'Admin is allowed to remove the token from '
'a user, '
'i.e. unassign a token.')},
ACTION.IMPORT: {'type': 'bool',
'desc': _(
'Admin is allowed to import token files.')},
ACTION.DELETE: {'type': 'bool',
'desc': _(
'Admin is allowed to remove tokens from the '
'database.')},
ACTION.USERLIST: {'type': 'bool',
'desc': _(
'Admin is allowed to view the list of the '
'users.')},
ACTION.MACHINELIST: {'type': 'bool',
'desc': _('The Admin is allowed to list '
'the machines.')},
ACTION.MACHINETOKENS: {'type': 'bool',
'desc': _('The Admin is allowed to attach '
'and detach tokens to machines.')},
ACTION.AUTHITEMS: {'type': 'bool',
'desc': _('The Admin is allowed to fetch '
'authentication items of tokens '
'assigned to machines.')},
# 'checkstatus': {'type': 'bool',
# 'desc' : _('Admin is allowed to check the
# status of a challenge'
# "group": "tools"},
ACTION.TOKENREALMS: {'type': 'bool',
'desc': _('Admin is allowed to manage the '
'realms of a token.')},
ACTION.GETSERIAL: {'type': 'bool',
'desc': _('Admin is allowed to retrieve a serial'
' for a given OTP value.'),
"group": "tools"},
ACTION.GETRANDOM: {'type': 'bool',
'desc': _('Admin is allowed to retrieve '
'random keys from privacyIDEA.')},
# 'checkserial': {'type': 'bool',
# 'desc': _('Admin is allowed to check if a serial '
# 'is unique'),
# "group": "tools"},
ACTION.COPYTOKENPIN: {'type': 'bool',
'desc': _(
'Admin is allowed to copy the PIN of '
'one token '
'to another token.'),
"group": "tools"},
ACTION.COPYTOKENUSER: {'type': 'bool',
'desc': _(
'Admin is allowed to copy the assigned '
'user to another'
                                       ' token, i.e. assign a user to '
'another token.'),
"group": "tools"},
ACTION.LOSTTOKEN: {'type': 'bool',
'desc': _('Admin is allowed to trigger the '
'lost token workflow.'),
"group": "tools"},
# 'getotp': {
# 'type': 'bool',
# 'desc': _('Allow the administrator to retrieve OTP values
# for tokens.'),
# "group": "tools"},
ACTION.SYSTEMWRITE: {'type': 'bool',
"desc": _("Admin is allowed to write and "
"modify the system configuration."),
"group": "system"},
ACTION.SYSTEMDELETE: {'type': 'bool',
"desc": _("Admin is allowed to delete "
"keys in the system "
"configuration."),
"group": "system"},
ACTION.CONFIGDOCUMENTATION: {'type': 'bool',
'desc': _('Admin is allowed to '
'export a documentation '
'of the complete '
'configuration including '
'resolvers and realm.'),
'group': 'system'},
ACTION.POLICYWRITE: {'type': 'bool',
"desc": _("Admin is allowed to write and "
"modify the policies."),
"group": "system"},
ACTION.POLICYDELETE: {'type': 'bool',
"desc": _("Admin is allowed to delete "
"policies."),
"group": "system"},
ACTION.RESOLVERWRITE: {'type': 'bool',
"desc": _("Admin is allowed to write and "
"modify the "
"resolver and realm "
"configuration."),
"group": "system"},
ACTION.RESOLVERDELETE: {'type': 'bool',
"desc": _("Admin is allowed to delete "
"resolvers and realms."),
"group": "system"},
ACTION.CACONNECTORWRITE: {'type': 'bool',
"desc": _("Admin is allowed to create new"
" CA Connector definitions "
"and modify existing ones."),
"group": "system"},
ACTION.CACONNECTORDELETE: {'type': 'bool',
"desc": _("Admin is allowed to delete "
"CA Connector definitions."),
"group": "system"},
ACTION.MACHINERESOLVERWRITE: {'type': 'bool',
'desc': _("Admin is allowed to "
"write and modify the "
"machine resolvers."),
'group': "system"},
ACTION.MACHINERESOLVERDELETE: {'type': 'bool',
'desc': _("Admin is allowed to "
"delete "
"machine resolvers."),
'group': "system"},
ACTION.AUDIT: {'type': 'bool',
"desc": _("Admin is allowed to view the Audit log."),
"group": "system"},
ACTION.ADDUSER: {'type': 'bool',
"desc": _("Admin is allowed to add users in a "
"userstore/UserIdResolver."),
"group": "system"},
ACTION.UPDATEUSER: {'type': 'bool',
"desc": _("Admin is allowed to update the "
"users data in a userstore."),
"group": "system"},
ACTION.DELETEUSER: {'type': 'bool',
"desc": _("Admin is allowed to delete a user "
"object in a userstore.")},
ACTION.SETHSM: {'type': 'bool',
'desc': _("Admin is allowed to set the password "
"of the HSM/Security Module.")},
ACTION.GETCHALLENGES: {'type': 'bool',
'desc': _("Admin is allowed to retrieve "
"the list of active challenges.")},
ACTION.SMTPSERVERWRITE: {'type': 'bool',
'desc': _("Admin is allowed to write new "
"SMTP server definitions.")},
ACTION.RADIUSSERVERWRITE: {'type': 'bool',
'desc': _("Admin is allowed to write "
"new RADIUS server "
"definitions.")},
ACTION.EVENTHANDLINGWRITE: {'type': 'bool',
'desc': _("Admin is allowed to write "
"and modify the event "
"handling configuration.")}
},
# 'gettoken': {
# 'max_count_dpw': {'type': 'int',
# 'desc' : _('When OTP values are retrieved for
# a DPW token, '
# 'this is the maximum number of
# retrievable OTP values.')},
# 'max_count_hotp': {'type': 'int',
# 'desc' : _('When OTP values are retrieved
# for a HOTP token, '
# 'this is the maximum number of
# retrievable OTP values.')},
# 'max_count_totp': {'type': 'int',
# 'desc' : _('When OTP values are retrieved
# for a TOTP token, '
# 'this is the maximum number of
# retrievable OTP values.')},
# },
SCOPE.USER: {
ACTION.ASSIGN: {
'type': 'bool',
'desc': _("The user is allowed to assign an existing token"
" that is not yet assigned"
" using the token serial number.")},
ACTION.DISABLE: {'type': 'bool',
'desc': _(
'The user is allowed to disable his own '
'tokens.')},
ACTION.ENABLE: {'type': 'bool',
'desc': _(
"The user is allowed to enable his own "
"tokens.")},
ACTION.DELETE: {'type': 'bool',
"desc": _(
"The user is allowed to delete his own "
"tokens.")},
ACTION.UNASSIGN: {'type': 'bool',
"desc": _("The user is allowed to unassign his "
"own tokens.")},
            ACTION.RESYNC: {'type': 'bool',
                            "desc": _("The user is allowed to resynchronize his "
"tokens.")},
ACTION.REVOKE: {'type': 'bool',
'desc': _("The user is allowed to revoke a token")},
ACTION.RESET: {'type': 'bool',
'desc': _('The user is allowed to reset the '
'failcounter of his tokens.')},
ACTION.SETPIN: {'type': 'bool',
"desc": _("The user is allowed to set the OTP "
"PIN "
"of his tokens.")},
ACTION.OTPPINMAXLEN: {'type': 'int',
'value': range(0, 32),
"desc": _("Set the maximum allowed length "
"of the OTP PIN.")},
ACTION.OTPPINMINLEN: {'type': 'int',
'value': range(0, 32),
"desc": _("Set the minimum required length "
"of the OTP PIN.")},
            ACTION.OTPPINCONTENTS: {'type': 'str',
                                    "desc": _("Specify the required "
"contents of the OTP PIN. "
"(c)haracters, (n)umeric, "
"(s)pecial, (o)thers. [+/-]!")},
# 'setMOTPPIN': {'type': 'bool',
# "desc": _("The user is allowed to set the mOTP
# PIN of his mOTP tokens.")},
# 'getotp': {'type': 'bool',
# "desc": _("The user is allowed to retrieve OTP
# values for his own tokens.")},
# 'activateQR': {'type': 'bool',
# "desc": _("The user is allowed to enroll a QR
# token.")},
# 'max_count_dpw': {'type': 'int',
# "desc": _("This is the maximum number of OTP
# values, the user is allowed to retrieve for a DPW token.")},
# 'max_count_hotp': {'type': 'int',
# "desc": _("This is the maximum number of OTP
# values, the user is allowed to retrieve for a HOTP token.")},
# 'max_count_totp': {'type': 'int',
# "desc": _("This is the maximum number of OTP
# values, the user is allowed to retrieve for a TOTP token.")},
ACTION.AUDIT: {
'type': 'bool',
'desc': _('Allow the user to view his own token history.')},
ACTION.USERLIST: {'type': 'bool',
'desc': _("The user is allowed to view his "
"own user information.")},
ACTION.UPDATEUSER: {'type': 'bool',
'desc': _("The user is allowed to update his "
"own user information, like changing "
"his password.")},
ACTION.PASSWORDRESET: {'type': 'bool',
'desc': _("The user is allowed to do a "
"password reset in an editable "
"UserIdResolver.")}
# 'getserial': {
# 'type': 'bool',
# 'desc': _('Allow the user to search an unassigned token by
# OTP value.')},
},
SCOPE.ENROLL: {
ACTION.MAXTOKENREALM: {
'type': 'int',
'desc': _('Limit the number of allowed tokens in a realm.')},
ACTION.MAXTOKENUSER: {
'type': 'int',
'desc': _('Limit the number of tokens a user may have '
'assigned.')},
ACTION.OTPPINRANDOM: {
'type': 'int',
'value': range(0, 32),
"desc": _("Set a random OTP PIN with this length for a "
"token.")},
ACTION.PINHANDLING: {
'type': 'str',
'desc': _('In case of a random OTP PIN use this python '
'module to process the PIN.')},
ACTION.ENCRYPTPIN: {
'type': 'bool',
"desc": _("The OTP PIN can be hashed or encrypted. Hashing "
"the PIN is the default behaviour.")},
ACTION.TOKENLABEL: {
'type': 'str',
                'desc': _("Set label for a newly enrolled Google Authenticator. "
"Possible tags are <u> (user), <r> ("
"realm), <s> (serial).")},
ACTION.TOKENISSUER: {
'type': 'str',
                'desc': _("This is the issuer label for newly enrolled Google "
"Authenticators.")
},
ACTION.AUTOASSIGN: {
'type': 'str',
'value': [AUTOASSIGNVALUE.NONE, AUTOASSIGNVALUE.USERSTORE],
'desc': _("Users can assign a token just by using the "
"unassigned token to authenticate.")},
ACTION.LOSTTOKENPWLEN: {
'type': 'int',
'value': range(1, 32),
'desc': _('The length of the password in case of '
'temporary token (lost token).')},
ACTION.LOSTTOKENPWCONTENTS: {
'type': 'str',
'desc': _('The contents of the temporary password, '
'described by the characters C, c, n, s.')},
ACTION.LOSTTOKENVALID: {
'type': 'int',
'value': range(1, 61),
'desc': _('The length of the validity for the temporary '
'token (in days).')},
},
SCOPE.AUTH: {
ACTION.OTPPIN: {
'type': 'str',
'value': [ACTIONVALUE.TOKENPIN, ACTIONVALUE.USERSTORE,
ACTIONVALUE.NONE],
                'desc': _('Either use the Token PIN, use the Userstore '
'Password or use no fixed password '
'component.')},
ACTION.CHALLENGERESPONSE: {
'type': 'str',
                'desc': _('This is a whitespace separated list of tokentypes '
                          'that can be used with challenge response.')
},
ACTION.PASSTHRU: {
'type': 'str',
'value': radiusconfigs,
'desc': _('If set, the user in this realm will be '
'authenticated against the userstore or against the '
'given RADIUS config,'
' if the user has no tokens assigned.')
},
ACTION.PASSNOTOKEN: {
'type': 'bool',
'desc': _('If the user has no token, the authentication '
'request for this user will always be true.')
},
ACTION.PASSNOUSER: {
'type': 'bool',
                'desc': _('If the user does not exist, '
'the authentication request for this '
'non-existing user will always be true.')
},
ACTION.MANGLE: {
'type': 'str',
'desc': _('Can be used to modify the parameters pass, '
'user and realm in an authentication request. See '
'the documentation for an example.')
}
# 'qrtanurl': {
# 'type': 'str',
# 'desc': _('The URL for the half automatic mode that should
# be '
# 'used in a QR Token')
# },
# 'challenge_response': {
# 'type': 'str',
# 'desc': _('A list of tokentypes for which challenge response '
# 'should be used.')
# }
},
SCOPE.AUTHZ: {
ACTION.AUTHMAXSUCCESS: {
'type': 'str',
'desc': _("You can specify how many successful authentication "
"requests a user is allowed to do in a given time. "
"Specify like 1/5s, 2/10m, 10/1h - s, m, h being "
"second, minute and hour.")
},
ACTION.AUTHMAXFAIL: {
'type': 'str',
'desc': _("You can specify how many failed authentication "
"requests a user is allowed to do in a given time. "
"Specify like 1/5s, 2/10m, 10/1h - s, m, h being "
"second, minute and hour.")
},
ACTION.LASTAUTH: {
'type': 'str',
'desc': _("You can specify in which time frame the user needs "
"to authenticate again with this token. If the user "
"authenticates later, authentication will fail. "
"Specify like 30h, 7d or 1y.")
},
ACTION.TOKENTYPE: {
'type': 'str',
'desc': _('The user will only be authenticated with this '
'very tokentype.')},
ACTION.SERIAL: {
'type': 'str',
'desc': _('The user will only be authenticated if the serial '
'number of the token matches this regexp.')},
ACTION.SETREALM: {
'type': 'str',
'value': realms,
'desc': _('The Realm of the user is set to this very realm. '
'This is important if the user is not contained in '
'the default realm and can not pass his realm.')},
ACTION.NODETAILSUCCESS: {
'type': 'bool',
                'desc': _('In case of successful authentication no additional '
                          'detail information will be returned.')},
ACTION.NODETAILFAIL: {
'type': 'bool',
                'desc': _('In case of failed authentication no additional '
                          'detail information will be returned.')},
ACTION.APIKEY: {
'type': 'bool',
                'desc': _('The sending of an API Auth Key is required during '
'authentication. This avoids rogue authenticate '
'requests against the /validate/check interface.')
}
},
SCOPE.WEBUI: {
ACTION.LOGINMODE: {
'type': 'str',
'desc': _(
'If set to "privacyIDEA" the users and admins need to '
'authenticate against privacyIDEA when they log in '
'to the Web UI. Defaults to "userstore"'),
'value': [LOGINMODE.USERSTORE, LOGINMODE.PRIVACYIDEA,
LOGINMODE.DISABLE],
},
ACTION.REMOTE_USER: {
'type': 'str',
'value': [REMOTE_USER.ACTIVE, REMOTE_USER.DISABLE],
'desc': _('The REMOTE_USER set by the webserver can be used '
'to login to privacyIDEA or it will be ignored. '
'Defaults to "disable".')
},
ACTION.LOGOUTTIME: {
'type': 'int',
'desc': _("Set the time in seconds after which the user will "
"be logged out from the WebUI. Default: 120")
},
ACTION.TOKENPAGESIZE: {
'type': 'int',
'desc': _("Set how many tokens should be displayed in the "
"token view on one page.")
},
ACTION.USERPAGESIZE: {
'type': 'int',
'desc': _("Set how many users should be displayed in the user "
"view on one page.")
},
ACTION.USERDETAILS: {
'type': 'bool',
'desc': _("Whether the user ID and the resolver should be "
"displayed in the token list.")
},
ACTION.POLICYTEMPLATEURL: {
'type': 'str',
'desc': _("The URL of a repository, where the policy "
"templates can be found. (Default "
"https://raw.githubusercontent.com/privacyidea/"
"policy-templates/master/templates/)")
},
ACTION.TOKENWIZARD: {
'type': 'bool',
'desc': _("As long as a user has no token, he will only see"
" a token wizard in the UI.")
},
ACTION.TOKENWIZARD2ND: {
'type': 'bool',
'desc': _("The tokenwizard will be displayed in the token "
"menu, even if the user already has a token.")
},
ACTION.DEFAULT_TOKENTYPE: {
'type': 'str',
'desc': _("This is the default token type in the token "
"enrollment dialog."),
'value': get_token_types()
},
ACTION.REALMDROPDOWN: {
'type': 'bool',
'desc': _("If this is checked, a dropdown combobox with the "
"realms is displayed in the login screen.")
}
}
# 'ocra': {
# 'request': {
# 'type': 'bool',
# 'desc': _('Allow to do a ocra/request.')},
# 'status': {
# 'type': 'bool',
# 'desc': _('Allow to check the transaction status.')},
# 'activationcode': {
# 'type': 'bool',
# 'desc': _('Allow to do an ocra/getActivationCode.')},
# 'calcOTP': {
# 'type': 'bool',
# 'desc': _('Allow to do an ocra/calculateOtp.')}
# },
}
if scope:
ret = pol.get(scope, {})
else:
ret = pol
return ret
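# Illustrative usage sketch (not part of the original module): inside a
# running privacyIDEA application context the definitions can be narrowed to
# a single scope, e.g.
#
#     defs = get_static_policy_definitions(SCOPE.WEBUI)
#     for action, definition in defs.items():
#         print(action, definition['type'], definition['desc'])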
| jalr/privacyidea | privacyidea/lib/policy.py | Python | agpl-3.0 | 57,193 |
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import re
from decimal import Decimal
from datetime import datetime
from weboob.browser.filters.json import Dict
from weboob.browser.elements import ItemElement, ListElement, method
from weboob.browser.pages import JsonPage, HTMLPage, pagination
from weboob.browser.filters.standard import CleanText, CleanDecimal, Regexp, Env, BrowserURL, Filter, Format
from weboob.browser.filters.html import CleanHTML, XPath
from weboob.capabilities.base import NotAvailable, NotLoaded
from weboob.capabilities.housing import Housing, HousingPhoto, City
class DictElement(ListElement):
def find_elements(self):
for el in self.el[0].get(self.item_xpath):
yield el
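# Editorial note: unlike the XPath-based ListElement, this helper walks the
# parsed JSON document and yields every entry of the list stored under
# item_xpath (e.g. doc['locations'] for CitiesPage below).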
class CitiesPage(JsonPage):
@method
class get_cities(DictElement):
item_xpath = 'locations'
class item(ItemElement):
klass = City
obj_id = Dict('label')
obj_name = Dict('label')
class SearchPage(HTMLPage):
@pagination
@method
class iter_housings(ListElement):
item_xpath = '//div[starts-with(@id, "bloc-vue-")]'
def next_page(self):
js_datas = CleanText('//div[@id="js-data"]/@data-rest-search-request')(self)
total_page = self.page.browser.get_total_page(js_datas.split('?')[-1])
            m = re.match(r".*page=(\d+)(?:&.*)?", self.page.url)
if m:
current_page = int(m.group(1))
next_page = current_page + 1
if next_page <= total_page:
return self.page.url.replace('page=%d' % current_page, 'page=%d' % next_page)
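        # Illustrative example (hypothetical URL): with total_page == 5, a
        # current URL ending in "?page=2&..." is rewritten to "?page=3&..."
        # in order to fetch the next result page.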
class item(ItemElement):
klass = Housing
obj_id = CleanText('./@data-classified-id')
obj_title = CleanText('./div/h2[@itemprop="name"]/a')
            obj_location = CleanText('./div/h2[@itemprop="name"]/span[@class="item-localisation"]')
obj_cost = CleanDecimal('./div/div/span[@class="price-label"]')
obj_currency = Regexp(CleanText('./div/div/span[@class="price-label"]'),
'.*([%s%s%s])' % (u'€', u'$', u'£'), default=u'€')
obj_text = CleanText('./div/div/div[@itemprop="description"]')
obj_area = CleanDecimal(Regexp(CleanText('./div/h2[@itemprop="name"]/a'),
'(.*?)(\d*) m2(.*?)', '\\2', default=None),
default=NotAvailable)
def obj_phone(self):
phone = CleanText('./div/div/ul/li/span[@class="js-clickphone"]',
replace=[(u'Téléphoner : ', u'')],
default=NotAvailable)(self)
if '...' in phone:
return NotLoaded
return phone
def obj_photos(self):
url = CleanText('./div/div/a/img[@itemprop="image"]/@src')(self)
return [HousingPhoto(url)]
class TypeDecimal(Filter):
def filter(self, el):
return Decimal(el)
class FromTimestamp(Filter):
def filter(self, el):
return datetime.fromtimestamp(el / 1000.0)
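# Editorial sketch of the two helper filters above (hypothetical values):
# TypeDecimal turns a raw JSON value such as '1250' into Decimal('1250'),
# while FromTimestamp expects a timestamp in milliseconds (hence the division
# by 1000.0), so 1397462400000 becomes the corresponding local datetime in
# April 2014.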
class PhonePage(JsonPage):
def get_phone(self):
return self.doc.get('phoneNumber')
class HousingPage2(JsonPage):
@method
class get_housing(ItemElement):
klass = Housing
obj_id = Env('_id')
obj_title = Dict('characteristics/titleWithTransaction')
obj_location = Format('%s %s %s', Dict('location/address'),
Dict('location/postalCode'), Dict('location/cityLabel'))
obj_cost = TypeDecimal(Dict('characteristics/price'))
obj_currency = u'€'
obj_text = CleanHTML(Dict('characteristics/description'))
obj_url = BrowserURL('housing_html', _id=Env('_id'))
obj_area = TypeDecimal(Dict('characteristics/area'))
obj_date = FromTimestamp(Dict('characteristics/date'))
def obj_photos(self):
photos = []
for img in Dict('characteristics/images')(self):
m = re.search('http://thbr\.figarocms\.net.*(http://.*)', img)
if m:
photos.append(HousingPhoto(m.group(1)))
return photos
def obj_details(self):
details = {}
details['fees'] = Dict('characteristics/fees')(self)
details['bedrooms'] = Dict('characteristics/bedroomCount')(self)
details['energy'] = Dict('characteristics/energyConsumptionCategory')(self)
rooms = Dict('characteristics/roomCount')(self)
if len(rooms):
details['rooms'] = rooms[0]
details['available'] = Dict('characteristics/available')(self)
return details
def get_total_page(self):
return self.doc.get('pagination').get('total')
class HousingPage(HTMLPage):
@method
class get_housing(ItemElement):
klass = Housing
obj_id = Env('_id')
obj_title = CleanText('//h1[@itemprop="name"]')
obj_location = CleanText('//span[@class="informations-localisation"]')
obj_cost = CleanDecimal('//span[@itemprop="price"]')
obj_currency = Regexp(CleanText('//span[@itemprop="price"]'),
'.*([%s%s%s])' % (u'€', u'$', u'£'), default=u'€')
obj_text = CleanHTML('//div[@itemprop="description"]')
obj_url = BrowserURL('housing', _id=Env('_id'))
obj_area = CleanDecimal(Regexp(CleanText('//h1[@itemprop="name"]'),
'(.*?)(\d*) m2(.*?)', '\\2'), default=NotAvailable)
def obj_photos(self):
photos = []
for img in XPath('//a[@class="thumbnail-link"]/img[@itemprop="image"]')(self):
url = Regexp(CleanText('./@src'), 'http://thbr\.figarocms\.net.*(http://.*)')(img)
photos.append(HousingPhoto(url))
return photos
def obj_details(self):
details = dict()
for item in XPath('//div[@class="features clearfix"]/ul/li')(self):
key = CleanText('./span[@class="name"]')(item)
value = CleanText('./span[@class="value"]')(item)
if value and key:
details[key] = value
key = CleanText('//div[@class="title-dpe clearfix"]')(self)
value = CleanText('//div[@class="energy-consumption"]')(self)
if value and key:
details[key] = value
return details
| frankrousseau/weboob | modules/explorimmo/pages.py | Python | agpl-3.0 | 7,242 |
"""
Django module container for classes and operations related to the "Course Module" content type
"""
import logging
from cStringIO import StringIO
from lxml import etree
from path import Path as path
from pytz import utc
import requests
from datetime import datetime
from lazy import lazy
from xmodule import course_metadata_utils
from xmodule.course_metadata_utils import DEFAULT_START_DATE
from xmodule.exceptions import UndefinedContext
from xmodule.seq_module import SequenceDescriptor, SequenceModule
from xmodule.graders import grader_from_conf
from xmodule.tabs import CourseTabList, InvalidTabsException
from xmodule.mixin import LicenseMixin
import json
from xblock.core import XBlock
from xblock.fields import Scope, List, String, Dict, Boolean, Integer, Float
from .fields import Date
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
CATALOG_VISIBILITY_CATALOG_AND_ABOUT = "both"
CATALOG_VISIBILITY_ABOUT = "about"
CATALOG_VISIBILITY_NONE = "none"
class StringOrDate(Date):
def from_json(self, value):
"""
Parse an optional metadata key containing a time or a string:
if present, assume it's a string if it doesn't parse.
"""
try:
result = super(StringOrDate, self).from_json(value)
except ValueError:
return value
if result is None:
return value
else:
return result
def to_json(self, value):
"""
Convert a time struct or string to a string.
"""
try:
result = super(StringOrDate, self).to_json(value)
except:
return value
if result is None:
return value
else:
return result
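# Illustrative behaviour sketch (hypothetical values): a parseable value such
# as "2015-09-01T00:00" comes out of from_json() as a datetime, while a
# non-date string such as "TBD" is passed through unchanged by both
# from_json() and to_json().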
edx_xml_parser = etree.XMLParser(dtd_validation=False, load_dtd=False,
remove_comments=True, remove_blank_text=True)
_cached_toc = {}
class Textbook(object):
def __init__(self, title, book_url):
self.title = title
self.book_url = book_url
@lazy
def start_page(self):
return int(self.table_of_contents[0].attrib['page'])
@lazy
def end_page(self):
# The last page should be the last element in the table of contents,
# but it may be nested. So recurse all the way down the last element
last_el = self.table_of_contents[-1]
while last_el.getchildren():
last_el = last_el[-1]
return int(last_el.attrib['page'])
@lazy
def table_of_contents(self):
"""
Accesses the textbook's table of contents (default name "toc.xml") at the URL self.book_url
Returns XML tree representation of the table of contents
"""
toc_url = self.book_url + 'toc.xml'
# cdodge: I've added this caching of TOC because in Mongo-backed instances (but not Filesystem stores)
# course modules have a very short lifespan and are constantly being created and torn down.
# Since this module in the __init__() method does a synchronous call to AWS to get the TOC
# this is causing a big performance problem. So let's be a bit smarter about this and cache
# each fetch and store in-mem for 10 minutes.
# NOTE: I have to get this onto sandbox ASAP as we're having runtime failures. I'd like to swing back and
# rewrite to use the traditional Django in-memory cache.
try:
# see if we already fetched this
if toc_url in _cached_toc:
(table_of_contents, timestamp) = _cached_toc[toc_url]
age = datetime.now(utc) - timestamp
# expire every 10 minutes
if age.seconds < 600:
return table_of_contents
except Exception as err:
pass
# Get the table of contents from S3
log.info("Retrieving textbook table of contents from %s", toc_url)
try:
r = requests.get(toc_url)
except Exception as err:
msg = 'Error %s: Unable to retrieve textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
# TOC is XML. Parse it
try:
table_of_contents = etree.fromstring(r.text)
except Exception as err:
msg = 'Error %s: Unable to parse XML for textbook table of contents at %s' % (err, toc_url)
log.error(msg)
raise Exception(msg)
return table_of_contents
def __eq__(self, other):
return (self.title == other.title and
self.book_url == other.book_url)
def __ne__(self, other):
return not self == other
class TextbookList(List):
def from_json(self, values):
textbooks = []
for title, book_url in values:
try:
textbooks.append(Textbook(title, book_url))
except:
# If we can't get to S3 (e.g. on a train with no internet), don't break
# the rest of the courseware.
log.exception("Couldn't load textbook ({0}, {1})".format(title, book_url))
continue
return textbooks
def to_json(self, values):
json_data = []
for val in values:
if isinstance(val, Textbook):
json_data.append((val.title, val.book_url))
elif isinstance(val, tuple):
json_data.append(val)
else:
continue
return json_data
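# Illustrative round-trip sketch (hypothetical values): the field stores
# (title, url) pairs and rebuilds Textbook objects on load, e.g.
#
#     field = TextbookList()
#     books = field.from_json([("Intro", "https://example.com/book/")])
#     field.to_json(books)   # -> [("Intro", "https://example.com/book/")]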
class CourseFields(object):
lti_passports = List(
display_name=_("LTI Passports"),
help=_('Enter the passports for course LTI tools in the following format: "id:client_key:client_secret".'),
scope=Scope.settings
)
textbooks = TextbookList(
help=_("List of pairs of (title, url) for textbooks used in this course"),
default=[],
scope=Scope.content
)
wiki_slug = String(help=_("Slug that points to the wiki for this course"), scope=Scope.content)
enrollment_start = Date(help=_("Date that enrollment for this class is opened"), scope=Scope.settings)
enrollment_end = Date(help=_("Date that enrollment for this class is closed"), scope=Scope.settings)
start = Date(
help=_("Start time when this module is visible"),
default=DEFAULT_START_DATE,
scope=Scope.settings
)
end = Date(help=_("Date that this class ends"), scope=Scope.settings)
individual_end_days = Integer(
help=_("Number of days from the base date to the course ends"),
scope=Scope.settings
)
individual_end_hours = Integer(
help=_("Number of hours from the base date to the course ends"),
scope=Scope.settings
)
individual_end_minutes = Integer(
help=_("Number of minutes from the base date to the course ends"),
scope=Scope.settings
)
cosmetic_display_price = Integer(
display_name=_("Cosmetic Course Display Price"),
help=_(
"The cost displayed to students for enrolling in the course. If a paid course registration price is "
"set by an administrator in the database, that price will be displayed instead of this one."
),
default=0,
scope=Scope.settings,
)
    deadline_start = Date(help=_("Date that this course is closed; this setting only affects the course list"), scope=Scope.settings)
terminate_start = Date(help=_("Date that this course terminate(course is hidden)"), scope=Scope.settings)
advertised_start = String(
display_name=_("Course Advertised Start Date"),
help=_(
"Enter the date you want to advertise as the course start date, if this date is different from the set "
"start date. To advertise the set start date, enter null."
),
scope=Scope.settings
)
pre_requisite_courses = List(
display_name=_("Pre-Requisite Courses"),
help=_("Pre-Requisite Course key if this course has a pre-requisite course"),
scope=Scope.settings
)
grading_policy = Dict(
help=_("Grading policy definition for this class"),
default={
"GRADER": [
{
"type": "Homework",
"min_count": 12,
"drop_count": 2,
"short_label": "HW",
"weight": 0.15,
},
{
"type": "Lab",
"min_count": 12,
"drop_count": 2,
"weight": 0.15,
},
{
"type": "Midterm Exam",
"short_label": "Midterm",
"min_count": 1,
"drop_count": 0,
"weight": 0.3,
},
{
"type": "Final Exam",
"short_label": "Final",
"min_count": 1,
"drop_count": 0,
"weight": 0.4,
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5,
},
},
scope=Scope.content
)
show_calculator = Boolean(
display_name=_("Show Calculator"),
help=_("Enter true or false. When true, students can see the calculator in the course."),
default=False,
scope=Scope.settings
)
display_name = String(
help=_("Enter the name of the course as it should appear in the edX.org course list."),
default="Empty",
display_name=_("Course Display Name"),
scope=Scope.settings
)
course_edit_method = String(
display_name=_("Course Editor"),
help=_('Enter the method by which this course is edited ("XML" or "Studio").'),
default="Studio",
scope=Scope.settings,
deprecated=True # Deprecated because someone would not edit this value within Studio.
)
tabs = CourseTabList(help="List of tabs to enable in this course", scope=Scope.settings, default=[])
end_of_course_survey_url = String(
display_name=_("Course Survey URL"),
help=_("Enter the URL for the end-of-course survey. If your course does not have a survey, enter null."),
scope=Scope.settings,
deprecated=True # We wish to remove this entirely, TNL-3399
)
discussion_blackouts = List(
display_name=_("Discussion Blackout Dates"),
help=_(
'Enter pairs of dates between which students cannot post to discussion forums. Inside the provided '
'brackets, enter an additional set of square brackets surrounding each pair of dates you add. '
'Format each pair of dates as ["YYYY-MM-DD", "YYYY-MM-DD"]. To specify times as well as dates, '
'format each pair as ["YYYY-MM-DDTHH:MM", "YYYY-MM-DDTHH:MM"]. Be sure to include the "T" between '
'the date and time. For example, an entry defining two blackout periods looks like this, including '
'the outer pair of square brackets: [["2015-09-15", "2015-09-21"], ["2015-10-01", "2015-10-08"]] '
),
scope=Scope.settings
)
discussion_topics = Dict(
display_name=_("Discussion Topic Mapping"),
help=_(
'Enter discussion categories in the following format: "CategoryName": '
'{"id": "i4x-InstitutionName-CourseNumber-course-CourseRun"}. For example, one discussion '
'category may be "Lydian Mode": {"id": "i4x-UniversityX-MUS101-course-2015_T1"}. The "id" '
'value for each category must be unique. In "id" values, the only special characters that are '
'supported are underscore, hyphen, and period.'
),
scope=Scope.settings
)
discussion_sort_alpha = Boolean(
display_name=_("Discussion Sorting Alphabetical"),
scope=Scope.settings, default=False,
help=_(
"Enter true or false. If true, discussion categories and subcategories are sorted alphabetically. "
"If false, they are sorted chronologically."
)
)
announcement = Date(
display_name=_("Course Announcement Date"),
help=_("Enter the date to announce your course."),
scope=Scope.settings
)
cohort_config = Dict(
display_name=_("Cohort Configuration"),
help=_(
"Enter policy keys and values to enable the cohort feature, define automated student assignment to "
"groups, or identify any course-wide discussion topics as private to cohort members."
),
scope=Scope.settings
)
is_new = Boolean(
display_name=_("Course Is New"),
help=_(
"Enter true or false. If true, the course appears in the list of new courses on edx.org, and a New! "
"badge temporarily appears next to the course image."
),
scope=Scope.settings
)
mobile_available = Boolean(
display_name=_("Mobile Course Available"),
help=_("Enter true or false. If true, the course will be available to mobile devices."),
default=False,
scope=Scope.settings
)
video_upload_pipeline = Dict(
display_name=_("Video Upload Credentials"),
help=_("Enter the unique identifier for your course's video files provided by edX."),
scope=Scope.settings
)
facebook_url = String(
help=_(
"Enter the URL for the official course Facebook group. "
"If you provide a URL, the mobile app includes a button that students can tap to access the group."
),
default=None,
display_name=_("Facebook URL"),
scope=Scope.settings
)
no_grade = Boolean(
display_name=_("Course Not Graded"),
help=_("Enter true or false. If true, the course will not be graded."),
default=False,
scope=Scope.settings
)
disable_progress_graph = Boolean(
display_name=_("Disable Progress Graph"),
help=_("Enter true or false. If true, students cannot view the progress graph."),
default=False,
scope=Scope.settings
)
pdf_textbooks = List(
display_name=_("PDF Textbooks"),
help=_("List of dictionaries containing pdf_textbook configuration"), scope=Scope.settings
)
html_textbooks = List(
display_name=_("HTML Textbooks"),
help=_(
"For HTML textbooks that appear as separate tabs in the courseware, enter the name of the tab (usually "
"the name of the book) as well as the URLs and titles of all the chapters in the book."
),
scope=Scope.settings
)
remote_gradebook = Dict(
display_name=_("Remote Gradebook"),
help=_(
"Enter the remote gradebook mapping. Only use this setting when "
"REMOTE_GRADEBOOK_URL has been specified."
),
scope=Scope.settings
)
enable_ccx = Boolean(
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
display_name=_("Enable CCX"),
help=_(
# Translators: Custom Courses for edX (CCX) is an edX feature for re-using course content. CCX Coach is
# a role created by a course Instructor to enable a person (the "Coach") to manage the custom course for
# his students.
"Allow course instructors to assign CCX Coach roles, and allow coaches to manage Custom Courses on edX."
" When false, Custom Courses cannot be created, but existing Custom Courses will be preserved."
),
default=False,
scope=Scope.settings
)
allow_anonymous = Boolean(
display_name=_("Allow Anonymous Discussion Posts"),
help=_("Enter true or false. If true, students can create discussion posts that are anonymous to all users."),
scope=Scope.settings, default=True
)
allow_anonymous_to_peers = Boolean(
display_name=_("Allow Anonymous Discussion Posts to Peers"),
help=_(
"Enter true or false. If true, students can create discussion posts that are anonymous to other "
"students. This setting does not make posts anonymous to course staff."
),
scope=Scope.settings, default=False
)
advanced_modules = List(
display_name=_("Advanced Module List"),
help=_("Enter the names of the advanced components to use in your course."),
scope=Scope.settings
)
has_children = True
info_sidebar_name = String(
display_name=_("Course Info Sidebar Name"),
help=_(
"Enter the heading that you want students to see above your course handouts on the Course Info page. "
"Your course handouts appear in the right panel of the page."
),
scope=Scope.settings, default='Course Handouts')
show_timezone = Boolean(
help=_(
"True if timezones should be shown on dates in the courseware. "
"Deprecated in favor of due_date_display_format."
),
scope=Scope.settings, default=True
)
due_date_display_format = String(
display_name=_("Due Date Display Format"),
help=_(
"Enter the format for due dates. The default is Mon DD, YYYY. Enter \"%m-%d-%Y\" for MM-DD-YYYY, "
"\"%d-%m-%Y\" for DD-MM-YYYY, \"%Y-%m-%d\" for YYYY-MM-DD, or \"%Y-%d-%m\" for YYYY-DD-MM."
),
scope=Scope.settings, default=None
)
enrollment_domain = String(
display_name=_("External Login Domain"),
help=_("Enter the external login method students can use for the course."),
scope=Scope.settings
)
certificates_show_before_end = Boolean(
display_name=_("Certificates Downloadable Before End"),
help=_(
"Enter true or false. If true, students can download certificates before the course ends, if they've "
"met certificate requirements."
),
scope=Scope.settings,
default=False,
deprecated=True
)
certificates_display_behavior = String(
display_name=_("Certificates Display Behavior"),
help=_(
"Enter end, early_with_info, or early_no_info. After certificate generation, students who passed see a "
"link to their certificates on the dashboard and students who did not pass see information about the "
"grading configuration. The default is early_with_info, which displays this certificate information to "
"all students as soon as certificates are generated. To display this certificate information to all "
"students after the course end date, enter end. To display only the links to passing students "
"as soon as certificates are generated, enter early_no_info."
),
scope=Scope.settings,
default="early_with_info"
)
course_image = String(
display_name=_("Course About Page Image"),
help=_(
"Edit the name of the course image file. You must upload this file on the Files & Uploads page. "
"You can also set the course image on the Settings & Details page."
),
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default="images_course_image.jpg"
)
custom_logo = String(
display_name=_("Custom Logo Image"),
help=_(
"Edit the name of the custom logo image file. You must upload this file on the Files & Uploads page. "
"You can also set the custom logo image on the Settings & Details page."
),
scope=Scope.settings,
# Ensure that courses imported from XML keep their image
default=""
)
issue_badges = Boolean(
display_name=_("Issue Open Badges"),
help=_(
"Issue Open Badges badges for this course. Badges are generated when certificates are created."
),
scope=Scope.settings,
default=True
)
## Course level Certificate Name overrides.
cert_name_short = String(
help=_(
"Use this setting only when generating PDF certificates. "
"Between quotation marks, enter the short name of the course to use on the certificate that "
"students receive when they complete the course."
),
display_name=_("Certificate Name (Short)"),
scope=Scope.settings,
default=""
)
cert_name_long = String(
help=_(
"Use this setting only when generating PDF certificates. "
"Between quotation marks, enter the long name of the course to use on the certificate that students "
"receive when they complete the course."
),
display_name=_("Certificate Name (Long)"),
scope=Scope.settings,
default=""
)
cert_html_view_enabled = Boolean(
display_name=_("Certificate Web/HTML View Enabled"),
help=_("If true, certificate Web/HTML views are enabled for the course."),
scope=Scope.settings,
default=False,
)
cert_html_view_overrides = Dict(
        # Translators: This field is the container for course-specific certificate configuration values
display_name=_("Certificate Web/HTML View Overrides"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific overrides for the Web/HTML template parameters here (JSON format)"),
scope=Scope.settings,
)
# Specific certificate information managed via Studio (should eventually fold other cert settings into this)
certificates = Dict(
        # Translators: This field is the container for course-specific certificate configuration values
display_name=_("Certificate Configuration"),
# Translators: These overrides allow for an alternative configuration of the certificate web view
help=_("Enter course-specific configuration information here (JSON format)"),
scope=Scope.settings,
)
# An extra property is used rather than the wiki_slug/number because
# there are courses that change the number for different runs. This allows
# courses to share the same css_class across runs even if they have
# different numbers.
#
# TODO get rid of this as soon as possible or potentially build in a robust
# way to add in course-specific styling. There needs to be a discussion
# about the right way to do this, but arjun will address this ASAP. Also
# note that the courseware template needs to change when this is removed.
css_class = String(
display_name=_("CSS Class for Course Reruns"),
help=_("Allows courses to share the same css class across runs even if they have different numbers."),
scope=Scope.settings, default="",
deprecated=True
)
# TODO: This is a quick kludge to allow CS50 (and other courses) to
# specify their own discussion forums as external links by specifying a
# "discussion_link" in their policy JSON file. This should later get
# folded in with Syllabus, Course Info, and additional Custom tabs in a
# more sensible framework later.
discussion_link = String(
display_name=_("Discussion Forum External Link"),
help=_("Allows specification of an external link to replace discussion forums."),
scope=Scope.settings,
deprecated=True
)
# TODO: same as above, intended to let internal CS50 hide the progress tab
# until we get grade integration set up.
# Explicit comparison to True because we always want to return a bool.
hide_progress_tab = Boolean(
display_name=_("Hide Progress Tab"),
help=_("Allows hiding of the progress tab."),
scope=Scope.settings,
deprecated=True
)
display_organization = String(
display_name=_("Course Organization Display String"),
help=_(
"Enter the course organization that you want to appear in the courseware. This setting overrides the "
"organization that you entered when you created the course. To use the organization that you entered "
"when you created the course, enter null."
),
scope=Scope.settings
)
display_coursenumber = String(
display_name=_("Course Number Display String"),
help=_(
"Enter the course number that you want to appear in the courseware. This setting overrides the course "
"number that you entered when you created the course. To use the course number that you entered when "
"you created the course, enter null."
),
scope=Scope.settings,
default=""
)
max_student_enrollments_allowed = Integer(
display_name=_("Course Maximum Student Enrollment"),
help=_(
"Enter the maximum number of students that can enroll in the course. To allow an unlimited number of "
"students, enter null."
),
scope=Scope.settings
)
allow_public_wiki_access = Boolean(
display_name=_("Allow Public Wiki Access"),
help=_(
"Enter true or false. If true, edX users can view the course wiki even "
"if they're not enrolled in the course."
),
default=False,
scope=Scope.settings
)
invitation_only = Boolean(
display_name=_("Invitation Only"),
help=_("Whether to restrict enrollment to invitation by the course staff."),
default=False,
scope=Scope.settings
)
course_survey_name = String(
display_name=_("Pre-Course Survey Name"),
help=_("Name of SurveyForm to display as a pre-course survey to the user."),
default=None,
scope=Scope.settings,
deprecated=True
)
course_survey_required = Boolean(
display_name=_("Pre-Course Survey Required"),
help=_(
"Specify whether students must complete a survey before they can view your course content. If you "
"set this value to true, you must add a name for the survey to the Course Survey Name setting above."
),
default=False,
scope=Scope.settings,
deprecated=True
)
catalog_visibility = String(
display_name=_("Course Visibility In Catalog"),
help=_(
"Defines the access permissions for showing the course in the course catalog. This can be set to one "
"of three values: 'both' (show in catalog and allow access to about page), 'about' (only allow access "
"to about page), 'none' (do not show in catalog and do not allow access to an about page)."
),
default=CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
scope=Scope.settings,
values=[
{"display_name": _("Both"), "value": CATALOG_VISIBILITY_CATALOG_AND_ABOUT},
{"display_name": _("About"), "value": CATALOG_VISIBILITY_ABOUT},
{"display_name": _("None"), "value": CATALOG_VISIBILITY_NONE}]
)
entrance_exam_enabled = Boolean(
display_name=_("Entrance Exam Enabled"),
help=_(
"Specify whether students must complete an entrance exam before they can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.settings,
)
entrance_exam_minimum_score_pct = Float(
display_name=_("Entrance Exam Minimum Score (%)"),
help=_(
"Specify a minimum percentage score for an entrance exam before students can view your course content. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=65,
scope=Scope.settings,
)
entrance_exam_id = String(
display_name=_("Entrance Exam ID"),
help=_("Content module identifier (location) of entrance exam."),
default=None,
scope=Scope.settings,
)
is_course_hidden = Boolean(
display_name=_("Course Is Hidden"),
default=False,
help=_(
"Enter true or false. If true, the course is hidden."
),
scope=Scope.settings,
deprecated=True
)
course_order = String(
display_name=_("Course Order"),
help=_("Course Order"),
default=None,
scope=Scope.settings,
)
course_category = List(
display_name=_("Course Category"),
help=_("Course Category"),
default=[],
scope=Scope.settings,
)
course_category_order = String(
display_name=_("Course Category Order"),
help=_("Course Category Order"),
default=None,
scope=Scope.settings,
)
course_category2 = String(
display_name=_("Course Category2"),
help=_("Course Category2"),
default=None,
scope=Scope.settings,
)
course_category_order2 = String(
display_name=_("Course Category Order2"),
help=_("Course Category Order2"),
default=None,
scope=Scope.settings,
)
is_f2f_course = Boolean(
display_name=_("Face-to-Face Course"),
default=False,
        help=_("Enter true or false. If true, the course is a face-to-face classroom course."),
scope=Scope.settings,
)
is_f2f_course_sell = Boolean(
display_name=_("Sell Face-to-Face Course"),
default=False,
        help=_("Enter true or false. If true, the face-to-face classroom course is for sale."),
scope=Scope.settings,
)
course_canonical_name = String(
display_name=_("Course Canonical Name"),
help=_("Course Canonical Name."),
default="",
scope=Scope.settings,
)
    course_contents_provider = String(
display_name=_("Course Contents Provider"),
help=_("Course contents provider."),
default="",
scope=Scope.settings,
)
teacher_name = String(
display_name=_("Teacher Name"),
help=_("Teacher name"),
default="",
scope=Scope.settings,
)
course_span = String(
display_name=_("Course Span"),
help=_("Offer period of the course."),
default="",
scope=Scope.settings,
)
social_sharing_url = String(
display_name=_("Social Media Sharing URL"),
help=_(
"If dashboard social sharing and custom course URLs are enabled, you can provide a URL "
"(such as the URL to a course About page) that social media sites can link to. URLs must "
"be fully qualified. For example: http://www.edx.org/course/Introduction-to-MOOCs-ITM001"
),
default=None,
scope=Scope.settings,
)
language = String(
display_name=_("Course Language"),
help=_("Specify the language of your course."),
default=None,
scope=Scope.settings
)
teams_configuration = Dict(
display_name=_("Teams Configuration"),
help=_(
"Enter configuration for the teams feature. Expects two entries: max_team_size and topics, where "
"topics is a list of topics."
),
scope=Scope.settings,
deprecated=True, # Deprecated until the teams feature is made generally available
)
enable_proctored_exams = Boolean(
display_name=_("Enable Proctored Exams"),
help=_(
"Enter true or false. If this value is true, proctored exams are enabled in your course. "
"Note that enabling proctored exams will also enable timed exams."
),
default=False,
scope=Scope.settings
)
enable_timed_exams = Boolean(
display_name=_("Enable Timed Exams"),
help=_(
"Enter true or false. If this value is true, timed exams are enabled in your course."
),
default=False,
scope=Scope.settings
)
minimum_grade_credit = Float(
display_name=_("Minimum Grade for Credit"),
help=_(
"The minimum grade that a learner must earn to receive credit in the course, "
"as a decimal between 0.0 and 1.0. For example, for 75%, enter 0.75."
),
default=0.8,
scope=Scope.settings,
)
self_paced = Boolean(
display_name=_("Self Paced"),
help=_(
"Set this to \"true\" to mark this course as self-paced. Self-paced courses do not have "
"due dates for assignments, and students can progress through the course at any rate before "
"the course ends."
),
default=False,
scope=Scope.settings
)
show_playback_tab = Boolean(
display_name=_("Show Playback Tab"),
help=_("Allows showing of the playback tab."),
default=False,
scope=Scope.settings,
)
show_attendance_tab = Boolean(
display_name=_("Show Attendance Tab"),
help=_("Allows showing of the attendance tab."),
default=False,
scope=Scope.settings
)
is_status_managed = Boolean(
display_name=_("Set course as status management target."),
help=_("Select 'True' to manage the status of this course."),
default=False,
scope=Scope.settings
)
target_library = List(
help=_(
"Target library list for the course."
),
scope=Scope.settings,
default=[]
)
playback_rate_1x_only = Boolean(
display_name=_("Hide Playback Rate"),
default=False,
        help=_("Restrict the JW Player playback rate to 1.0x only."),
scope=Scope.settings,
)
new_icon_display_days = Integer(
display_name=_("New Icon Display Days"),
        help=_(
            "The number of days to display the new icon for the date section on the info page."
),
default=7,
scope=Scope.settings,
)
class CourseModule(CourseFields, SequenceModule): # pylint: disable=abstract-method
"""
The CourseDescriptor needs its module_class to be a SequenceModule, but some code that
expects a CourseDescriptor to have all its fields can fail if it gets a SequenceModule instead.
This class is to make sure that all the fields are present in all cases.
"""
class CourseDescriptor(CourseFields, SequenceDescriptor, LicenseMixin):
"""
The descriptor for the course XModule
"""
module_class = CourseModule
def __init__(self, *args, **kwargs):
"""
Expects the same arguments as XModuleDescriptor.__init__
"""
super(CourseDescriptor, self).__init__(*args, **kwargs)
_ = self.runtime.service(self, "i18n").ugettext
if self.wiki_slug is None:
self.wiki_slug = self.location.course
if self.due_date_display_format is None and self.show_timezone is False:
# For existing courses with show_timezone set to False (and no due_date_display_format specified),
# set the due_date_display_format to what would have been shown previously (with no timezone).
# Then remove show_timezone so that if the user clears out the due_date_display_format,
# they get the default date display.
self.due_date_display_format = "DATE_TIME"
del self.show_timezone
# NOTE: relies on the modulestore to call set_grading_policy() right after
# init. (Modulestore is in charge of figuring out where to load the policy from)
# NOTE (THK): This is a last-minute addition for Fall 2012 launch to dynamically
# disable the syllabus content for courses that do not provide a syllabus
if self.system.resources_fs is None:
self.syllabus_present = False
else:
self.syllabus_present = self.system.resources_fs.exists(path('syllabus'))
self._grading_policy = {}
self.set_grading_policy(self.grading_policy)
if self.discussion_topics == {}:
self.discussion_topics = {_('General'): {'id': self.location.html_id()}}
try:
if not getattr(self, "tabs", []):
CourseTabList.initialize_default(self)
except InvalidTabsException as err:
raise type(err)('{msg} For course: {course_id}'.format(msg=err.message, course_id=unicode(self.id)))
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(CourseDescriptor, self).non_editable_metadata_fields
        # Add the target_library field to non_editable_metadata_fields so that
        # it doesn't render in the edit form.
non_editable_fields.extend([CourseFields.target_library])
return non_editable_fields
def set_grading_policy(self, course_policy):
"""
The JSON object can have the keys GRADER and GRADE_CUTOFFS. If either is
missing, it reverts to the default.
"""
if course_policy is None:
course_policy = {}
# Load the global settings as a dictionary
grading_policy = self.grading_policy
# BOY DO I HATE THIS grading_policy CODE ACROBATICS YET HERE I ADD MORE (dhm)--this fixes things persisted w/
# defective grading policy values (but not None)
if 'GRADER' not in grading_policy:
grading_policy['GRADER'] = CourseFields.grading_policy.default['GRADER']
if 'GRADE_CUTOFFS' not in grading_policy:
grading_policy['GRADE_CUTOFFS'] = CourseFields.grading_policy.default['GRADE_CUTOFFS']
# Override any global settings with the course settings
grading_policy.update(course_policy)
# Here is where we should parse any configurations, so that we can fail early
        # Use setters so that the side effects on .definitions work
self.raw_grader = grading_policy['GRADER'] # used for cms access
self.grade_cutoffs = grading_policy['GRADE_CUTOFFS']
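    # Illustrative sketch (hypothetical values): calling
    # set_grading_policy({"GRADE_CUTOFFS": {"Pass": 0.6}}) overrides only the
    # cutoffs; the GRADER list keeps the CourseFields defaults, so
    # raw_grader[0]["type"] would still be "Homework".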
@classmethod
def read_grading_policy(cls, paths, system):
"""Load a grading policy from the specified paths, in order, if it exists."""
# Default to a blank policy dict
policy_str = '{}'
for policy_path in paths:
if not system.resources_fs.exists(policy_path):
continue
log.debug("Loading grading policy from {0}".format(policy_path))
try:
with system.resources_fs.open(policy_path) as grading_policy_file:
policy_str = grading_policy_file.read()
# if we successfully read the file, stop looking at backups
break
except IOError:
msg = "Unable to load course settings file from '{0}'".format(policy_path)
log.warning(msg)
return policy_str
@classmethod
def from_xml(cls, xml_data, system, id_generator):
instance = super(CourseDescriptor, cls).from_xml(xml_data, system, id_generator)
# bleh, have to parse the XML here to just pull out the url_name attribute
# I don't think it's stored anywhere in the instance.
course_file = StringIO(xml_data.encode('ascii', 'ignore'))
xml_obj = etree.parse(course_file, parser=edx_xml_parser).getroot()
policy_dir = None
url_name = xml_obj.get('url_name', xml_obj.get('slug'))
if url_name:
policy_dir = 'policies/' + url_name
# Try to load grading policy
paths = ['grading_policy.json']
if policy_dir:
paths = [policy_dir + '/grading_policy.json'] + paths
try:
policy = json.loads(cls.read_grading_policy(paths, system))
except ValueError:
system.error_tracker("Unable to decode grading policy as json")
policy = {}
# now set the current instance. set_grading_policy() will apply some inheritance rules
instance.set_grading_policy(policy)
return instance
@classmethod
def definition_from_xml(cls, xml_object, system):
textbooks = []
for textbook in xml_object.findall("textbook"):
textbooks.append((textbook.get('title'), textbook.get('book_url')))
xml_object.remove(textbook)
# Load the wiki tag if it exists
wiki_slug = None
wiki_tag = xml_object.find("wiki")
if wiki_tag is not None:
wiki_slug = wiki_tag.attrib.get("slug", default=None)
xml_object.remove(wiki_tag)
definition, children = super(CourseDescriptor, cls).definition_from_xml(xml_object, system)
definition['textbooks'] = textbooks
definition['wiki_slug'] = wiki_slug
# load license if it exists
definition = LicenseMixin.parse_license_from_xml(definition, xml_object)
return definition, children
def definition_to_xml(self, resource_fs):
xml_object = super(CourseDescriptor, self).definition_to_xml(resource_fs)
        if len(self.textbooks) > 0:
            # Serialize each textbook as its own <textbook> element so that
            # definition_from_xml (which reads them back via findall("textbook"))
            # round-trips the full list instead of only the last entry.
            for textbook in self.textbooks:
                textbook_xml_object = etree.Element('textbook')
                textbook_xml_object.set('title', textbook.title)
                textbook_xml_object.set('book_url', textbook.book_url)
                xml_object.append(textbook_xml_object)
if self.wiki_slug is not None:
wiki_xml_object = etree.Element('wiki')
wiki_xml_object.set('slug', self.wiki_slug)
xml_object.append(wiki_xml_object)
# handle license specifically. Default the course to have a license
# of "All Rights Reserved", if a license is not explicitly set.
self.add_license_to_xml(xml_object, default="all-rights-reserved")
return xml_object
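    # Illustrative sketch of the child elements serialized above (values are
    # hypothetical):
    #
    #     <textbook title="Intro" book_url="https://example.com/book/"/>
    #     <wiki slug="MyCourse"/>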
def has_ended(self):
"""
Returns True if the current time is after the specified course end date.
Returns False if there is no end date specified.
"""
return course_metadata_utils.has_course_ended(self.end)
def has_terminated(self):
"""
Returns True if the current time is after the specified course terminated date.
Returns False if there is no terminated date specified.
"""
# backward compatibility
if self.is_course_hidden:
return True
if self.terminate_start is None:
return False
return datetime.now(utc) > self.terminate_start
def is_course_deadline(self):
"""
        Returns True if the current time is after the specified course deadline date.
        Returns False if there is no deadline date specified.
"""
if self.deadline_start is None:
return False
return datetime.now(utc) > self.deadline_start
def may_certify(self):
"""
Return whether it is acceptable to show the student a certificate download link.
"""
return course_metadata_utils.may_certify_for_course(
self.certificates_display_behavior,
self.certificates_show_before_end,
self.has_ended()
)
def has_started(self):
return course_metadata_utils.has_course_started(self.start)
@property
def grader(self):
return grader_from_conf(self.raw_grader)
@property
def raw_grader(self):
# force the caching of the xblock value so that it can detect the change
# pylint: disable=pointless-statement
self.grading_policy['GRADER']
return self._grading_policy['RAW_GRADER']
@raw_grader.setter
def raw_grader(self, value):
# NOTE WELL: this change will not update the processed graders. If we need that, this needs to call grader_from_conf
self._grading_policy['RAW_GRADER'] = value
self.grading_policy['GRADER'] = value
@property
def grade_cutoffs(self):
return self._grading_policy['GRADE_CUTOFFS']
@grade_cutoffs.setter
def grade_cutoffs(self, value):
self._grading_policy['GRADE_CUTOFFS'] = value
# XBlock fields don't update after mutation
policy = self.grading_policy
policy['GRADE_CUTOFFS'] = value
self.grading_policy = policy
@property
def lowest_passing_grade(self):
return min(self._grading_policy['GRADE_CUTOFFS'].values())
@property
def is_cohorted(self):
"""
Return whether the course is cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return False
return bool(config.get("cohorted"))
@property
def auto_cohort(self):
"""
Return whether the course is auto-cohorted.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if not self.is_cohorted:
return False
return bool(self.cohort_config.get(
"auto_cohort", False))
@property
def auto_cohort_groups(self):
"""
Return the list of groups to put students into. Returns [] if not
specified. Returns specified list even if is_cohorted and/or auto_cohort are
false.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
if self.cohort_config is None:
return []
else:
return self.cohort_config.get("auto_cohort_groups", [])
@property
def top_level_discussion_topic_ids(self):
"""
Return list of topic ids defined in course policy.
"""
topics = self.discussion_topics
return [d["id"] for d in topics.values()]
@property
def cohorted_discussions(self):
"""
Return the set of discussions that is explicitly cohorted. It may be
the empty set. Note that all inline discussions are automatically
cohorted based on the course's is_cohorted setting.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return set()
return set(config.get("cohorted_discussions", []))
@property
def always_cohort_inline_discussions(self):
"""
        This allows changing the default behavior of inline discussion cohorting. By
        setting this to False, all inline discussions are non-cohorted unless their
        ids are specified in cohorted_discussions.
Note: No longer used. See openedx.core.djangoapps.course_groups.models.CourseCohortSettings.
"""
config = self.cohort_config
if config is None:
return True
return bool(config.get("always_cohort_inline_discussions", True))
@property
def is_newish(self):
"""
        Returns whether the course has been flagged as new. If
        there is no flag, a heuristic value is computed from the
        announcement and start dates.
"""
flag = self.is_new
if flag is None:
# Use a heuristic if the course has not been flagged
announcement, start, now = course_metadata_utils.sorting_dates(
self.start, self.advertised_start, self.announcement
)
if announcement and (now - announcement).days < 30:
                # The course has been announced for less than a month
return True
elif (now - start).days < 1:
# The course has not started yet
return True
else:
return False
elif isinstance(flag, basestring):
return flag.lower() in ['true', 'yes', 'y']
else:
return bool(flag)
@property
def sorting_score(self):
"""
Returns a tuple that can be used to sort the courses according
the how "new" they are. The "newness" score is computed using a
heuristic that takes into account the announcement and
(advertised) start dates of the course if available.
The lower the number the "newer" the course.
"""
return course_metadata_utils.sorting_score(self.start, self.advertised_start, self.announcement)
@lazy
def grading_context(self):
"""
This returns a dictionary with keys necessary for quickly grading
a student. They are used by grades.grade()
The grading context has two keys:
graded_sections - This contains the sections that are graded, as
well as all possible children modules that can affect the
grading. This allows some sections to be skipped if the student
hasn't seen any part of it.
The format is a dictionary keyed by section-type. The values are
arrays of dictionaries containing
"section_descriptor" : The section descriptor
"xmoduledescriptors" : An array of xmoduledescriptors that
could possibly be in the section, for any student
all_descriptors - This contains a list of all xmodules that can
                affect grading a student. This is used to efficiently fetch
all the xmodule state for a FieldDataCache without walking
the descriptor tree again.
"""
# If this descriptor has been bound to a student, return the corresponding
# XModule. If not, just use the descriptor itself
try:
module = getattr(self, '_xmodule', None)
if not module:
module = self
except UndefinedContext:
module = self
def possibly_scored(usage_key):
"""Can this XBlock type can have a score or children?"""
return usage_key.block_type in self.block_types_affecting_grading
all_descriptors = []
graded_sections = {}
def yield_descriptor_descendents(module_descriptor):
for child in module_descriptor.get_children(usage_key_filter=possibly_scored):
yield child
for module_descriptor in yield_descriptor_descendents(child):
yield module_descriptor
for chapter in self.get_children():
for section in chapter.get_children():
if section.graded:
xmoduledescriptors = list(yield_descriptor_descendents(section))
xmoduledescriptors.append(section)
# The xmoduledescriptors included here are only the ones that have scores.
section_description = {
'section_descriptor': section,
'xmoduledescriptors': [child for child in xmoduledescriptors if child.has_score]
}
section_format = section.format if section.format is not None else ''
graded_sections[section_format] = graded_sections.get(section_format, []) + [section_description]
all_descriptors.extend(xmoduledescriptors)
all_descriptors.append(section)
return {'graded_sections': graded_sections,
'all_descriptors': all_descriptors, }
@lazy
def block_types_affecting_grading(self):
"""Return all block types that could impact grading (i.e. scored, or having children)."""
return frozenset(
cat for (cat, xblock_class) in XBlock.load_classes() if (
getattr(xblock_class, 'has_score', False) or getattr(xblock_class, 'has_children', False)
)
)
@staticmethod
def make_id(org, course, url_name):
return '/'.join([org, course, url_name])
@property
def id(self):
"""Return the course_id for this course"""
return self.location.course_key
def start_datetime_text(self, format_string="SHORT_DATE", time_zone=utc):
"""
        Returns the desired text corresponding to the course's start date and time in the specified time zone,
        defaulting to UTC. Prefers .advertised_start, then falls back to .start.
"""
i18n = self.runtime.service(self, "i18n")
return course_metadata_utils.course_start_datetime_text(
self.start,
self.advertised_start,
format_string,
time_zone,
i18n.ugettext,
i18n.strftime
)
@property
def start_date_is_still_default(self):
"""
Checks if the start date set for the course is still default, i.e. .start has not been modified,
and .advertised_start has not been set.
"""
return course_metadata_utils.course_start_date_is_default(
self.start,
self.advertised_start
)
def end_datetime_text(self, format_string="SHORT_DATE", time_zone=utc):
"""
        Returns the end date or datetime for the course, formatted as a string.
"""
return course_metadata_utils.course_end_datetime_text(
self.end,
format_string,
time_zone,
self.runtime.service(self, "i18n").strftime
)
def get_discussion_blackout_datetimes(self):
"""
Get a list of dicts with start and end fields with datetime values from
the discussion_blackouts setting
"""
date_proxy = Date()
try:
ret = [
{"start": date_proxy.from_json(start), "end": date_proxy.from_json(end)}
for start, end
in filter(None, self.discussion_blackouts)
]
for blackout in ret:
if not blackout["start"] or not blackout["end"]:
raise ValueError
return ret
except (TypeError, ValueError):
log.exception(
"Error parsing discussion_blackouts %s for course %s",
self.discussion_blackouts,
self.id
)
return []
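    # Sketch of the expected policy value (hypothetical dates): discussion_blackouts is a list of
    # [start, end] pairs that Date.from_json can parse, e.g.
    #     "discussion_blackouts": [["2015-09-01", "2015-09-04"], ["2015-12-24", "2016-01-02"]]
    # A malformed entry causes the whole setting to be logged and ignored (an empty list is returned).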
@property
def forum_posts_allowed(self):
"""
Return whether forum posts are allowed by the discussion_blackouts
setting
"""
blackouts = self.get_discussion_blackout_datetimes()
now = datetime.now(utc)
for blackout in blackouts:
if blackout["start"] <= now <= blackout["end"]:
return False
return True
@property
def number(self):
"""
Returns this course's number.
This is a "number" in the sense of the "course numbers" that you see at
lots of universities. For example, given a course
"Intro to Computer Science" with the course key "edX/CS-101/2014", the
course number would be "CS-101"
"""
return course_metadata_utils.number_for_course_location(self.location)
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.number
@property
def org(self):
return self.location.org
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that is in the location
"""
if self.display_organization:
return self.display_organization
return self.org
@property
def video_pipeline_configured(self):
"""
Returns whether the video pipeline advanced setting is configured for this course.
"""
return (
self.video_upload_pipeline is not None and
'course_video_upload_token' in self.video_upload_pipeline
)
def clean_id(self, padding_char='='):
"""
Returns a unique deterministic base32-encoded ID for the course.
The optional padding_char parameter allows you to override the "=" character used for padding.
"""
return course_metadata_utils.clean_course_key(self.location.course_key, padding_char)
@property
def teams_enabled(self):
"""
        Returns whether or not the teams feature has been enabled for this course.
Currently, teams are considered enabled when at least one topic has been configured for the course.
"""
if self.teams_configuration:
return len(self.teams_configuration.get('topics', [])) > 0
return False
@property
def teams_max_size(self):
"""
Returns the max size for teams if teams has been configured, else None.
"""
return self.teams_configuration.get('max_team_size', None)
@property
def teams_topics(self):
"""
Returns the topics that have been configured for teams for this course, else None.
"""
return self.teams_configuration.get('topics', None)
def get_user_partitions_for_scheme(self, scheme):
"""
Retrieve all user partitions defined in the course for a particular
partition scheme.
Arguments:
scheme (object): The user partition scheme.
Returns:
list of `UserPartition`
"""
return [
p for p in self.user_partitions
if p.scheme == scheme
]
def set_user_partitions_for_scheme(self, partitions, scheme):
"""
Set the user partitions for a particular scheme.
Preserves partitions associated with other schemes.
        Arguments:
            partitions (list of `UserPartition`): The partitions to assign to the scheme.
            scheme (object): The user partition scheme.
"""
other_partitions = [
p for p in self.user_partitions # pylint: disable=access-member-before-definition
if p.scheme != scheme
]
self.user_partitions = other_partitions + partitions # pylint: disable=attribute-defined-outside-init
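    # Illustrative (hypothetical) usage of the two scheme helpers above:
    #
    #     existing = course.get_user_partitions_for_scheme(cohort_scheme)
    #     course.set_user_partitions_for_scheme(existing + [new_partition], cohort_scheme)
    #
    # The setter preserves partitions that belong to other schemes.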
@property
def can_toggle_course_pacing(self):
"""
Whether or not the course can be set to self-paced at this time.
Returns:
bool: False if the course has already started, True otherwise.
"""
return datetime.now(utc) <= self.start
class CourseSummary(object):
"""
A lightweight course summary class, which constructs split/mongo course summary without loading
the course. It is used at cms for listing courses to global staff user.
"""
course_info_fields = ['display_name', 'display_coursenumber', 'display_organization']
def __init__(self, course_locator, display_name=u"Empty", display_coursenumber=None, display_organization=None):
"""
Initialize and construct course summary
Arguments:
course_locator (CourseLocator): CourseLocator object of the course.
            display_name (unicode): display name of the course. When a course is created from the console,
                display_name isn't set (the course block has no `display_name` key), so "Empty" is used as the
                default display name when the course is loaded. display_name can also be set to None in Course
                Advanced Settings; in that case None is kept and "Empty" is not used.
display_coursenumber (unicode|None): Course number that is specified & appears in the courseware
display_organization (unicode|None): Course organization that is specified & appears in the courseware
"""
self.display_coursenumber = display_coursenumber
self.display_organization = display_organization
self.display_name = display_name
self.id = course_locator # pylint: disable=invalid-name
self.location = course_locator.make_usage_key('course', 'course')
@property
def display_org_with_default(self):
"""
Return a display organization if it has been specified, otherwise return the 'org' that
is in the location
"""
if self.display_organization:
return self.display_organization
return self.location.org
@property
def display_number_with_default(self):
"""
Return a display course number if it has been specified, otherwise return the 'course' that
is in the location
"""
if self.display_coursenumber:
return self.display_coursenumber
return self.location.course
| nttks/edx-platform | common/lib/xmodule/xmodule/course_module.py | Python | agpl-3.0 | 61,092 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import pickle
import xlrd
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class ProductProductImportInventory(orm.Model):
    """ Inherit product.product.import.inventory to import
    inventory delta quantities from an XLS file.
    """
_inherit = 'product.product.import.inventory'
filename = '/home/administrator/photo/xls/inventory' # TODO parametrize
def action_correct_delta_from_csv(self, cr, uid, ids, context=None):
        ''' Generate report status for delta inventory:
            read the XLS file and update the inventory delta on each product found.
'''
if context is None:
context = {}
# Pool used:
product_pool = self.pool.get('product.product')
mrp_pool = self.pool.get('mrp.production')
error = ''
note = ''
current_proxy = self.browse(cr, uid, ids, context=context)[0]
# ---------------------------------------------------------------------
# Read parameters:
# ---------------------------------------------------------------------
fullname = current_proxy.fullname
max_line = current_proxy.max_line
# Pickle part for speedup during debug:
use_pickle = False # TODO change
pickle_file = os.path.expanduser('~/pickle.store')
# Init check:
if not fullname:
raise osv.except_osv(
_('Import error'),
                _('Need a file name to import in path %s' % self.filename),
)
# Log activity:
_logger.info('Start import delta product form: %s' % self.filename)
# ---------------------------------------------------------------------
# Generate movement database:
# ---------------------------------------------------------------------
if use_pickle:
product_movement = pickle.load(
                open(pickle_file, 'rb'))
else:
_logger.info('Read halfworked data type')
# Call report for halfwork:
data = {
'mode': 'halfwork',
'for_inventory_delta': True,
}
product_movement = mrp_pool.get_explode_report_object(
cr, uid, data=data, context=context)
# Call report for component:
_logger.info('Read component data type')
data['type'] = 'component'
product_movement.update(
mrp_pool.get_explode_report_object(
cr, uid, data=data, context=context))
pickle.dump(
product_movement,
open(pickle_file, 'wb'),
)
# Read excel filename:
try:
filename = os.path.join(self.filename, fullname)
wb = xlrd.open_workbook(filename)
ws = wb.sheet_by_index(0)
except:
raise osv.except_osv(
_('Open file error'),
                _('Cannot find file: %s (or file not in correct format)' % \
filename),
)
# Loop on line:
for i in range(0, max_line):
try:
                row = ws.row(i)  # raises IndexError past the last row, which ends the import
except:
# Out of range error ends import:
note += _('Import end at line: %s\n') % i
break
try:
                # Loop on columns (trace)
try:
default_code = str(row[0].value).replace('.0', '')
except:
                    default_code = ''
# Search product with code:
if not default_code:
                    error += _('%s. No default code found in file\n') % i
continue # jump
try:
product_qty = float(row[1].value)
except:
product_qty = 0
product_ids = product_pool.search(cr, uid, [
('default_code', '=', default_code)], context=context)
if not product_ids:
error += _(
'%s. Error code not found, code: %s\n') % (
i, default_code)
continue # jump
elif len(product_ids) > 1:
error += _(
'%s. Warning more code (take first), code: %s\n') % (
i, default_code)
record = product_movement.get(default_code, False)
if record:
inventory_delta = product_qty - \
sum((
record[3], # SAL value
- record[1], # negative OC value
- record[2], # positive OF value
#- record[0], # XXX no inventory start (yet delta)
)) + record[4] # Delta yet present
note += '%s | %s | %s (previous: %s)\n' % (
i, default_code, inventory_delta,
record[4])
else:
inventory_delta = product_qty
note += '%s. %s NO DATA (set as start)!!!\n' % (
i, default_code)
product_pool.write(cr, uid, product_ids[0], {
'inventory_delta': inventory_delta,
}, context=context)
except:
error += _('%s. Import error code: %s [%s]\n') % (
i, default_code, sys.exc_info())
self.write(cr, uid, ids, {
'error': error,
'note': 'File: %s\n%s' % (
filename, note),
}, context=context)
_logger.info('End import Delta product: %s' % fullname)
return True
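    # Expected XLS layout, as read above (assuming the usual inventory export): one row per
    # product, column 0 = product default_code, column 1 = counted quantity. Rows are scanned
    # from 0 up to max_line, and reading past the last row simply ends the import.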
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Micronaet/micronaet-bom | csv_setup_inventory_delta/delta.py | Python | agpl-3.0 | 7,673 |
# -*- coding: utf-8 -*-
# Copyright 2018, Esther Cisneros
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from datetime import timedelta
from openerp import _, api, fields, models
from openerp.exceptions import UserError
class AccountInvoice(models.Model):
_name = 'account.invoice'
_inherit = 'account.invoice'
invoice_replaced = fields.Many2one(
'account.invoice',
string=_("Invoice that replaces"),
)
date_cancelled = fields.Date(
string=_("Cancellation Date"),
)
number_cancel = fields.Char(
string=("Nombre de la factura cancelada"),
)
@api.multi
def action_cancel(self):
for inv in self:
if inv.id == inv.invoice_replaced.id:
raise UserError(_("Please select an invoice to substitute different to the invoice to be canceled"))
inv.date_cancelled = fields.Date.today()
inv.number_cancel = inv.number
return super(AccountInvoice, self).action_cancel()
@api.model
def send_email_invoice_canceled(self):
limit_date = timedelta(days=1)
date_today_ = fields.Date.today()
dd = fields.Datetime.from_string(date_today_)
date_cancel = dd - limit_date
inv_ids = self.search([
('state', '=', ['cancel']),
('company_id', '=', 1),
('type', '=', 'out_invoice'),
('date_cancelled', '=', date_cancel)])
table = ''
remp_date = ''
remp_rep = ''
for inve in inv_ids:
if not inve.date_cancelled:
remp_date = '---'
else:
remp_date = inve.date_cancelled
if not inve.invoice_replaced:
remp_rep = '---'
else:
remp_rep = inve.invoice_replaced.number
table += """
<tr><td style="border-bottom: 1px solid silver;">%s</td>
<td style="border-bottom: 1px solid silver;">%s</td>
<td style="border-bottom: 1px solid silver;">%s</td>
<td align="right" style="border-bottom: 1px solid silver;">
%s</td></tr>
""" % (remp_date, inve.partner_id.name, inve.number_cancel, remp_rep)
mail_obj = self.env['mail.mail']
body_mail = u"""
<div summary="o_mail_notification" style="padding:0px; width:700px;
margin:0 auto; background: #FFFFFF repeat top /100%%; color:#77777
7">
<table cellspacing="0" cellpadding="0" style="width:700px;
border-collapse:collapse; background:inherit; color:inherit">
<tbody><tr>
<td valign="center" width="270" style="padding:5px 10px
5px 5px;font-size: 18px">
<p>Las siguientes facturas ya fueron canceladas</p>
</td>
<td valign="center" align="right" width="270"
style="padding:5px 15px 5px 10px; font-size: 12px;">
<p>
<strong>Sent by</strong>
<a href="http://erp.portalgebesa.com" style="text-
decoration:none; color: #a24689;">
<strong>%s</strong>
</a>
<strong>using</strong>
<a href="https://www.odoo.com" style="text-
decoration:none; color: #a24689;"><strong>Odoo
</strong></a>
</p>
</td>
</tr>
</tbody></table>
</div>
<div style="padding:0px; width:700px; margin:0 auto; background:
#FFFFFF repeat top /100%%; color:#777777">
<table cellspacing="0" cellpadding="0" style="vertical-align:
top; padding:0px; border-collapse:collapse; background:inherit;
color:inherit">
<tbody><tr>
<td valign="top" style="width:700px; padding:5px 10px
5px 5px; ">
<div>
<hr width="100%%" style="background-color:
rgb(204,204,204);border:medium none;clear:both;
display:block;font-size:0px;min-height:1px;
line-height:0;margin:15px auto;padding:0">
</div>
</td>
</tr></tbody>
</table>
</div>
<div style="padding:0px; width:700px; margin:0 auto; background:
#FFFFFF repeat top /100%%;color:#777777">
<table style="border-collapse:collapse; margin: 0 auto; width:
700px; background:inherit; color:inherit">
<tbody><tr>
<th width="16%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Fecha de Cancelacion</strong></th>
<th width="54%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Cliente</strong></th>
<th width="15%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Factura Cancelada</strong></th>
<th width="15%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Factura que Sustituye</strong></th>
</tr>
%s
</tbody>
</table>
</div>
""" % (self.env.user.company_id.name, table)
mail = mail_obj.create({
'subject': 'Facturas Canceladas',
'email_to': '[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected],[email protected]',
'headers': "{'Return-Path': u'[email protected]'}",
'body_html': body_mail,
'auto_delete': True,
'message_type': 'comment',
'model': 'account.invoice',
#'res_id': inv_ids[0].id,
})
mail.send()
@api.model
def send_email_invoice_canceled_tgalbo(self):
limit_date = timedelta(days=1)
date_today_ = fields.Date.today()
dd = fields.Datetime.from_string(date_today_)
date_cancel = dd - limit_date
inv_ids = self.search([
('state', '=', ['cancel']),
('company_id', '=', 4),
('type', '=', 'out_invoice'),
('date_cancelled', '=', date_cancel)])
table = ''
remp_date = ''
remp_rep = ''
for inve in inv_ids:
if not inve.date_cancelled:
remp_date = '---'
else:
remp_date = inve.date_cancelled
if not inve.invoice_replaced:
remp_rep = '---'
else:
remp_rep = inve.invoice_replaced.number
table += """
<tr><td style="border-bottom: 1px solid silver;">%s</td>
<td style="border-bottom: 1px solid silver;">%s</td>
<td style="border-bottom: 1px solid silver;">%s</td>
<td align="right" style="border-bottom: 1px solid silver;">
%s</td></tr>
""" % (remp_date, inve.partner_id.name, inve.number_cancel, remp_rep)
mail_obj = self.env['mail.mail']
body_mail = u"""
<div summary="o_mail_notification" style="padding:0px; width:700px;
margin:0 auto; background: #FFFFFF repeat top /100%%; color:#77777
7">
<table cellspacing="0" cellpadding="0" style="width:700px;
border-collapse:collapse; background:inherit; color:inherit">
<tbody><tr>
<td valign="center" width="270" style="padding:5px 10px
5px 5px;font-size: 18px">
<p>Las siguientes facturas ya fueron canceladas</p>
</td>
<td valign="center" align="right" width="270"
style="padding:5px 15px 5px 10px; font-size: 12px;">
<p>
<strong>Sent by</strong>
<a href="http://erp.portalgebesa.com" style="text-
decoration:none; color: #a24689;">
<strong>%s</strong>
</a>
<strong>using</strong>
<a href="https://www.odoo.com" style="text-
decoration:none; color: #a24689;"><strong>Odoo
</strong></a>
</p>
</td>
</tr>
</tbody></table>
</div>
<div style="padding:0px; width:700px; margin:0 auto; background:
#FFFFFF repeat top /100%%; color:#777777">
<table cellspacing="0" cellpadding="0" style="vertical-align:
top; padding:0px; border-collapse:collapse; background:inherit;
color:inherit">
<tbody><tr>
<td valign="top" style="width:700px; padding:5px 10px
5px 5px; ">
<div>
<hr width="100%%" style="background-color:
rgb(204,204,204);border:medium none;clear:both;
display:block;font-size:0px;min-height:1px;
line-height:0;margin:15px auto;padding:0">
</div>
</td>
</tr></tbody>
</table>
</div>
<div style="padding:0px; width:700px; margin:0 auto; background:
#FFFFFF repeat top /100%%;color:#777777">
<table style="border-collapse:collapse; margin: 0 auto; width:
700px; background:inherit; color:inherit">
<tbody><tr>
<th width="16%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Fecha de Cancelacion</strong></th>
<th width="54%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Cliente</strong></th>
<th width="15%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Factura Cancelada</strong></th>
<th width="15%%" style="padding:5px 10px 5px 5px;font-
size: 14px; border-bottom: 2px solid silver;"><strong>
Factura que Sustituye</strong></th>
</tr>
%s
</tbody>
</table>
</div>
""" % (self.env.user.company_id.name, table)
mail = mail_obj.create({
'subject': 'Facturas Canceladas Transportes Galbo del Norte',
'email_to': '[email protected],[email protected],[email protected],[email protected]',
'headers': "{'Return-Path': u'[email protected]'}",
'body_html': body_mail,
'auto_delete': True,
'message_type': 'comment',
'model': 'account.invoice',
#'res_id': inv_ids[0].id,
})
mail.send()
| Gebesa-Dev/Addons-gebesa | account_invoice_replace/models/account_invoice_replace.py | Python | agpl-3.0 | 12,245 |
import functools
from itertools import combinations
from bears.c_languages.ClangBear import clang_available, ClangBear
from bears.c_languages.codeclone_detection.ClangCountingConditions import (
condition_dict)
from bears.c_languages.codeclone_detection.ClangCountVectorCreator import (
ClangCountVectorCreator)
from bears.c_languages.codeclone_detection.CloneDetectionRoutines import (
compare_functions, get_count_matrices)
from coala_utils.string_processing.StringConverter import StringConverter
from coalib.bears.GlobalBear import GlobalBear
from dependency_management.requirements.PipRequirement import PipRequirement
from coalib.collecting.Collectors import collect_dirs
from coalib.results.HiddenResult import HiddenResult
from coalib.settings.Setting import path_list, typed_ordered_dict
from coala_utils.decorators import (enforce_signature, generate_ordering,
generate_repr)
# counting_condition_dict is a function object generated by typed_dict. This
# function takes a setting and creates a dictionary out of it while it
# converts all keys to counting condition function objects (via the
# condition_dict) and all values to floats while unset values default to 1.
counting_condition_dict = typed_ordered_dict(
lambda setting: condition_dict[str(setting).lower()],
float,
1)
default_cc_dict = counting_condition_dict(StringConverter(
"""
used: 0,
returned: 1.4,
is_condition: 0,
in_condition: 1.4,
in_second_level_condition: 1.4,
in_third_level_condition: 1.0,
is_assignee: 0,
is_assigner: 0.6,
loop_content: 0,
second_level_loop_content,
third_level_loop_content,
is_param: 2,
is_called: 1.4,
is_call_param: 0.0,
in_sum: 2.0,
in_product: 0,
in_binary_operation,
member_accessed"""))
@generate_repr(('id', hex),
'origin',
'differences',
'count_matrices',
'message')
@generate_ordering('origin',
'differences',
'count_matrices',
'message')
class ClangFunctionDifferenceResult(HiddenResult):
@enforce_signature
def __init__(self, origin,
differences: list,
count_matrices: dict):
super().__init__(origin,
[differences, count_matrices])
self.differences = differences
self.count_matrices = count_matrices
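# Note on the payload above: each entry of ``differences`` is a (function_1, function_2,
# difference) tuple as produced by ``get_difference`` below, and ``count_matrices`` maps
# function ids to their count matrices.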
def get_difference(function_pair,
count_matrices,
average_calculation,
poly_postprocessing,
exp_postprocessing):
"""
Retrieves the difference between two functions using the munkres algorithm.
:param function_pair: A tuple containing both indices for the
count_matrices dictionary.
:param count_matrices: A dictionary holding CMs.
:param average_calculation: If set to true the difference calculation
function will take the average of all variable
differences as the difference, else it will
normalize the function as a whole and thus
weighting in variables dependent on their size.
:param poly_postprocessing: If set to true, the difference value of big
function pairs will be reduced using a
polynomial approach.
:param exp_postprocessing: If set to true, the difference value of big
function pairs will be reduced using an
exponential approach.
:return: A tuple containing both function ids and their
difference.
"""
function_1, function_2 = function_pair
return (function_1,
function_2,
compare_functions(count_matrices[function_1],
count_matrices[function_2],
average_calculation,
poly_postprocessing,
exp_postprocessing))
class ClangFunctionDifferenceBear(GlobalBear):
check_prerequisites = classmethod(clang_available)
LANGUAGES = ClangBear.LANGUAGES
REQUIREMENTS = ClangBear.REQUIREMENTS | {PipRequirement('munkres3', '1.0')}
def run(self,
counting_conditions: counting_condition_dict = default_cc_dict,
average_calculation: bool = False,
poly_postprocessing: bool = True,
exp_postprocessing: bool = False,
extra_include_paths: path_list = (),
):
"""
Retrieves similarities for code clone detection. Those can be reused in
another bear to produce results.
Postprocessing may be done because small functions are less likely to
be clones at the same difference value than big functions which may
provide a better refactoring opportunity for the user.
        :param counting_conditions: A comma separated list of counting
conditions. Possible values are: used,
returned, is_condition, in_condition,
in_second_level_condition,
in_third_level_condition, is_assignee,
is_assigner, loop_content,
second_level_loop_content,
third_level_loop_content, is_param,
in_sum, in_product, in_binary_operation,
member_accessed.
Weightings can be assigned to each
condition due to providing a dict
value, i.e. having used weighted in
half as much as other conditions would
simply be: "used: 0.5, is_assignee".
Weightings default to 1 if unset.
:param average_calculation: If set to true the difference calculation
function will take the average of all
variable differences as the difference,
else it will normalize the function as a
whole and thus weighting in variables
dependent on their size.
:param poly_postprocessing: If set to true, the difference value of big
function pairs will be reduced using a
polynomial approach.
:param extra_include_paths: A list containing additional include paths.
:param exp_postprocessing: If set to true, the difference value of big
function pairs will be reduced using an
exponential approach.
"""
self.debug('Using the following counting conditions:')
for key, val in counting_conditions.items():
self.debug(' *', key.__name__, '(weighting: {})'.format(val))
self.debug('Creating count matrices...')
count_matrices = get_count_matrices(
ClangCountVectorCreator(list(counting_conditions.keys()),
list(counting_conditions.values())),
list(self.file_dict.keys()),
lambda prog: self.debug('{:2.4f}%...'.format(prog)),
self.section['files'].origin,
collect_dirs(extra_include_paths))
self.debug('Calculating differences...')
differences = []
function_count = len(count_matrices)
        # That's n choose 2, hardcoded to simplify the calculation
combination_length = function_count * (function_count-1) / 2
partial_get_difference = functools.partial(
get_difference,
count_matrices=count_matrices,
average_calculation=average_calculation,
poly_postprocessing=poly_postprocessing,
exp_postprocessing=exp_postprocessing)
for i, elem in enumerate(
map(partial_get_difference,
[(f1, f2) for f1, f2 in combinations(count_matrices, 2)])):
if i % 50 == 0:
self.debug('{:2.4f}%...'.format(100*i/combination_length))
differences.append(elem)
yield ClangFunctionDifferenceResult(self, differences, count_matrices)
| refeed/coala-bears | bears/c_languages/codeclone_detection/ClangFunctionDifferenceBear.py | Python | agpl-3.0 | 8,616 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models import Q
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from enumfields import EnumIntegerField
from jsonfield.fields import JSONField
from shuup.core.fields import InternalIdentifierField
from shuup.notify.enums import Priority, RecipientType
class NotificationManager(models.Manager):
def for_user(self, user):
"""
:type user: django.contrib.auth.models.AbstractUser
"""
if not user or user.is_anonymous():
return self.none()
q = (Q(recipient_type=RecipientType.SPECIFIC_USER) & Q(recipient=user))
if getattr(user, 'is_superuser', False):
q |= Q(recipient_type=RecipientType.ADMINS)
return self.filter(q)
def unread_for_user(self, user):
return self.for_user(user).exclude(marked_read=True)
class Notification(models.Model):
"""
A model for persistent notifications to be shown in the admin, etc.
"""
recipient_type = EnumIntegerField(RecipientType, default=RecipientType.ADMINS, verbose_name=_('recipient type'))
recipient = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, related_name="+", on_delete=models.SET_NULL,
verbose_name=_('recipient')
)
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_('created on'))
message = models.CharField(max_length=140, editable=False, default="", verbose_name=_('message'))
identifier = InternalIdentifierField(unique=False)
priority = EnumIntegerField(Priority, default=Priority.NORMAL, db_index=True, verbose_name=_('priority'))
_data = JSONField(blank=True, null=True, editable=False, db_column="data")
marked_read = models.BooleanField(db_index=True, editable=False, default=False, verbose_name=_('marked read'))
marked_read_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, editable=False, related_name="+", on_delete=models.SET_NULL,
verbose_name=_('marked read by')
)
marked_read_on = models.DateTimeField(null=True, blank=True, verbose_name=_('marked read on'))
objects = NotificationManager()
def __init__(self, *args, **kwargs):
url = kwargs.pop("url", None)
super(Notification, self).__init__(*args, **kwargs)
if url:
self.url = url
def save(self, *args, **kwargs):
if self.recipient_type == RecipientType.SPECIFIC_USER and not self.recipient_id:
raise ValueError("With RecipientType.SPECIFIC_USER, recipient is required")
super(Notification, self).save(*args, **kwargs)
def mark_read(self, user):
if self.marked_read:
return False
self.marked_read = True
self.marked_read_by = user
self.marked_read_on = now()
self.save(update_fields=('marked_read', 'marked_read_by', 'marked_read_on'))
return True
@property
def is_read(self):
return self.marked_read
@property
def data(self):
if not self._data:
self._data = {}
return self._data
@property
def url(self):
url = self.data.get("_url")
if isinstance(url, dict):
return reverse(**url)
return url
@url.setter
def url(self, value):
if self.pk:
raise ValueError("URL can't be set on a saved notification")
self.data["_url"] = value
def set_reverse_url(self, **reverse_kwargs):
if self.pk:
raise ValueError("URL can't be set on a saved notification")
try:
reverse(**reverse_kwargs)
except NoReverseMatch: # pragma: no cover
raise ValueError("Invalid reverse URL parameters")
self.data["_url"] = reverse_kwargs
| suutari/shoop | shuup/notify/models/notification.py | Python | agpl-3.0 | 4,222 |
# Copyright 2014-2017 Pedro M. Baeza <[email protected]>
# Copyright 2018-2019 Sergio Teruel <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models
from odoo.fields import first
class StockPicking(models.Model):
_inherit = 'stock.picking'
returned_ids = fields.Many2many(
comodel_name="stock.picking", compute="_compute_returned_ids",
string="Returned pickings")
source_picking_id = fields.Many2one(
comodel_name="stock.picking",
compute="_compute_source_picking_id",
string="Source picking")
@api.multi
def _compute_returned_ids(self):
for picking in self:
picking.returned_ids = picking.mapped(
'move_lines.returned_move_ids.picking_id')
@api.depends('move_lines.origin_returned_move_id')
def _compute_source_picking_id(self):
"""Get source piking from this picking. Only one origin is possible.
"""
for picking in self:
picking.source_picking_id = first(picking.mapped(
'move_lines.origin_returned_move_id.picking_id'))
def action_show_source_picking(self):
""" Open source picking form action """
return self.source_picking_id.get_formview_action()
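    # Descriptive note: ``source_picking_id`` points at the picking this one was returned from
    # (via move_lines.origin_returned_move_id), while ``returned_ids`` lists the return pickings
    # that were generated from this one (via move_lines.returned_move_ids).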
| Vauxoo/stock-logistics-workflow | stock_picking_show_return/models/stock_picking.py | Python | agpl-3.0 | 1,320 |
import shutil
from pprint import pprint
import pandas as pd
import csv
import pickle
import inspect, os
import requests
from os import listdir
import numpy as np
import subprocess
from luigi import six
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB | felipegerard/arte_mexicano_antiguo | montactuaria/Analisis_access_log/luigi/ functions/functions.py | Python | agpl-3.0 | 362 |
# -*- coding: utf-8 -*-
# Copyright 2015-2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import mock
from odoo.addons.connector_carepoint.models import account_invoice_line
from ..common import SetUpCarepointBase
model = 'odoo.addons.connector_carepoint.models.account_invoice_line'
class EndTestException(Exception):
pass
class AccountInvoiceLineTestBase(SetUpCarepointBase):
def setUp(self):
super(AccountInvoiceLineTestBase, self).setUp()
self.model = 'carepoint.account.invoice.line'
self.mock_env = self.get_carepoint_helper(
self.model
)
@property
def record(self):
""" Model record fixture """
return {
'rxdisp_id': 12345,
'primary_pay_date': '2016-01-23 01:23:45',
't_patient_pay_sub': '10.23',
}
class TestAccountInvoiceLineUnit(AccountInvoiceLineTestBase):
def setUp(self):
super(TestAccountInvoiceLineUnit, self).setUp()
self.Unit = account_invoice_line.AccountInvoiceLineUnit
self.unit = self.Unit(self.mock_env)
def test_import_invoice_lines_for_procurement_unit_for_adapter(self):
""" It should get unit for adapter """
with mock.patch.object(self.unit, 'unit_for') as mk:
mk.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._import_invoice_lines_for_procurement(True)
mk.assert_called_once_with(
account_invoice_line.CarepointCRUDAdapter
)
def test_import_invoice_lines_for_procurement_unit_for_importer(self):
""" It should get unit for importer """
with mock.patch.object(self.unit, 'unit_for') as mk:
mk.side_effect = [None, EndTestException]
with self.assertRaises(EndTestException):
self.unit._import_invoice_lines_for_procurement(True)
mk.assert_called_with(
account_invoice_line.AccountInvoiceLineImporter
)
def test_import_invoice_lines_for_procurement_search(self):
""" It should search adapter for unit """
expect = 'expect'
with mock.patch.object(self.unit, 'unit_for') as mk:
mk().search.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._import_invoice_lines_for_procurement(expect)
mk().search.assert_called_once_with(
rxdisp_id=expect,
)
def test_import_invoice_lines_for_procurement_imports(self):
""" It should run importer on records """
with mock.patch.object(self.unit, 'unit_for') as mk:
expect = mock.MagicMock()
adapter = mock.MagicMock()
adapter.search.return_value = [True]
mk.side_effect = [adapter, expect]
expect.run.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._import_invoice_lines_for_procurement(True)
expect.run.assert_called_once_with(
adapter.search()[0]
)
class TestAccountInvoiceLineImportMapper(AccountInvoiceLineTestBase):
def setUp(self):
super(TestAccountInvoiceLineImportMapper, self).setUp()
self.Unit = account_invoice_line.AccountInvoiceLineImportMapper
self.unit = self.Unit(self.mock_env)
def test_carepoint_id(self):
""" It should return correct attribute """
expect = {'carepoint_id': self.record['rxdisp_id']}
res = self.unit.carepoint_id(self.record)
self.assertDictEqual(expect, res)
def test_invoice_id_get_binder(self):
""" It should get binder for record type """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit.invoice_id(self.record)
self.unit.binder_for.assert_called_once_with(
'carepoint.procurement.order'
)
def test_invoice_id_to_odoo(self):
""" It should get Odoo record for binding """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for().to_odoo.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit.invoice_id(self.record)
self.unit.binder_for().to_odoo.assert_called_once_with(
self.record['rxdisp_id'], browse=True,
)
def test_invoice_id_search(self):
""" It should search for invoice from origin """
with mock.patch.object(self.unit, 'binder_for'):
with mock.patch.object(self.unit.session, 'env') as env:
env['account.invoice'].search.side_effect = EndTestException
proc_id = self.unit.binder_for().to_odoo()
with self.assertRaises(EndTestException):
self.unit.invoice_id(self.record)
env['account.invoice'].search.assert_called_once_with(
[('origin', '=', proc_id.sale_line_id.order_id.name)],
limit=1,
)
def test_invoice_id_existing_invoice(self):
""" It should return existing matches invoice """
expect = mock.MagicMock()
with mock.patch.object(self.unit, 'binder_for'):
with mock.patch.object(self.unit.session, 'env') as env:
env['account.invoice'].search.return_value = [expect]
res = self.unit.invoice_id(self.record)
expect = {
'invoice_id': expect.id,
}
self.assertDictEqual(res, expect)
def test_invoice_id_new_invoice_prepare_invoice(self):
""" It should prepare invoice from sale order if not existing """
with mock.patch.object(self.unit, 'binder_for') as mk:
with mock.patch.object(self.unit.session, 'env') as env:
env['account.invoice'].search.return_value = []
prep = mk().to_odoo().sale_line_id.order_id._prepare_invoice
prep.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit.invoice_id(self.record)
def test_invoice_id_new_invoice_updates_invoice_date(self):
""" It should inject remote date into invoice vals """
with mock.patch.object(self.unit, 'binder_for') as mk:
with mock.patch.object(self.unit.session, 'env') as env:
env['account.invoice'].search.return_value = []
prep = mk().to_odoo().sale_line_id.order_id._prepare_invoice
self.unit.invoice_id(self.record)
prep().update.assert_called_once_with({
'date_invoice': self.record['primary_pay_date'],
})
def test_invoice_id_new_invoice_create(self):
""" It should create invoice with proper vals """
with mock.patch.object(self.unit, 'binder_for') as mk:
with mock.patch.object(self.unit.session, 'env') as env:
env['account.invoice'].search.return_value = []
prep = mk().to_odoo().sale_line_id.order_id._prepare_invoice
self.unit.invoice_id(self.record)
env['account.invoice'].create.assert_called_once_with(prep())
def test_invoice_id_new_invoice_create_return(self):
""" It should return result of create in values """
with mock.patch.object(self.unit, 'binder_for'):
with mock.patch.object(self.unit.session, 'env') as env:
env['account.invoice'].search.return_value = []
res = self.unit.invoice_id(self.record)
expect = {'invoice_id': env['account.invoice'].create().id}
self.assertDictEqual(expect, res)
def test_sale_line_ids_get_binder(self):
""" It should get binder for record type """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit.sale_line_ids(self.record)
self.unit.binder_for.assert_called_once_with(
'carepoint.procurement.order'
)
def test_sale_line_ids_to_odoo(self):
""" It should get Odoo record for binding """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for().to_odoo.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit.sale_line_ids(self.record)
self.unit.binder_for().to_odoo.assert_called_once_with(
self.record['rxdisp_id'], browse=True,
)
def test_sale_line_ids_return(self):
""" It should return proper values dict """
with mock.patch.object(self.unit, 'binder_for') as mk:
res = self.unit.sale_line_ids(self.record)
expect = {
'sale_line_ids': [(6, 0, [mk().to_odoo().sale_line_id.id])]
}
self.assertDictEqual(expect, res)
def test_invoice_line_data_get_binder(self):
""" It should get binder for record type """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit.invoice_line_data(self.record)
self.unit.binder_for.assert_called_once_with(
'carepoint.procurement.order'
)
def test_invoice_line_data_to_odoo(self):
""" It should get Odoo record for binding """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for().to_odoo.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit.invoice_line_data(self.record)
self.unit.binder_for().to_odoo.assert_called_once_with(
self.record['rxdisp_id'], browse=True,
)
def test_invoice_line_data_sets_price_unit(self):
""" It should set the price_unit on sale line to circumvent lack
of price data in the remote sales records
"""
qty = 20
with mock.patch.object(self.unit, 'binder_for'):
line_id = self.unit.binder_for().to_odoo().sale_line_id
line_id.product_uom_qty = qty
self.unit.invoice_line_data(self.record)
self.assertEqual(
float(self.record['t_patient_pay_sub']) / qty,
line_id.price_unit
)
def test_invoice_line_data_prepares_invoice_line(self):
""" It should prepare invoice line based on sale line """
qty = 20
with mock.patch.object(self.unit, 'binder_for'):
line_id = self.unit.binder_for().to_odoo().sale_line_id
line_id.product_uom_qty = qty
self.unit.invoice_line_data(self.record)
line_id._prepare_invoice_line.assert_called_once_with(qty)
def test_invoice_line_data_return(self):
""" It should prepare invoice line based on sale line """
qty = 20
with mock.patch.object(self.unit, 'binder_for'):
line_id = self.unit.binder_for().to_odoo().sale_line_id
line_id.product_uom_qty = qty
res = self.unit.invoice_line_data(self.record)
self.assertEqual(line_id._prepare_invoice_line(), res)
class TestAccountInvoiceLineImporter(AccountInvoiceLineTestBase):
def setUp(self):
super(TestAccountInvoiceLineImporter, self).setUp()
self.Unit = account_invoice_line.AccountInvoiceLineImporter
self.unit = self.Unit(self.mock_env)
self.unit.carepoint_record = self.record
def test_import_dependencies(self):
""" It should import all depedencies """
with mock.patch.object(self.unit, '_import_dependency') as mk:
self.unit._import_dependencies()
mk.assert_has_calls([
mock.call(
self.record['rxdisp_id'],
'carepoint.procurement.order',
),
])
def test_after_import_get_binder_procurement(self):
""" It should get binder for record type """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
self.unit.binder_for.assert_called_once_with(
'carepoint.procurement.order'
)
def test_after_import_to_odoo_procurement(self):
""" It should get Odoo record for binding """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for().to_odoo.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
self.unit.binder_for().to_odoo.assert_called_once_with(
self.record['rxdisp_id'], browse=True,
)
def test_after_import_get_binder_sale(self):
""" It should get binder for record type """
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for.side_effect = [mock.MagicMock(),
EndTestException]
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
self.unit.binder_for.assert_called_with(
'carepoint.sale.order'
)
def test_after_import_to_backend_sale(self):
""" It should get backend record for binding """
proc = mock.MagicMock()
with mock.patch.object(self.unit, 'binder_for'):
self.unit.binder_for().to_odoo.return_value = proc
self.unit.binder_for().to_backend.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
self.unit.binder_for().to_backend.assert_called_with(
proc.sale_line_id.order_id.id,
)
def test_after_import_gets_proc_unit(self):
""" It should get unit for model """
with mock.patch.multiple(
self.unit, binder_for=mock.DEFAULT, unit_for=mock.DEFAULT
):
self.unit.unit_for.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
self.unit.unit_for.assert_called_with(
account_invoice_line.ProcurementOrderUnit,
model='carepoint.procurement.order',
)
def test_after_import_gets_order_line_cnt(self):
""" It should get count of order lines for sale """
with mock.patch.multiple(
self.unit, binder_for=mock.DEFAULT, unit_for=mock.DEFAULT
):
self.unit.unit_for()._get_order_line_count.side_effect = \
EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
self.unit.unit_for()._get_order_line_count.assert_called_with(
self.unit.binder_for().to_backend()
)
def test_after_import_gets_ref_for_cp_state(self):
""" It should get reference for carepoint state record """
with mock.patch.multiple(
self.unit, binder_for=mock.DEFAULT, unit_for=mock.DEFAULT,
session=mock.DEFAULT, _get_binding=mock.DEFAULT,
):
invoice_id = self.unit._get_binding().invoice_id
self.unit.unit_for()._get_order_line_count.return_value = 1
invoice_id.invoice_line_ids = [True]
self.unit.env.ref.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
self.unit.env.ref.assert_called_with(
'connector_carepoint.state_%d' % (
self.unit.binder_for().to_odoo().sale_line_id.
order_id.carepoint_order_state_cn
)
)
def test_after_import_invoice_write_new_state(self):
""" It should write to invoice new states provided by remote system """
with mock.patch.multiple(
self.unit, binder_for=mock.DEFAULT, unit_for=mock.DEFAULT,
session=mock.DEFAULT, _get_binding=mock.DEFAULT,
):
invoice_id = self.unit._get_binding().invoice_id
self.unit.unit_for()._get_order_line_count.return_value = 1
invoice_id.invoice_line_ids = [True]
invoice_id.write.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
invoice_id.write.assert_called_once_with({
'state': self.unit.env.ref().invoice_state,
})
def test_after_import_invoice_create_moves(self):
""" It should create accounting moves for newly paid invoices """
with mock.patch.multiple(
self.unit, binder_for=mock.DEFAULT, unit_for=mock.DEFAULT,
session=mock.DEFAULT, _get_binding=mock.DEFAULT,
):
invoice_id = self.unit._get_binding().invoice_id
self.unit.unit_for()._get_order_line_count.return_value = 1
invoice_id.invoice_line_ids = [True]
self.unit.env.ref().invoice_state = 'paid'
invoice_id.action_move_create.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
def test_after_import_invoice_validate(self):
""" It should validate newly paid invoices """
with mock.patch.multiple(
self.unit, binder_for=mock.DEFAULT, unit_for=mock.DEFAULT,
session=mock.DEFAULT, _get_binding=mock.DEFAULT,
):
invoice_id = self.unit._get_binding().invoice_id
self.unit.unit_for()._get_order_line_count.return_value = 1
invoice_id.invoice_line_ids = [True]
self.unit.env.ref().invoice_state = 'paid'
invoice_id.invoice_validate.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
    def test_after_import_invoice_pay_and_reconcile(self):
        """ It should pay and reconcile invoices when there is residual on the invoice """
with mock.patch.multiple(
self.unit, binder_for=mock.DEFAULT, unit_for=mock.DEFAULT,
session=mock.DEFAULT, _get_binding=mock.DEFAULT,
):
invoice_id = self.unit._get_binding().invoice_id
invoice_id.residual = 1
self.unit.unit_for()._get_order_line_count.return_value = 1
invoice_id.invoice_line_ids = [True]
self.unit.env.ref().invoice_state = 'paid'
invoice_id.pay_and_reconcile.side_effect = EndTestException
with self.assertRaises(EndTestException):
self.unit._after_import(self.record)
invoice_id.pay_and_reconcile.assert_called_once_with(
self.unit.backend_record.default_payment_journal,
date=invoice_id.date_invoice,
)
| laslabs/odoo-connector-carepoint | connector_carepoint/tests/models/test_account_invoice_line.py | Python | agpl-3.0 | 19,528 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp import api, fields, models
from openerp.exceptions import UserError
class PersonManagement(models.Model):
_name = 'myo.person.mng'
name = fields.Char('Name', required=True)
alias = fields.Char('Alias', help='Common name that the Person is referred.')
code = fields.Char(string='Person Code', required=False)
notes = fields.Text(string='Notes')
date_inclusion = fields.Datetime("Inclusion Date", required=False, readonly=False,
default=lambda *a: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
batch_name = fields.Char('Batch Name', required=False)
country_id_2 = fields.Many2one('res.country', 'Nationality')
birthday = fields.Date("Date of Birth")
age = fields.Char(
string='Age',
compute='_compute_age',
store=True
)
estimated_age = fields.Char(string='Estimated Age', required=False)
spouse_name = fields.Char('Spouse Name')
spouse_id = fields.Many2one('myo.person', 'Spouse', ondelete='restrict')
father_name = fields.Char('Father Name')
father_id = fields.Many2one('myo.person', 'Father', ondelete='restrict')
mother_name = fields.Char('Mother Name')
mother_id = fields.Many2one('myo.person', 'Mother', ondelete='restrict')
responsible_name = fields.Char('Responsible Name')
responsible_id = fields.Many2one('myo.person', 'Responsible', ondelete='restrict')
identification_id = fields.Char('Person ID')
otherid = fields.Char('Other ID')
gender = fields.Selection(
[('M', 'Male'),
('F', 'Female')
], 'Gender'
)
marital = fields.Selection(
[('single', 'Single'),
('married', 'Married'),
('widower', 'Widower'),
('divorced', 'Divorced'),
], 'Marital Status'
)
active = fields.Boolean('Active',
help="If unchecked, it will allow you to hide the person without removing it.",
default=1)
person_id = fields.Many2one('myo.person', 'Person')
_order = 'name'
_sql_constraints = [
('code_uniq',
'UNIQUE(code)',
u'Error! The Person Code must be unique!'
)
]
@api.multi
@api.constrains('birthday')
def _check_birthday(self):
for person in self:
if person.birthday > fields.Date.today():
raise UserError(u'Error! Date of Birth must be in the past!')
@api.one
@api.depends('birthday')
def _compute_age(self):
now = datetime.now()
if self.birthday:
dob = datetime.strptime(self.birthday, '%Y-%m-%d')
delta = relativedelta(now, dob)
# self.age = str(delta.years) + "y " + str(delta.months) + "m " + str(delta.days) + "d"
self.age = str(delta.years)
else:
self.age = "No Date of Birth!"
| MostlyOpen/odoo_addons | myo_person_mng/models/person_mng.py | Python | agpl-3.0 | 3,872 |
""" Form widget classes """
from __future__ import absolute_import
from django.conf import settings
from django.forms.utils import flatatt
from django.forms.widgets import CheckboxInput
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangolib.markup import HTML, Text
class TermsOfServiceCheckboxInput(CheckboxInput):
""" Renders a checkbox with a label linking to the terms of service. """
def render(self, name, value, attrs=None):
extra_attrs = attrs.copy()
extra_attrs.update({'type': 'checkbox', 'name': name})
final_attrs = self.build_attrs(self.attrs, extra_attrs=extra_attrs)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
# Translators: link_start and link_end are HTML tags for a link to the terms of service.
# platform_name is the name of this Open edX installation.
label = Text(_(
u'I, and my organization, accept the {link_start}{platform_name} API Terms of Service{link_end}.'
)).format(
platform_name=configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
link_start=HTML(u'<a href="{url}" rel="noopener" target="_blank">').format(
url=reverse('api_admin:api-tos')
),
link_end=HTML('</a>'),
)
html = HTML(u'<input{{}} /> <label class="tos-checkbox-label" for="{id}">{label}</label>').format(
id=final_attrs['id'],
label=label
)
return format_html(html, flatatt(final_attrs))
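# Minimal usage sketch (assumed, not taken from this codebase): the widget is
# meant to back a BooleanField on a Django form, e.g.
#
#   class ApiAccessForm(forms.Form):
#       terms_of_service = forms.BooleanField(widget=TermsOfServiceCheckboxInput())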
| ESOedX/edx-platform | openedx/core/djangoapps/api_admin/widgets.py | Python | agpl-3.0 | 1,985 |
# -*- coding: utf-8 -*-
import pandas as pd
import sys
from builtins import str as text
from utils import find_zipcode, str2date
header_mapping = {
'origin': 'ORIGIN',
'company_name': 'LABO',
'lastname_firstname': 'BENEF_PS_QUALITE_NOM_PRENOM',
'address': 'BENEF_PS_ADR',
'job': 'BENEF_PS_QUALIFICATION',
'rpps': 'BENEF_PS_RPPS',
'value': 'DECL_AVANT_MONTANT',
'date': 'DECL_AVANT_DATE',
'kind': 'DECL_AVANT_NATURE',
'BENEF_PS_CODEPOSTAL': 'BENEF_PS_CODEPOSTAL'
}
input_filename = sys.argv[1]
output_filename = sys.argv[2]
df = pd.read_csv(input_filename, encoding='utf-8')
df['lastname_firstname'] = df['name'] + ' ' + df['firstname']
df['origin'] = 'Pharmacien'
df['date'] = df['date'].apply(str2date)
df['BENEF_PS_CODEPOSTAL'] = df['address'].apply(find_zipcode)
for origin, target in header_mapping.items():
df[target] = df[origin]
df[target] = df[target].apply(text).apply(lambda s: s.replace(',', '- ').replace('"', ''))
df[list(header_mapping.values())].to_csv(output_filename, index=False, encoding='utf-8')
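# Example invocation (file names are illustrative only):
#   python format_pharmaciens.py declarations_pharmaciens.csv declarations_formatees.csv
# The input CSV is expected to provide the source columns referenced in
# header_mapping (name, firstname, company_name, address, job, rpps, value,
# date, kind); 'origin', 'lastname_firstname' and the zipcode are derived above.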
| regardscitoyens/sunshine-data | scripts/format_pharmaciens.py | Python | agpl-3.0 | 1,072 |
# coding: utf-8
# maposmatic, the web front-end of the MapOSMatic city map generation system
# Copyright (C) 2009 David Decotigny
# Copyright (C) 2009 Frédéric Lehobey
# Copyright (C) 2009 Pierre Mauduit
# Copyright (C) 2009 David Mentré
# Copyright (C) 2009 Maxime Petazzoni
# Copyright (C) 2009 Thomas Petazzoni
# Copyright (C) 2009 Gaël Utard
# Copyright (C) 2019 Hartmut Holzgraefe
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Views for MapOSMatic
import datetime
import logging
import json
import os
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponseBadRequest, HttpResponseNotFound, HttpResponse, Http404
from django.db.transaction import TransactionManagementError
from django.shortcuts import get_object_or_404, render_to_response, render
from django.template import RequestContext
from django.utils.translation import ugettext, ugettext_lazy as _
from django.core import serializers
from django.forms.models import model_to_dict
from django.core.exceptions import ValidationError
from django.urls import get_script_prefix
from django.db import connections
from django.utils.safestring import mark_safe
import ocitysmap
from www.maposmatic import helpers, forms, nominatim, models
import www.settings
import psycopg2
LOG = logging.getLogger('maposmatic')
def index(request):
"""The main page."""
form = forms.MapSearchForm(request.GET)
job_list = (models.MapRenderingJob.objects.all()
.order_by('-submission_time'))
job_list = (job_list.filter(status=0) |
job_list.filter(status=1))
return render(request,
'maposmatic/index.html',
{ 'form': form,
'queued': job_list.count()
}
)
def about(request):
"""The about page."""
form = forms.MapSearchForm(request.GET)
job_list = (models.MapRenderingJob.objects.all()
.order_by('-submission_time'))
job_list = (job_list.filter(status=0) |
job_list.filter(status=1))
return render(request,
'maposmatic/about.html',
{ 'form': form,
'queued': job_list.count()
}
)
def privacy(request):
"""The privacy statement page."""
return render(request,
'maposmatic/privacy.html',
{ }
)
def documentation_user_guide(request):
"""The user guide page."""
return render(request,
'maposmatic/documentation-user-guide.html',
{ }
)
def documentation_api(request):
"""The api documentation."""
return render(request,
'maposmatic/documentation-api.html',
{ }
)
def donate(request):
"""The donate page."""
form = forms.MapSearchForm(request.GET)
job_list = (models.MapRenderingJob.objects.all()
.order_by('-submission_time'))
job_list = (job_list.filter(status=0) |
job_list.filter(status=1))
return render(request,
'maposmatic/donate.html',
{ 'form': form,
'queued': job_list.count()
}
)
def donate_thanks(request):
"""The thanks for donation page."""
return render_to_response('maposmatic/donate-thanks.html')
def create_upload_file(job, file):
first_line = file.readline().decode("utf-8-sig")
LOG.info("firstline type %s" % type(first_line))
if first_line.startswith(u'<?xml'):
file_type = 'gpx'
else:
file_type = 'umap'
file_instance = models.UploadFile(uploaded_file = file, file_type = file_type)
file_instance.save()
file_instance.job.add(job)
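# Note on the upload handling above: a GPX file is recognised purely by its
# XML prolog on the first line (e.g. <?xml version="1.0" encoding="UTF-8"?>);
# any other upload is assumed to be a uMap export.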
def new(request):
"""The map creation page and form."""
papersize_buttons = ''
if request.method == 'POST':
form = forms.MapRenderingJobForm(request.POST, request.FILES)
if form.is_valid():
request.session['new_layout'] = form.cleaned_data.get('layout')
request.session['new_stylesheet'] = form.cleaned_data.get('stylesheet')
request.session['new_overlay'] = form.cleaned_data.get('overlay')
request.session['new_paper_width_mm'] = form.cleaned_data.get('paper_width_mm')
request.session['new_paper_height_mm'] = form.cleaned_data.get('paper_height_mm')
job = form.save(commit=False)
job.administrative_osmid = form.cleaned_data.get('administrative_osmid')
job.stylesheet = form.cleaned_data.get('stylesheet')
job.overlay = ",".join(form.cleaned_data.get('overlay'))
job.layout = form.cleaned_data.get('layout')
job.paper_width_mm = form.cleaned_data.get('paper_width_mm')
job.paper_height_mm = form.cleaned_data.get('paper_height_mm')
job.status = 0 # Submitted
if www.settings.SUBMITTER_IP_LIFETIME != 0:
job.submitterip = request.META['REMOTE_ADDR']
else:
job.submitterip = None
job.submitteremail = form.cleaned_data.get('submitteremail')
job.map_language = form.cleaned_data.get('map_language')
job.index_queue_at_submission = (models.MapRenderingJob.objects
.queue_size())
job.nonce = helpers.generate_nonce(models.MapRenderingJob.NONCE_SIZE)
job.save()
files = request.FILES.getlist('uploadfile')
for file in files:
create_upload_file(job, file)
return HttpResponseRedirect(reverse('map-by-id-and-nonce',
args=[job.id, job.nonce]))
else:
LOG.debug("FORM NOT VALID")
else:
init_vals = request.GET.dict()
oc = ocitysmap.OCitySMap(www.settings.OCITYSMAP_CFG_PATH)
if not 'layout' in init_vals and 'new_layout' in request.session :
init_vals['layout'] = request.session['new_layout']
else:
request.session['new_layout'] = oc.get_all_renderer_names()[0]
if not 'stylesheet' in init_vals and 'new_stylesheet' in request.session:
init_vals['stylesheet'] = request.session['new_stylesheet']
else:
request.session['new_stylesheet'] = oc.get_all_style_names()[0]
if not 'overlay' in init_vals and 'new_overlay' in request.session:
init_vals['overlay'] = request.session['new_overlay']
if not 'paper_width_mm' in init_vals and 'new_paper_width_mm' in request.session:
init_vals['paper_width_mm'] = request.session['new_paper_width_mm']
        if not 'paper_height_mm' in init_vals and 'new_paper_height_mm' in request.session:
init_vals['paper_height_mm'] = request.session['new_paper_height_mm']
form = forms.MapRenderingJobForm(initial=init_vals)
_ocitysmap = ocitysmap.OCitySMap(www.settings.OCITYSMAP_CFG_PATH)
    # TODO: create templates for these button lines ...
papersize_buttons += "<p><button id='paper_best_fit' type='button' class='btn btn-primary papersize papersize_best_fit' onclick='set_papersize(0,0);'><i class='fas fa-square fa-2x'></i></button> <b>Best fit</b> (<span id='best_width'>?</span>×<span id='best_height'>?</span>mm²)</p>"
for p in _ocitysmap.get_all_paper_sizes():
if p[1] is not None:
papersize_buttons += "<p>"
if p[1] != p[2]:
papersize_buttons += "<button id='paper_{0}_{1}' type='button' class='btn btn-primary papersize papersize_{0}_{1}' onclick='set_papersize({0}, {1});'><i class='fas fa-portrait fa-2x'></i></button> ".format(p[1], p[2])
papersize_buttons += "<button id='paper_{0}_{1}' type='button' class='btn btn-primary papersize papersize_{0}_{1}' onclick='set_papersize({0}, {1});'><i class='fas fa-image fa-2x'></i></button> ".format(p[2], p[1])
else:
papersize_buttons += "<button id='paper_{0}_{1}' disabled type='button' class='btn btn-primary papersize papersize_{0}_{1}' onclick='set_papersize({0}, {1});'><i class='fas fa-square fa-2x'></i></button> ".format(p[1], p[2])
papersize_buttons += "<b>%s</b> (%s×%smm²)</p>" % (p[0], repr(p[1]), repr(p[2]))
multisize_buttons = ''
for p in _ocitysmap.get_all_paper_sizes('multipage'):
if p[1] is not None:
multisize_buttons += "<p>"
if p[1] != p[2]:
multisize_buttons += "<button id='multipaper_{0}_{1}' type='button' class='btn btn-primary papersize papersize_{0}_{1}' onclick='set_papersize({0}, {1});'><i class='fas fa-portrait fa-2x'></i></button> ".format(p[1], p[2])
multisize_buttons += "<button id='multipaper_{0}_{1}' type='button' class='btn btn-primary papersize papersize_{0}_{1}' onclick='set_papersize({0}, {1});'><i class='fas fa-image fa-2x'></i></button> ".format(p[2], p[1])
else:
multisize_buttons += "<button id='multipaper_{0}_{1}' disabled type='button' class='btn btn-primary papersize papersize_{0}_{1}' onclick='set_papersize({0}, {1});'><i class='fas fa-square fa-2x'></i></button> ".format(p[1], p[2])
multisize_buttons += "<b>%s</b> (%s×%smm²)</p>" % (p[0], repr(p[1]), repr(p[2]))
return render(request, 'maposmatic/new.html',
{ 'form' : form ,
'papersize_suggestions': mark_safe(papersize_buttons),
'multipage_papersize_suggestions': mark_safe(multisize_buttons),
})
def map_full(request, id, nonce=None):
"""The full-page map details page.
Args:
id (int): the job ID in the database.
"""
job = get_object_or_404(models.MapRenderingJob, id=id)
isredirected = request.session.get('redirected', False)
request.session.pop('redirected', None)
queue_size = models.MapRenderingJob.objects.queue_size()
progress = 100
if queue_size:
progress = 20 + int(80 * (queue_size -
job.current_position_in_queue()) / float(queue_size))
refresh = job.is_rendering() and \
www.settings.REFRESH_JOB_RENDERING or \
www.settings.REFRESH_JOB_WAITING
return render(request, 'maposmatic/map-full.html',
{ 'map': job, 'redirected': isredirected,
'nonce': nonce, 'refresh': refresh,
'progress': progress, 'queue_size': queue_size })
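# Worked example (illustrative) for the progress value above: with 4 jobs in
# the queue and this job at position 1, progress = 20 + int(80 * (4 - 1) / 4.0) = 80.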
def maps(request, category=None):
"""Displays all maps and jobs, sorted by submission time, or maps matching
the search terms when provided."""
map_list = None
form = forms.MapSearchForm(request.GET)
if form.is_valid():
map_list = (models.MapRenderingJob.objects
.order_by('-submission_time')
.filter(maptitle__icontains=form.cleaned_data['query']))
if len(map_list) == 1:
return HttpResponseRedirect(reverse('map-by-id',
args=[map_list[0].id]))
else:
form = forms.MapSearchForm()
if map_list is None:
map_list = (models.MapRenderingJob.objects
.order_by('-submission_time'))
if category == 'errors':
map_list = map_list.filter(status=2).exclude(resultmsg='ok')
paginator = Paginator(map_list, www.settings.ITEMS_PER_PAGE)
try:
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
maps = paginator.page(page)
except (EmptyPage, InvalidPage):
maps = paginator.page(paginator.num_pages)
return render(request, 'maposmatic/maps.html',
{ 'maps': maps, 'form': form,
'is_search': form.is_valid(),
'pages': helpers.get_pages_list(maps, paginator) })
def recreate(request):
if request.method == 'POST':
form = forms.MapRecreateForm(request.POST)
if form.is_valid():
job = get_object_or_404(models.MapRenderingJob,
id=form.cleaned_data['id'])
newjob = models.MapRenderingJob()
newjob.maptitle = job.maptitle
newjob.administrative_city = job.administrative_city
newjob.administrative_osmid = job.administrative_osmid
newjob.lat_upper_left = job.lat_upper_left
newjob.lon_upper_left = job.lon_upper_left
newjob.lat_bottom_right = job.lat_bottom_right
newjob.lon_bottom_right = job.lon_bottom_right
newjob.stylesheet = job.stylesheet
newjob.overlay = job.overlay
newjob.layout = job.layout
newjob.paper_width_mm = job.paper_width_mm
newjob.paper_height_mm = job.paper_height_mm
newjob.status = 0 # Submitted
if www.settings.SUBMITTER_IP_LIFETIME != 0:
newjob.submitterip = request.META['REMOTE_ADDR']
else:
newjob.submitterip = None
            newjob.submitteremail = None # TODO
newjob.map_language = job.map_language
newjob.index_queue_at_submission = (models.MapRenderingJob.objects
.queue_size())
newjob.nonce = helpers.generate_nonce(models.MapRenderingJob.NONCE_SIZE)
newjob.save()
for each in job.uploads.all():
each.job.add(newjob)
return HttpResponseRedirect(reverse('map-by-id-and-nonce',
args=[newjob.id, newjob.nonce]))
return HttpResponseBadRequest("ERROR: Invalid request")
def cancel(request):
if request.method == 'POST':
form = forms.MapCancelForm(request.POST)
if form.is_valid():
job = get_object_or_404(models.MapRenderingJob,
id=form.cleaned_data['id'],
nonce=form.cleaned_data['nonce'])
job.cancel()
return HttpResponseRedirect(reverse('map-by-id-and-nonce',
args=[job.id, job.nonce]))
return HttpResponseBadRequest("ERROR: Invalid request")
def api_nominatim(request):
"""Nominatim query gateway."""
exclude = request.GET.get('exclude', '')
squery = request.GET.get('q', '')
lang = None
if 'HTTP_ACCEPT_LANGUAGE' in request.META:
# Accept-Language headers typically look like
# fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3. Unfortunately,
# Nominatim behaves improperly with such a string: it gives
# the region name in French, but the country name in
# English. We split at the first comma to only keep the
# preferred language, which makes Nominatim work properly.
lang = request.META['HTTP_ACCEPT_LANGUAGE'].split(',')[0]
try:
contents = nominatim.query(squery, exclude, with_polygons=False,
accept_language=lang)
except Exception as e:
LOG.exception("Error querying Nominatim")
contents = []
return HttpResponse(content=json.dumps(contents),
content_type='text/json')
def api_nominatim_reverse(request, lat, lon):
"""Nominatim reverse geocoding query gateway."""
lat = float(lat)
lon = float(lon)
return HttpResponse(json.dumps(nominatim.reverse_geo(lat, lon)),
content_type='text/json')
def api_postgis_reverse(request, lat, lon):
lat = float(lat)
lon = float(lon)
cursor = None
query = """select country_code
from country_osm_grid
where st_contains(geometry,
st_geomfromtext('POINT(%f %f)', 4326))
""" % (lon, lat)
LOG.debug("Reverse Lookup Query %s" % query)
try:
connections['osm'].rollback() # make sure there's no pending transaction
cursor = connections['osm'].cursor()
cursor.execute(query)
country_code = cursor.fetchone()
cursor.close()
if country_code is None or len(country_code) < 1:
raise Http404("postgis: country not found")
return HttpResponse('{"address": {"country_code": "%s"}}' % country_code[0], content_type='text/json')
except Exception as e:
LOG.warning("reverse geo lookup failed: %s" % e)
pass
finally:
# Close the DB cursor if necessary
if cursor is not None and not cursor.closed:
cursor.close()
raise Http404("postgis: something went wrong")
def api_geosearch(request):
"""Simple place name search."""
exclude = request.GET.get('exclude', '')
squery = request.GET.get('q', '')
squery = squery.lower()
contents = { "entries": [] }
cursor = None
if www.settings.MAX_BOUNDING_BOX:
m = www.settings.MAX_BOUNDING_BOX
max_bbox = "ST_GeomFromText('POLYGON((%f %f, %f %f, %f %f, %f %f, %f %f))', 4326)" % (m[1], m[0], m[1], m[2], m[3], m[2], m[3], m[0], m[1], m[0])
pt_bbox = 'AND ST_Contains(ST_Transform(%s, 3857), pt.way)' % max_bbox
poly_bbox = 'AND ST_Contains(ST_Transform(%s, 3857), poly.way)' % max_bbox
else:
pt_bbox = ''
poly_bbox = ''
query = """SELECT p.name
, p.display_name
, p.class
, p.type
, p.osm_type
, p.osm_id
, p.lat
, p.lon
, p.west
, p.east
, p.north
, p.south
, p.place_rank
, p.importance
, p.country_code
FROM place p
LEFT JOIN planet_osm_hstore_point pt
ON p.osm_id = pt.osm_id
%s -- optionally filter by max bbox
LEFT JOIN planet_osm_hstore_polygon poly
ON - p.osm_id = poly.osm_id
%s -- optionally filter by max bbox
WHERE LOWER(p.name) = %%s
AND ( pt.osm_id IS NOT NULL
OR poly.osm_id IS NOT NULL
)
ORDER BY p.place_rank
, p.importance DESC
""" % (pt_bbox, poly_bbox)
try:
cursor = connections['osm'].cursor()
if cursor is None:
raise Http404("postgis: no cursor")
cursor.execute(query, [ squery ])
columns = [col[0] for col in cursor.description]
for row in cursor.fetchall():
values = dict(zip(columns, row))
values["boundingbox"] = "%f,%f,%f,%f" % (values["south"], values["north"], values["west"], values["east"])
bbox = ocitysmap.coords.BoundingBox(values["south"], values["west"], values["north"], values["east"])
(metric_size_lat, metric_size_lon) = bbox.spheric_sizes()
LOG.warning("metric lat/lon %f : %f - %f" % (metric_size_lat, metric_size_lon, www.settings.BBOX_MAXIMUM_LENGTH_IN_METERS))
if values["osm_type"] == "node":
values["icon"] = "../media/img/place-node.png"
values["ocitysmap_params"] = {
"valid": False,
"reason": "no-admin",
"reason_text": "No administrative boundary"
}
else:
values["icon"] = "../media/img/place-polygon.png"
if (metric_size_lat > www.settings.BBOX_MAXIMUM_LENGTH_IN_METERS
or metric_size_lon > www.settings.BBOX_MAXIMUM_LENGTH_IN_METERS):
valid = False
reason = "area-too-big"
reason_text = ugettext("Administrative area too big for rendering")
else:
valid = True
reason = ""
reason_text = ""
values["ocitysmap_params"] = {
"valid": valid,
"table": "polygon",
"id": -values["osm_id"],
"reason": reason,
"reason_text": reason_text
}
contents["entries"].append(values)
cursor.close()
return HttpResponse(content=json.dumps(contents),
content_type='text/json')
except Exception as e:
raise TransactionManagementError(e)
def api_papersize(request):
"""API handler to get the compatible paper sizes for the provided layout
and bounding box."""
if request.method != 'POST':
return HttpResponseBadRequest("ERROR: Bad request")
f = forms.MapPaperSizeForm(request.POST)
if not f.is_valid():
return HttpResponseBadRequest("ERROR: Invalid arguments")
renderer = ocitysmap.OCitySMap(www.settings.OCITYSMAP_CFG_PATH)
osmid = f.cleaned_data.get('osmid')
layout = f.cleaned_data.get('layout')
stylesheet = renderer.get_stylesheet_by_name(
f.cleaned_data.get('stylesheet'))
# Determine geographic area
if osmid is not None:
try:
bbox_wkt, area_wkt = renderer.get_geographic_info(osmid)
except ValueError:
LOG.exception("Error determining compatible paper sizes")
raise
bbox = ocitysmap.coords.BoundingBox.parse_wkt(bbox_wkt)
else:
lat_upper_left = f.cleaned_data.get("lat_upper_left")
lon_upper_left = f.cleaned_data.get("lon_upper_left")
lat_bottom_right = f.cleaned_data.get("lat_bottom_right")
lon_bottom_right = f.cleaned_data.get("lon_bottom_right")
# Check we have correct floats
if (lat_upper_left == None or lon_upper_left == None
or lat_bottom_right == None or lon_bottom_right == None):
return HttpResponseBadRequest("ERROR: Invalid arguments")
bbox = ocitysmap.coords.BoundingBox(
lat_upper_left, lon_upper_left,
lat_bottom_right, lon_bottom_right)
renderer_cls = ocitysmap.renderers.get_renderer_class_by_name(layout)
paper_sizes = sorted(renderer_cls.get_compatible_paper_sizes(bbox, renderer),
key = lambda p: p['width'])
return HttpResponse(content=json.dumps(paper_sizes),
content_type='text/json')
def api_bbox(request, osm_id):
"""API handler that returns the bounding box from an OSM ID polygon."""
try:
osm_id = int(osm_id)
except ValueError:
return HttpResponseBadRequest("ERROR: Invalid arguments")
renderer = ocitysmap.OCitySMap(www.settings.OCITYSMAP_CFG_PATH)
try:
bbox_wkt, area_wkt = renderer.get_geographic_info(osm_id)
bbox = ocitysmap.coords.BoundingBox.parse_wkt(bbox_wkt)
return HttpResponse(content=json.dumps(bbox.as_json_bounds()),
content_type='text/json')
except:
LOG.exception("Error calculating bounding box for OSM ID %d!" % osm_id)
return HttpResponseBadRequest("ERROR: OSM ID %d not found!" % osm_id)
def api_polygon(request, osm_id):
"""API handler that returns the polygon outline from an OSM ID polygon."""
try:
osm_id = int(osm_id)
except ValueError:
return HttpResponseBadRequest("ERROR: Invalid arguments")
renderer = ocitysmap.OCitySMap(www.settings.OCITYSMAP_CFG_PATH)
try:
bbox_wkt, area_wkt = renderer.get_geographic_info(osm_id)
bbox = ocitysmap.coords.BoundingBox.parse_wkt(bbox_wkt).as_json_bounds()
return HttpResponse(content=json.dumps({'bbox': bbox, 'wkt': area_wkt}),
content_type='text/json')
except:
LOG.exception("Error retrieving polygon outline for OSM ID %d!" % osm_id)
return HttpResponseBadRequest("ERROR: OSM ID %d not found!" % osm_id)
def api_rendering_status(request, id, nonce=None):
"""API handler for updating map request rendering status"""
try:
id = int(id)
except ValueError:
return HttpResponseBadRequest("ERROR: Invalid arguments")
job = get_object_or_404(models.MapRenderingJob, id=id)
isredirected = request.session.get('redirected', False)
request.session.pop('redirected', None)
queue_size = models.MapRenderingJob.objects.queue_size()
progress = 100
if queue_size:
progress = 20 + int(80 * (queue_size -
job.current_position_in_queue()) / float(queue_size))
refresh = job.is_rendering() and \
www.settings.REFRESH_JOB_RENDERING or \
www.settings.REFRESH_JOB_WAITING
return render(request, 'maposmatic/map-full-parts/rendering-status.html',
{ 'map': job,
'redirected': isredirected,
'nonce': nonce,
'refresh': refresh,
'progress': progress,
'queue_size': queue_size,
'status': 'working'
})
| hholzgra/maposmatic | www/maposmatic/views.py | Python | agpl-3.0 | 26,128 |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from pathlib import Path
from flask import (Flask, session, redirect, url_for, flash, g, request,
render_template)
from flask_assets import Environment
from flask_babel import gettext
from flask_wtf.csrf import CSRFProtect, CSRFError
from os import path
import sys
from werkzeug.exceptions import default_exceptions
import i18n
import template_filters
import version
from crypto_util import CryptoUtil
from db import db
from journalist_app import account, admin, api, main, col
from journalist_app.utils import (get_source, logged_in,
JournalistInterfaceSessionInterface,
cleanup_expired_revoked_tokens)
from models import InstanceConfig, Journalist
from server_os import is_os_near_eol, is_os_past_eol
from store import Storage
import typing
# https://www.python.org/dev/peps/pep-0484/#runtime-or-type-checking
if typing.TYPE_CHECKING:
# flake8 can not understand type annotation yet.
# That is why all type annotation relative import
# statements has to be marked as noqa.
# http://flake8.pycqa.org/en/latest/user/error-codes.html?highlight=f401
from sdconfig import SDConfig # noqa: F401
from typing import Optional, Union, Tuple, Any # noqa: F401
from werkzeug import Response # noqa: F401
from werkzeug.exceptions import HTTPException # noqa: F401
_insecure_views = ['main.login', 'static']
def get_logo_url(app: Flask) -> str:
if not app.static_folder:
raise FileNotFoundError
custom_logo_filename = "i/custom_logo.png"
default_logo_filename = "i/logo.png"
custom_logo_path = Path(app.static_folder) / custom_logo_filename
default_logo_path = Path(app.static_folder) / default_logo_filename
if custom_logo_path.is_file():
return url_for("static", filename=custom_logo_filename)
elif default_logo_path.is_file():
return url_for("static", filename=default_logo_filename)
raise FileNotFoundError
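# Usage sketch (illustrative): within a request context,
#   get_logo_url(app)   # -> "/static/i/custom_logo.png" when a custom logo exists,
#                       #    otherwise "/static/i/logo.png"; raises FileNotFoundError
#                       #    if neither file is present.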
def create_app(config: 'SDConfig') -> Flask:
app = Flask(__name__,
template_folder=config.JOURNALIST_TEMPLATES_DIR,
static_folder=path.join(config.SECUREDROP_ROOT, 'static'))
app.config.from_object(config.JOURNALIST_APP_FLASK_CONFIG_CLS)
app.session_interface = JournalistInterfaceSessionInterface()
csrf = CSRFProtect(app)
Environment(app)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = config.DATABASE_URI
db.init_app(app)
app.config.update(OS_PAST_EOL=is_os_past_eol(), OS_NEAR_EOL=is_os_near_eol())
# TODO: Attaching a Storage dynamically like this disables all type checking (and
# breaks code analysis tools) for code that uses current_app.storage; it should be refactored
app.storage = Storage(config.STORE_DIR,
config.TEMP_DIR,
config.JOURNALIST_KEY)
# TODO: Attaching a CryptoUtil dynamically like this disables all type checking (and
# breaks code analysis tools) for code that uses current_app.storage; it should be refactored
app.crypto_util = CryptoUtil(
scrypt_params=config.SCRYPT_PARAMS,
scrypt_id_pepper=config.SCRYPT_ID_PEPPER,
scrypt_gpg_pepper=config.SCRYPT_GPG_PEPPER,
securedrop_root=config.SECUREDROP_ROOT,
nouns_file=config.NOUNS,
adjectives_file=config.ADJECTIVES,
gpg_key_dir=config.GPG_KEY_DIR,
)
@app.errorhandler(CSRFError)
def handle_csrf_error(e: CSRFError) -> 'Response':
# render the message first to ensure it's localized.
msg = gettext('You have been logged out due to inactivity.')
session.clear()
flash(msg, 'error')
return redirect(url_for('main.login'))
def _handle_http_exception(
error: 'HTTPException'
) -> 'Tuple[Union[Response, str], Optional[int]]':
# Workaround for no blueprint-level 404/5 error handlers, see:
# https://github.com/pallets/flask/issues/503#issuecomment-71383286
handler = list(app.error_handler_spec['api'][error.code].values())[0]
if request.path.startswith('/api/') and handler:
return handler(error)
return render_template('error.html', error=error), error.code
for code in default_exceptions:
app.errorhandler(code)(_handle_http_exception)
i18n.configure(config, app)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.jinja_env.globals['version'] = version.__version__
app.jinja_env.filters['rel_datetime_format'] = \
template_filters.rel_datetime_format
app.jinja_env.filters['filesizeformat'] = template_filters.filesizeformat
@app.before_first_request
def expire_blacklisted_tokens() -> None:
cleanup_expired_revoked_tokens()
@app.before_request
def load_instance_config() -> None:
app.instance_config = InstanceConfig.get_current()
@app.before_request
def setup_g() -> 'Optional[Response]':
"""Store commonly used values in Flask's special g object"""
if 'expires' in session and datetime.utcnow() >= session['expires']:
session.clear()
flash(gettext('You have been logged out due to inactivity.'),
'error')
uid = session.get('uid', None)
if uid:
user = Journalist.query.get(uid)
if user and 'nonce' in session and \
session['nonce'] != user.session_nonce:
session.clear()
flash(gettext('You have been logged out due to password change'),
'error')
session['expires'] = datetime.utcnow() + \
timedelta(minutes=getattr(config,
'SESSION_EXPIRATION_MINUTES',
120))
# Work around https://github.com/lepture/flask-wtf/issues/275
# -- after upgrading from Python 2 to Python 3, any existing
# session's csrf_token value will be retrieved as bytes,
# causing a TypeError. This simple fix, deleting the existing
# token, was suggested in the issue comments. This code will
# be safe to remove after Python 2 reaches EOL in 2020, and no
# supported SecureDrop installations can still have this
# problem.
if sys.version_info.major > 2 and type(session.get('csrf_token')) is bytes:
del session['csrf_token']
uid = session.get('uid', None)
if uid:
g.user = Journalist.query.get(uid)
i18n.set_locale(config)
if app.instance_config.organization_name:
g.organization_name = app.instance_config.organization_name
else:
g.organization_name = gettext('SecureDrop')
try:
g.logo = get_logo_url(app)
except FileNotFoundError:
app.logger.error("Site logo not found.")
if app.config["OS_PAST_EOL"]:
g.show_os_past_eol_warning = True
elif app.config["OS_NEAR_EOL"]:
g.show_os_near_eol_warning = True
if request.path.split('/')[1] == 'api':
pass # We use the @token_required decorator for the API endpoints
else: # We are not using the API
if request.endpoint not in _insecure_views and not logged_in():
return redirect(url_for('main.login'))
if request.method == 'POST':
filesystem_id = request.form.get('filesystem_id')
if filesystem_id:
g.filesystem_id = filesystem_id
g.source = get_source(filesystem_id)
return None
app.register_blueprint(main.make_blueprint(config))
app.register_blueprint(account.make_blueprint(config),
url_prefix='/account')
app.register_blueprint(admin.make_blueprint(config), url_prefix='/admin')
app.register_blueprint(col.make_blueprint(config), url_prefix='/col')
api_blueprint = api.make_blueprint(config)
app.register_blueprint(api_blueprint, url_prefix='/api/v1')
csrf.exempt(api_blueprint)
return app
| conorsch/securedrop | securedrop/journalist_app/__init__.py | Python | agpl-3.0 | 8,230 |
class Beer(object):
def sing(self, first, last=0):
verses = ''
for number in reversed(range(last, first + 1)):
verses += self.verse(number) + '\n'
return verses
def verse(self, number):
return ''.join([
"%s of beer on the wall, " % self._bottles(number).capitalize(),
"%s of beer.\n" % self._bottles(number),
self._action(number),
self._next_bottle(number),
])
def _action(self, current_verse):
if current_verse == 0:
return "Go to the store and buy some more, "
else:
return "Take %s down and pass it around, " % (
"one" if current_verse > 1 else "it"
)
def _next_bottle(self, current_verse):
return "%s of beer on the wall.\n" % self._bottles(self._next_verse(current_verse))
def _bottles(self, number):
if number == 0:
return 'no more bottles'
if number == 1:
return '1 bottle'
else:
return '%d bottles' % number
def _next_verse(self, current_verse):
return current_verse - 1 if current_verse > 0 else 99
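# Usage sketch (illustrative):
#   Beer().verse(2)
#   # -> "2 bottles of beer on the wall, 2 bottles of beer.\n"
#   #    "Take one down and pass it around, 1 bottle of beer on the wall.\n"
#   Beer().sing(99, 97)   # three verses, counting down from 99 to 97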
| mscoutermarsh/exercism_coveralls | assignments/python/beer-song/example.py | Python | agpl-3.0 | 1,182 |
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from __future__ import print_function
"Tests for classes in controls\common_controls.py"
__revision__ = "$Revision: 234 $"
import sys
import ctypes
import unittest
import time
import pprint
import pdb
import os
sys.path.append(".")
from pywinauto import six
from pywinauto.controls import common_controls
from pywinauto.controls.common_controls import *
from pywinauto.win32structures import RECT
from pywinauto.controls import WrapHandle
#from pywinauto.controls.HwndWrapper import HwndWrapper
from pywinauto import findbestmatch
from pywinauto.SendKeysCtypes import is_x64
from pywinauto.RemoteMemoryBlock import AccessDenied
from pywinauto.RemoteMemoryBlock import RemoteMemoryBlock
controlspy_folder = os.path.join(
    os.path.dirname(__file__), r"..\..\controlspy0998")
if is_x64():
controlspy_folder = os.path.join(controlspy_folder, 'x64')
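# Note (assumption about the test environment): these tests drive the
# "Control Spy" sample executables expected under ..\..\controlspy0998
# (or its x64 subfolder when running 64-bit Python); without those binaries
# the Application.start_ calls in setUp will fail.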
class RemoteMemoryBlockTestCases(unittest.TestCase):
    def test__init__fail(self):
        self.assertRaises(AccessDenied, RemoteMemoryBlock, 0)
class ListViewTestCases(unittest.TestCase):
"Unit tests for the ListViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app_path = os.path.join(controlspy_folder, "List View.exe")
app.start_(app_path)
#print('app_path: ' + app_path)
self.texts = [
("Mercury", '57,910,000', '4,880', '3.30e23'),
("Venus", '108,200,000', '12,103.6', '4.869e24'),
("Earth", '149,600,000', '12,756.3', '5.9736e24'),
("Mars", '227,940,000', '6,794', '6.4219e23'),
("Jupiter", '778,330,000', '142,984', '1.900e27'),
("Saturn", '1,429,400,000', '120,536', '5.68e26'),
("Uranus", '2,870,990,000', '51,118', '8.683e25'),
("Neptune", '4,504,000,000', '49,532', '1.0247e26'),
("Pluto", '5,913,520,000', '2,274', '1.27e22'),
]
self.app = app
self.dlg = app.MicrosoftControlSpy #top_window_()
self.ctrl = app.MicrosoftControlSpy.ListView.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always!
#app.ControlStyles.ListBox1.TypeKeys("{UP}" * 26 + "{SPACE}")
#self.app.ControlStyles.ListBox1.Select("LVS_SHOWSELALWAYS")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the ListView friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "ListView")
def testColumnCount(self):
"Test the ListView ColumnCount method"
self.assertEquals (self.ctrl.ColumnCount(), 4)
def testItemCount(self):
"Test the ListView ItemCount method"
self.assertEquals (self.ctrl.ItemCount(), 9)
def testItemText(self):
"Test the ListView item.Text property"
item = self.ctrl.GetItem(1)
self.assertEquals(item['text'], "Venus")
def testItems(self):
"Test the ListView Items method"
flat_texts = []
for row in self.texts:
flat_texts.extend(row)
for i, item in enumerate(self.ctrl.Items()):
self.assertEquals(item['text'], flat_texts[i])
def testTexts(self):
"Test the ListView Texts method"
flat_texts = []
for row in self.texts:
flat_texts.extend(row)
self.assertEquals(flat_texts, self.ctrl.Texts()[1:])
def testGetItem(self):
"Test the ListView GetItem method"
for row in range(self.ctrl.ItemCount()):
for col in range(self.ctrl.ColumnCount()):
self.assertEquals(
self.ctrl.GetItem(row, col)['text'], self.texts[row][col])
def testGetItemText(self):
"Test the ListView GetItem method - with text this time"
for text in [row[0] for row in self.texts]:
self.assertEquals(
self.ctrl.GetItem(text)['text'], text)
self.assertRaises(ValueError, self.ctrl.GetItem, "Item not in this list")
def testColumn(self):
"Test the ListView Columns method"
cols = self.ctrl.Columns()
self.assertEqual (len(cols), self.ctrl.ColumnCount())
# TODO: add more checking of column values
#for col in cols:
# print(col)
def testGetSelectionCount(self):
"Test the ListView GetSelectedCount method"
self.assertEquals(self.ctrl.GetSelectedCount(), 0)
self.ctrl.Select(1)
self.ctrl.Select(7)
self.assertEquals(self.ctrl.GetSelectedCount(), 2)
# def testGetSelectionCount(self):
# "Test the ListView GetSelectedCount method"
#
# self.assertEquals(self.ctrl.GetSelectedCount(), 0)
#
# self.ctrl.Select(1)
# self.ctrl.Select(7)
#
# self.assertEquals(self.ctrl.GetSelectedCount(), 2)
def testIsSelected(self):
"Test ListView IsSelected for some items"
# ensure that the item is not selected
self.assertEquals(self.ctrl.IsSelected(1), False)
# select an item
self.ctrl.Select(1)
# now ensure that the item is selected
self.assertEquals(self.ctrl.IsSelected(1), True)
def _testFocused(self):
"Test checking the focus of some ListView items"
print("Select something quick!!")
import time
time.sleep(3)
#self.ctrl.Select(1)
print(self.ctrl.IsFocused(0))
print(self.ctrl.IsFocused(1))
print(self.ctrl.IsFocused(2))
print(self.ctrl.IsFocused(3))
print(self.ctrl.IsFocused(4))
print(self.ctrl.IsFocused(5))
#for col in cols:
# print(col)
def testSelect(self):
"Test ListView Selecting some items"
self.ctrl.Select(1)
self.ctrl.Select(3)
self.ctrl.Select(4)
self.assertRaises(IndexError, self.ctrl.Deselect, 23)
self.assertEquals(self.ctrl.GetSelectedCount(), 3)
def testSelectText(self):
"Test ListView Selecting some items"
self.ctrl.Select("Venus")
self.ctrl.Select("Jupiter")
self.ctrl.Select("Uranus")
self.assertRaises(ValueError, self.ctrl.Deselect, "Item not in list")
self.assertEquals(self.ctrl.GetSelectedCount(), 3)
def testDeselect(self):
"Test ListView Selecting some items"
self.ctrl.Select(1)
self.ctrl.Select(4)
self.ctrl.Deselect(3)
self.ctrl.Deselect(4)
self.assertRaises(IndexError, self.ctrl.Deselect, 23)
self.assertEquals(self.ctrl.GetSelectedCount(), 1)
def testGetProperties(self):
"Test getting the properties for the listview control"
props = self.ctrl.GetProperties()
self.assertEquals(
"ListView", props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
self.assertEquals(props['ColumnCount'], 4)
self.assertEquals(props['ItemCount'], 9)
def testGetColumnTexts(self):
self.dlg.MenuSelect("Styles")
self.app.ControlStyles.StylesListBox.TypeKeys(
"{HOME}" + "{DOWN}"* 12 + "{SPACE}")
self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
self.assertEquals(self.ctrl.GetColumn(0)['text'], "Planet")
self.assertEquals(self.ctrl.GetColumn(1)['text'], "Distance (km)")
self.assertEquals(self.ctrl.GetColumn(2)['text'], "Diameter (km)")
self.assertEquals(self.ctrl.GetColumn(3)['text'], "Mass (kg)")
#
# def testSubItems(self):
#
# for row in range(self.ctrl.ItemCount())
#
# for i in self.ctrl.Items():
#
# #self.assertEquals(item.Text, texts[i])
class TreeViewTestCases(unittest.TestCase):
"Unit tests for the TreeViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(os.path.join(controlspy_folder, "Tree View.exe"))
self.root_text = "The Planets"
self.texts = [
("Mercury", '57,910,000', '4,880', '3.30e23'),
("Venus", '108,200,000', '12,103.6', '4.869e24'),
("Earth", '149,600,000', '12,756.3', '5.9736e24'),
("Mars", '227,940,000', '6,794', '6.4219e23'),
("Jupiter", '778,330,000', '142,984', '1.900e27'),
("Saturn", '1,429,400,000', '120,536', '5.68e26'),
("Uranus", '2,870,990,000', '51,118', '8.683e25'),
("Neptune", '4,504,000,000', '49,532', '1.0247e26'),
("Pluto", '5,913,520,000', '2,274', '1.27e22'),
]
self.app = app
self.dlg = app.MicrosoftControlSpy #top_window_()
self.ctrl = app.MicrosoftControlSpy.TreeView.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "TreeView")
def testItemCount(self):
"Test the TreeView ItemCount method"
self.assertEquals (self.ctrl.ItemCount(), 37)
def testGetItem(self):
"Test the ItemCount method"
        self.assertRaises(RuntimeError, self.ctrl.GetItem, r"test\here\please")
self.assertRaises(IndexError, self.ctrl.GetItem, r"\test\here\please")
self.assertEquals(
self.ctrl.GetItem((0, 1, 2)).Text(), self.texts[1][3] + " kg")
self.assertEquals(
self.ctrl.GetItem(r"\The Planets\Venus\4.869").Text(), self.texts[1][3] + " kg")
self.assertEquals(
self.ctrl.GetItem(
["The Planets", "Venus", "4.869"]).Text(),
self.texts[1][3] + " kg")
def testItemText(self):
"Test the ItemCount method"
self.assertEquals(self.ctrl.Root().Text(), self.root_text)
self.assertEquals(
self.ctrl.GetItem((0, 1, 2)).Text(), self.texts[1][3] + " kg")
def testSelect(self):
"Test selecting an item"
self.ctrl.Select((0, 1, 2))
self.ctrl.GetItem((0, 1, 2)).State()
self.assertEquals(True, self.ctrl.IsSelected((0, 1, 2)))
def testEnsureVisible(self):
"make sure that the item is visible"
# note this is partially a fake test at the moment because
# just by getting an item - we usually make it visible
self.ctrl.EnsureVisible((0, 8, 2))
# make sure that the item is not hidden
self.assertNotEqual(None, self.ctrl.GetItem((0, 8, 2)).Rectangle())
def testGetProperties(self):
"Test getting the properties for the treeview control"
props = self.ctrl.GetProperties()
self.assertEquals(
"TreeView", props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
class HeaderTestCases(unittest.TestCase):
"Unit tests for the Header class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(os.path.join(controlspy_folder, "Header.exe"))
self.texts = [u'Distance', u'Diameter', u'Mass']
self.item_rects = [
RECT(0, 0, 90, 26),
RECT(90, 0, 180, 26),
RECT(180, 0, 260, 26)]
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.Header.WrapperObject()
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "Header")
def testTexts(self):
"Make sure the texts are set correctly"
self.assertEquals (self.ctrl.Texts()[1:], self.texts)
def testGetProperties(self):
"Test getting the properties for the header control"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testItemCount(self):
self.assertEquals(3, self.ctrl.ItemCount())
def testGetColumnRectangle(self):
for i in range(0, 3):
self.assertEquals(
self.item_rects[i],
self.ctrl.GetColumnRectangle(i))
def testClientRects(self):
test_rects = self.item_rects
test_rects.insert(0, self.ctrl.ClientRect())
self.assertEquals(
test_rects,
self.ctrl.ClientRects())
def testGetColumnText(self):
for i in range(0, 3):
self.assertEquals(
self.texts[i],
self.ctrl.GetColumnText(i))
class StatusBarTestCases(unittest.TestCase):
"Unit tests for the TreeViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(os.path.join(controlspy_folder, "Status bar.exe"))
self.texts = ["Long text", "", "Status Bar"]
self.part_rects = [
RECT(0, 2, 65, 20),
RECT(67, 2, 90, 20),
RECT(92, 2, 357, 20)]
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.StatusBar.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "StatusBar")
def testTexts(self):
"Make sure the texts are set correctly"
self.assertEquals (self.ctrl.Texts()[1:], self.texts)
def testGetProperties(self):
"Test getting the properties for the status bar control"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testBorderWidths(self):
"Make sure the border widths are retrieved correctly"
self.assertEquals (
self.ctrl.BorderWidths(),
dict(
Horizontal = 0,
Vertical = 2,
Inter = 2,
)
)
def testPartCount(self):
"Make sure the number of parts is retrieved correctly"
self.assertEquals (self.ctrl.PartCount(), 3)
def testPartRightEdges(self):
"Make sure the part widths are retrieved correctly"
for i in range(0, self.ctrl.PartCount()-1):
self.assertEquals (self.ctrl.PartRightEdges()[i], self.part_rects[i].right)
self.assertEquals(self.ctrl.PartRightEdges()[i+1], -1)
def testGetPartRect(self):
"Make sure the part rectangles are retrieved correctly"
for i in range(0, self.ctrl.PartCount()):
self.assertEquals (self.ctrl.GetPartRect(i), self.part_rects[i])
self.assertRaises(IndexError, self.ctrl.GetPartRect, 99)
def testClientRects(self):
self.assertEquals(self.ctrl.ClientRect(), self.ctrl.ClientRects()[0])
self.assertEquals(self.part_rects, self.ctrl.ClientRects()[1:])
def testGetPartText(self):
self.assertRaises(IndexError, self.ctrl.GetPartText, 99)
for i, text in enumerate(self.texts):
self.assertEquals(text, self.ctrl.GetPartText(i))
class TabControlTestCases(unittest.TestCase):
"Unit tests for the TreeViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(os.path.join(controlspy_folder, "Tab.exe"))
self.texts = [
"Pluto", "Neptune", "Uranus",
"Saturn", "Jupiter", "Mars",
"Earth", "Venus", "Mercury", "Sun"]
self.rects = [
RECT(2,2,80,21),
RECT(80,2,174,21),
RECT(174,2,261,21),
RECT(2,21,91,40),
RECT(91,21,180,40),
RECT(180,21,261,40),
RECT(2,40,64,59),
RECT(64,40,131,59),
RECT(131,40,206,59),
RECT(206,40,261,59),
]
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.TabControl.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "TabControl")
def testTexts(self):
"Make sure the texts are set correctly"
self.assertEquals (self.ctrl.Texts()[1:], self.texts)
def testGetProperties(self):
"Test getting the properties for the tabcontrol"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testRowCount(self):
self.assertEquals(3, self.ctrl.RowCount())
def testGetSelectedTab(self):
self.assertEquals(6, self.ctrl.GetSelectedTab())
self.ctrl.Select(0)
self.assertEquals(0, self.ctrl.GetSelectedTab())
self.ctrl.Select("Jupiter")
self.assertEquals(4, self.ctrl.GetSelectedTab())
def testTabCount(self):
"Make sure the number of parts is retrieved correctly"
self.assertEquals (self.ctrl.TabCount(), 10)
def testGetTabRect(self):
"Make sure the part rectangles are retrieved correctly"
for i, rect in enumerate(self.rects):
self.assertEquals (self.ctrl.GetTabRect(i), self.rects[i])
self.assertRaises(IndexError, self.ctrl.GetTabRect, 99)
# def testGetTabState(self):
# self.assertRaises(IndexError, self.ctrl.GetTabState, 99)
#
# self.dlg.StatementEdit.SetEditText ("MSG (TCM_HIGHLIGHTITEM,1,MAKELONG(TRUE,0))")
#
# time.sleep(.3)
# # use CloseClick to allow the control time to respond to the message
# self.dlg.Send.CloseClick()
# time.sleep(2)
# print("==\n",self.ctrl.TabStates())
#
# self.assertEquals (self.ctrl.GetTabState(1), 1)
#
# def testTabStates(self):
# print(self.ctrl.TabStates())
# raise "tabstates hiay"
def testGetTabText(self):
for i, text in enumerate(self.texts):
self.assertEquals(text, self.ctrl.GetTabText(i))
self.assertRaises(IndexError, self.ctrl.GetTabText, 99)
def testClientRects(self):
self.assertEquals(self.ctrl.ClientRect(), self.ctrl.ClientRects()[0])
self.assertEquals(self.rects, self.ctrl.ClientRects()[1:])
def testSelect(self):
self.assertEquals(6, self.ctrl.GetSelectedTab())
self.ctrl.Select(1)
self.assertEquals(1, self.ctrl.GetSelectedTab())
self.ctrl.Select("Mercury")
self.assertEquals(8, self.ctrl.GetSelectedTab())
self.assertRaises(IndexError, self.ctrl.Select, 99)
class ToolbarTestCases(unittest.TestCase):
"Unit tests for the UpDownWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(os.path.join(controlspy_folder, "toolbar.exe"))
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.Toolbar.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "Toolbar")
def testTexts(self):
"Make sure the texts are set correctly"
for txt in self.ctrl.Texts():
self.assertEquals (isinstance(txt, six.string_types), True)
def testGetProperties(self):
"Test getting the properties for the toolbar control"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
self.assertEquals(
self.ctrl.ButtonCount(), props['ButtonCount'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testButtonCount(self):
"Test the button count method of the toolbar"
self.assertEquals(self.ctrl.ButtonCount(), 14)
def testGetButton(self):
self.assertRaises(IndexError, self.ctrl.GetButton, 29)
def testGetButtonRect(self):
self.assertEquals(self.ctrl.GetButtonRect(0), RECT(6, 0, 29, 22))
def testGetToolTipsControls(self):
tips = self.ctrl.GetToolTipsControl()
self.assertEquals("Button ID 7" in tips.Texts(),True)
def testPressButton(self):
self.ctrl.PressButton(0)
#print(self.ctrl.Texts())
self.assertRaises(
findbestmatch.MatchError,
self.ctrl.PressButton,
"asdfdasfasdf")
# todo more tests for pressbutton
self.ctrl.PressButton("10")
class RebarTestCases(unittest.TestCase):
"Unit tests for the UpDownWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(os.path.join(controlspy_folder, "rebar.exe"))
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.Rebar.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "ReBar")
def testTexts(self):
"Make sure the texts are set correctly"
for txt in self.ctrl.Texts():
self.assertEquals (isinstance(txt, six.string_types), True)
def testBandCount(self):
self.assertEquals(self.ctrl.BandCount(), 2)
def testGetBand(self):
self.assertRaises(IndexError, self.ctrl.GetBand, 99)
self.assertRaises(IndexError, self.ctrl.GetBand, 2)
band = self.ctrl.GetBand(0)
self.assertEquals(band.hwndChild, self.dlg.ToolBar.handle)
#self.assertEquals(band.text, "blah")
def testGetToolTipsControl(self):
self.assertEquals(self.ctrl.GetToolTipsControl(), None)
class ToolTipsTestCases(unittest.TestCase):
"Unit tests for the tooltips class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
self.texts = [u'Tooltip Tool 0', u'Tooltip Tool 1', u'Tooltip Tool 2']
# start the application
from pywinauto.application import Application
app = Application()
app.start_(os.path.join(controlspy_folder, "Tooltip.exe"))
self.app = app
self.dlg = app.MicrosoftControlSpy
tips = app.windows_(
visible_only = False,
enabled_only = False,
top_level_only = False,
class_name = "tooltips_class32")
self.ctrl = WrapHandle(tips[1])
#self.ctrl = HwndWrapper(tips[1])
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.app.kill_()
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "ToolTips")
    def testTexts(self):
        "Make sure the texts are set correctly"
        self.assertEquals(self.ctrl.Texts()[0], '')
        self.assertEquals(self.ctrl.Texts()[1:], self.texts)
def testGetProperties(self):
"Test getting the properties for the tooltips control"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testGetTip(self):
self.assertRaises(IndexError, self.ctrl.GetTip, 99)
tip = self.ctrl.GetTip(1)
self.assertEquals(tip.text, self.texts[1])
def testToolCount(self):
self.assertEquals(3, self.ctrl.ToolCount())
def testGetTipText(self):
self.assertEquals(self.texts[1], self.ctrl.GetTipText(1))
class UpDownTestCases(unittest.TestCase):
"Unit tests for the UpDownWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app.start_(os.path.join(controlspy_folder, "Up-Down.exe"))
self.app = app
self.dlg = app.MicrosoftControlSpy
self.ctrl = app.MicrosoftControlSpy.UpDown2.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always, and show checkboxes
#app.ControlStyles.ListBox1.TypeKeys(
# "{HOME}{SPACE}" + "{DOWN}"* 12 + "{SPACE}")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "UpDown")
def testTexts(self):
"Make sure the texts are set correctly"
self.assertEquals (self.ctrl.Texts()[1:], [])
def testGetProperties(self):
"Test getting the properties for the updown control"
props = self.ctrl.GetProperties()
self.assertEquals(
self.ctrl.FriendlyClassName(), props['FriendlyClassName'])
self.assertEquals(
self.ctrl.Texts(), props['Texts'])
for prop_name in props:
self.assertEquals(getattr(self.ctrl, prop_name)(), props[prop_name])
def testGetValue(self):
"Test getting up-down position"
self.assertEquals (self.ctrl.GetValue(), 0)
self.ctrl.SetValue(23)
self.assertEquals (self.ctrl.GetValue(), 23)
def testSetValue(self):
"Test setting up-down position"
self.assertEquals (self.ctrl.GetValue(), 0)
self.ctrl.SetValue(23)
self.assertEquals (self.ctrl.GetValue(), 23)
self.assertEquals(
int(self.ctrl.GetBuddyControl().Texts()[1]),
23)
def testGetBase(self):
"Test getting the base of the up-down control"
self.assertEquals (self.ctrl.GetBase(), 10)
self.dlg.StatementEdit.SetEditText ("MSG (UDM_SETBASE, 16, 0)")
# use CloseClick to allow the control time to respond to the message
self.dlg.Send.Click()
self.assertEquals (self.ctrl.GetBase(), 16)
def testGetRange(self):
"Test getting the range of the up-down control"
self.assertEquals((0, 9999), self.ctrl.GetRange())
def testGetBuddy(self):
"Test getting the buddy control"
self.assertEquals (self.ctrl.GetBuddyControl().handle, self.dlg.Edit6.handle)
    def testIncrement(self):
        "Test incrementing up-down position"
self.ctrl.Increment()
self.assertEquals (self.ctrl.GetValue(), 1)
def testDecrement(self):
"Test decrementing up-down position"
self.ctrl.SetValue(23)
self.ctrl.Decrement()
self.assertEquals (self.ctrl.GetValue(), 22)
if __name__ == "__main__":
unittest.main() | airelil/pywinauto-64 | pywinauto/unittests/test_common_controls.py | Python | lgpl-2.1 | 33,312 |
#!/usr/bin/env python
import analyze_conf
import sys
import datetime, glob, job_stats, os, subprocess, time
import operator
import matplotlib
# Set the matplotlib output mode from config if it exists
if not 'matplotlib.pyplot' in sys.modules:
try:
matplotlib.use(analyze_conf.matplotlib_output_mode)
except NameError:
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy
import scipy, scipy.stats
import argparse
import tspl, tspl_utils, lariat_utils, plot
import math
import multiprocessing, functools, itertools
import cPickle as pickle
def do_work(file,mintime,wayness,lariat_dict):
retval=(None,None,None,None,None)
res=plot.get_data(file,mintime,wayness,lariat_dict)
if (res is None):
return retval
(ts, ld, tmid,
read_rate, write_rate, stall_rate, clock_rate, avx_rate, sse_rate, inst_rate,
meta_rate, l1_rate, l2_rate, l3_rate, load_rate, read_frac, stall_frac) = res
# return (scipy.stats.tmean(stall_frac),
# scipy.stats.tmean((load_rate - (l1_rate + l2_rate +
# l3_rate))/load_rate))
mean_mem_rate=scipy.stats.tmean(read_rate+write_rate)*64.0
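  # the *64.0 above converts the mean read+write transaction rate to bytes/s, assuming 64-byte cache lines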
ename=ld.exc.split('/')[-1]
ename=tspl_utils.string_shorten(ld.comp_name(ename,ld.equiv_patterns),8)
if ename=='unknown':
return retval
flag=False
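  # flag jobs whose mean memory bandwidth is below 75 GB/s split over 16 cores (~4.7 GB/s), presumably a low-bandwidth cutoff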
if mean_mem_rate < 75.*1000000000./16.:
flag=True
return (scipy.stats.tmean(stall_frac),
scipy.stats.tmean((load_rate - (l1_rate))/load_rate),
scipy.stats.tmean(clock_rate/inst_rate),ename,
flag)
def main():
  parser = argparse.ArgumentParser(description='Look for imbalance between '
'hosts for a pair of keys')
parser.add_argument('filearg', help='File, directory, or quoted'
' glob pattern', nargs='?',default='jobs')
parser.add_argument('-p', help='Set number of processes',
nargs=1, type=int, default=[1])
n=parser.parse_args()
filelist=tspl_utils.getfilelist(n.filearg)
procs = min(len(filelist),n.p[0])
job=pickle.load(open(filelist[0]))
jid=job.id
epoch=job.end_time
ld=lariat_utils.LariatData(jid,end_epoch=epoch,daysback=3,directory=analyze_conf.lariat_path)
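  # the lariat accounting data is loaded once, from the first job in the list, and shared with every worker via partial() below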
if procs < 1:
print 'Must have at least one file'
exit(1)
pool = multiprocessing.Pool(processes=procs)
partial_work=functools.partial(do_work,mintime=3600.,wayness=16,lariat_dict=ld.ld)
results=pool.map(partial_work,filelist)
fig1,ax1=plt.subplots(1,1,figsize=(20,8),dpi=80)
fig2,ax2=plt.subplots(1,1,figsize=(20,8),dpi=80)
maxx=0.
for state in [ True, False ]:
stalls=[]
misses=[]
cpis=[]
enames=[]
for (s,m,cpi,ename,flag) in results:
if (s != None and m > 0. and m < 1.0 and flag==state):
stalls.extend([s])
misses.extend([m])
cpis.extend([cpi])
enames.extend([ename])
markers = itertools.cycle(('o','x','+','^','s','8','p',
'h','*','D','<','>','v','d','.'))
colors = itertools.cycle(('b','g','r','c','m','k','y'))
fmt={}
for e in enames:
if not e in fmt:
fmt[e]=markers.next()+colors.next()
for (s,c,e) in zip(stalls,cpis,enames):
# ax1.plot(numpy.log10(1.-(1.-s)),numpy.log10(c),
maxx=max(maxx,1./(1.-s))
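      # 1/(1-s), i.e. total cycles per executed (non-stalled) cycle, is the quantity plotted on the x axis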
ax1.plot((1./(1.-s)),(c),
marker=fmt[e][0],
markeredgecolor=fmt[e][1],
linestyle='', markerfacecolor='None',
label=e)
ax1.hold=True
ax2.plot((1./(1.-s)),(c),
marker=fmt[e][0],
markeredgecolor=fmt[e][1],
linestyle='', markerfacecolor='None',
label=e)
ax2.hold=True
#ax.plot(numpy.log10(stalls),numpy.log10(cpis),fmt)
#ax.plot(numpy.log10(1.0/(1.0-numpy.array(stalls))),numpy.log10(cpis),fmt)
ax1.set_xscale('log')
ax1.set_xlim(left=0.95,right=1.05*maxx)
ax1.set_yscale('log')
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.45, box.height])
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.45, box.height])
handles=[]
labels=[]
for h,l in zip(*ax1.get_legend_handles_labels()):
if l in labels:
continue
else:
handles.extend([h])
labels.extend([l])
ax1.legend(handles,labels,bbox_to_anchor=(1.05, 1),
loc=2, borderaxespad=0., numpoints=1,ncol=4)
ax1.set_xlabel('log(Cycles per Execution Cycle)')
ax1.set_ylabel('log(CPI)')
handles=[]
labels=[]
for h,l in zip(*ax2.get_legend_handles_labels()):
if l in labels:
continue
else:
handles.extend([h])
labels.extend([l])
ax2.legend(handles,labels,bbox_to_anchor=(1.05, 1),
loc=2, borderaxespad=0., numpoints=1,ncol=4)
ax2.set_xlabel('Cycles per Execution Cycle')
ax2.set_ylabel('CPI')
fname='miss_v_stall_log'
fig1.savefig(fname)
fname='miss_v_stall'
fig2.savefig(fname)
plt.close()
if __name__ == '__main__':
main()
| ubccr/tacc_stats | analyze/process_pickles/miss_vs_stall.py | Python | lgpl-2.1 | 5,040 |
# -*- coding: utf-8 -*-
from pysignfe.xml_sped import *
from .Rps import IdentificacaoPrestador, IdentificacaoRps
import os
DIRNAME = os.path.dirname(__file__)
class MensagemRetorno(XMLNFe):
def __init__(self):
super(MensagemRetorno, self).__init__()
self.Codigo = TagCaracter(nome=u'Codigo', tamanho=[1, 4], raiz=u'/[nfse]')
self.Mensagem = TagCaracter(nome=u'Mensagem', tamanho=[1, 200], raiz=u'/[nfse]')
self.Correcao = TagCaracter(nome=u'Correcao', tamanho=[0, 200], raiz=u'/[nfse]')
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<MensagemRetorno>'
xml += self.Codigo.xml
xml += self.Mensagem.xml
xml += self.Correcao.xml
xml += u'</MensagemRetorno>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.Codigo.xml = arquivo
self.Mensagem.xml = arquivo
self.Correcao.xml = arquivo
xml = property(get_xml, set_xml)
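    # reading the xml property serialises the object; assigning to it parses an XML document (the classes below follow the same pattern)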
class MensagemRetornoLote(XMLNFe):
def __init__(self):
super(MensagemRetornoLote, self).__init__()
self.IdentificacaoRps = IdentificacaoRps()
self.Codigo = TagCaracter(nome=u'Codigo', tamanho=[1, 4], raiz=u'/[nfse]')
self.Mensagem = TagCaracter(nome=u'Mensagem', tamanho=[1, 200], raiz=u'/[nfse]')
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<MensagemRetornoLote>'
xml += self.IdentificacaoRps.xml
xml += self.Codigo.xml
xml += self.Mensagem.xml
xml += u'</MensagemRetornoLote>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.IdentificacaoRps.xml = arquivo
self.Codigo.xml = arquivo
self.Mensagem.xml = arquivo
xml = property(get_xml, set_xml)
class ListaMensagemRetornoLote(XMLNFe):
def __init__(self):
super(ListaMensagemRetornoLote, self).__init__()
self.MensagemRetornoLote = []
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ListaMensagemRetornoLote>'
for m in self.MensagemRetornoLote:
xml += tira_abertura(m.xml)
xml += u'</ListaMensagemRetornoLote>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.MensagemRetornoLote = self.le_grupo('[nfse]//ListaMensagemRetornoLote/MensagemRetornoLote', MensagemRetornoLote)
xml = property(get_xml, set_xml)
class ListaMensagemRetorno(XMLNFe):
def __init__(self):
super(ListaMensagemRetorno, self).__init__()
self.MensagemRetorno = []
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ListaMensagemRetorno>'
for m in self.MensagemRetorno:
xml += tira_abertura(m.xml)
xml += u'</ListaMensagemRetorno>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.MensagemRetorno = self.le_grupo('[nfse]//ListaMensagemRetorno/MensagemRetorno', MensagemRetorno)
xml = property(get_xml, set_xml)
class ConsultarSituacaoLoteRpsEnvio(XMLNFe):
def __init__(self):
super(ConsultarSituacaoLoteRpsEnvio, self).__init__()
self.versao = TagDecimal(nome=u'ConsultarSituacaoLoteRpsEnvio', propriedade=u'versao', namespace=NAMESPACE_NFSE, valor=u'1.00', raiz=u'/')
self.Prestador = IdentificacaoPrestador()
self.Protocolo = TagCaracter(nome=u'Protocolo', tamanho=[ 1, 50], raiz=u'/')
self.caminho_esquema = os.path.join(DIRNAME, u'schema/')
self.arquivo_esquema = u'nfse.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ConsultarSituacaoLoteRpsEnvio xmlns="'+ NAMESPACE_NFSE + '">'
xml += self.Prestador.xml.replace(ABERTURA, u'')
xml += self.Protocolo.xml
xml += u'</ConsultarSituacaoLoteRpsEnvio>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.Prestador.xml = arquivo
self.Protocolo.xml = arquivo
xml = property(get_xml, set_xml)
class ConsultarSituacaoLoteRpsResposta(XMLNFe):
def __init__(self):
super(ConsultarSituacaoLoteRpsResposta, self).__init__()
self.NumeroLote = TagInteiro(nome=u'NumeroLote', tamanho=[1, 15], raiz=u'/')
self.Situacao = TagInteiro(nome=u'Situacao', tamanho=[1, 1], raiz=u'/')
self.ListaMensagemRetorno = ListaMensagemRetorno()
self.caminho_esquema = os.path.join(DIRNAME, u'schema/')
self.arquivo_esquema = u'nfse.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += u'<ConsultarSituacaoLoteRpsResposta xmlns="'+ NAMESPACE_NFSE + '">'
xml += self.NumeroLote.xml
xml += self.Situacao.xml
xml += self.ListaMensagemRetorno.xml.replace(ABERTURA, u'')
xml += u'</ConsultarSituacaoLoteRpsResposta>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.NumeroLote.xml = arquivo
self.Situacao.xml = arquivo
self.ListaMensagemRetorno.xml = arquivo
xml = property(get_xml, set_xml) | thiagopena/PySIGNFe | pysignfe/nfse/bhiss/v10/ConsultarSituacaoLoteRps.py | Python | lgpl-2.1 | 5,547 |
# Written by Andrea Reale
# see LICENSE.txt for license information
from Tribler.Core.Subtitles.MetadataDomainObjects.SubtitleInfo import SubtitleInfo
from Tribler.Core.Subtitles.MetadataDomainObjects.MetadataDTO import MetadataDTO
from Tribler.Core.CacheDB.SqliteCacheDBHandler import BasicDBHandler
import threading
from Tribler.Core.CacheDB.sqlitecachedb import SQLiteCacheDB
import sys
from Tribler.Core.Subtitles.MetadataDomainObjects.MetadataExceptions import SignatureException, \
MetadataDBException
from Tribler.Core.Utilities.utilities import bin2str, str2bin
import sqlite3
import time
SUBTITLE_LANGUAGE_CODE = "lang"
SUBTITLE_PATH = "path"
METADATA_TABLE = "Metadata"
MD_ID_KEY = "metadata_id"
MD_PUBLISHER_KEY = "publisher_id"
MD_INFOHASH_KEY = "infohash"
MD_DESCRIPTION_KEY = "description"
MD_TIMESTAMP_KEY = "timestamp"
MD_SIGNATURE_KEY = "signature"
SUBTITLES_TABLE = "Subtitles"
SUB_MD_FK_KEY = "metadata_id_fk"
SUB_LANG_KEY = "subtitle_lang"
SUB_LOCATION_KEY = "subtitle_location"
SUB_CHECKSUM_KEY = "checksum"
SUBTITLES_HAVE_TABLE = "SubtitlesHave"
SH_MD_FK_KEY = "metadata_id_fk"
SH_PEER_ID_KEY = "peer_id"
SH_HAVE_MASK_KEY = "have_mask"
SH_TIMESTAMP = "received_ts"
# maximum number of have entries returned
# by the database (-1 for unlimited)
SH_RESULTS_LIMIT = 200
DEBUG = False
#it's good to have all of the queries in one place:
#the code is easier to read, and if some query is wrong
#it is easier to correct them all
SELECT_SUBS_JOIN_BASE = "SELECT sub." + SUB_MD_FK_KEY + ", sub." + SUB_LANG_KEY \
+ ", sub." + SUB_LOCATION_KEY \
+ ", sub." + SUB_CHECKSUM_KEY \
+ " FROM " + METADATA_TABLE + " AS md " \
+ "INNER JOIN " \
+ SUBTITLES_TABLE + " AS sub " \
+ "ON md." + MD_ID_KEY + " = sub." + SUB_MD_FK_KEY
MD_SH_JOIN_CLAUSE = \
METADATA_TABLE + " AS md " \
+ "INNER JOIN " \
+ SUBTITLES_HAVE_TABLE + " AS sh " \
+ "ON md." + MD_ID_KEY + " = sh." + SH_MD_FK_KEY
QUERIES = {
"SELECT SUBS JOIN HASH ALL" :
SELECT_SUBS_JOIN_BASE
+ " WHERE md." + MD_INFOHASH_KEY + " = ?"\
+ " AND md." + MD_PUBLISHER_KEY + " = ?;",
"SELECT SUBS JOIN HASH ONE" :
SELECT_SUBS_JOIN_BASE
+ " WHERE md." + MD_INFOHASH_KEY + " = ?"\
+ " AND md." + MD_PUBLISHER_KEY + " = ?"\
+ " AND sub." + SUB_LANG_KEY + " = ?;",
"SELECT SUBS FK ALL" :
"SELECT * FROM " + SUBTITLES_TABLE
+ " WHERE " + SUB_MD_FK_KEY + " = ?;",
"SELECT SUBS FK ONE" :
"SELECT * FROM " + SUBTITLES_TABLE
+ " WHERE " + SUB_MD_FK_KEY + " = ?"\
+ " AND " + SUB_LANG_KEY + " = ?;",
"SELECT METADATA" :
"SELECT * FROM " \
+ METADATA_TABLE + " WHERE " + MD_INFOHASH_KEY + " = ?" \
+ " AND " + MD_PUBLISHER_KEY + " = ?;",
"SELECT NRMETADATA" :
"SELECT COUNT(*) FROM " \
+ METADATA_TABLE + " WHERE " + MD_PUBLISHER_KEY + " = ?;",
"SELECT PUBLISHERS FROM INFOHASH":
"SELECT " + MD_PUBLISHER_KEY + " FROM " + METADATA_TABLE \
+ " WHERE " + MD_INFOHASH_KEY + " = ?;",
"UPDATE METADATA" :
"UPDATE " + METADATA_TABLE \
+ " SET " \
+ MD_DESCRIPTION_KEY + " = ?, " \
+ MD_TIMESTAMP_KEY + " = ?, " \
+ MD_SIGNATURE_KEY + " = ?" \
+ " WHERE " + MD_INFOHASH_KEY + " = ?" \
+ " AND " + MD_PUBLISHER_KEY + " = ?;",
"UPDATE SUBTITLES" :
"UPDATE " + SUBTITLES_TABLE \
+ " SET " + SUB_LOCATION_KEY + "= ?, " \
+ SUB_CHECKSUM_KEY + "= ?" \
+ " WHERE " + SUB_MD_FK_KEY + "= ?" \
+ " AND " + SUB_LANG_KEY + "= ?;",
"DELETE ONE SUBTITLES" :
"DELETE FROM " + SUBTITLES_TABLE \
+ " WHERE " + SUB_MD_FK_KEY + "= ? " \
+ " AND " + SUB_LANG_KEY + "= ?;",
"DELETE ONE SUBTITLE JOIN" :
"DELETE FROM " + SUBTITLES_TABLE \
+ " WHERE " + SUB_MD_FK_KEY \
+ " IN ( SELECT " + MD_ID_KEY + " FROM " + METADATA_TABLE \
+ " WHERE " + MD_PUBLISHER_KEY + " = ?" \
+ " AND " + MD_INFOHASH_KEY + " = ? )" \
+ " AND " + SUB_LANG_KEY + "= ?;",
"DELETE ALL SUBTITLES" :
"DELETE FROM " + SUBTITLES_TABLE \
+ " WHERE " + SUB_MD_FK_KEY + "= ?;",
"DELETE METADATA PK" :
"DELETE FROM " + METADATA_TABLE \
+ " WHERE " + MD_ID_KEY + " = ?;",
"INSERT METADATA" :
"INSERT or IGNORE INTO " + METADATA_TABLE + " VALUES " \
+ "(NULL,?,?,?,?,?)",
"INSERT SUBTITLES" :
"INSERT INTO " + SUBTITLES_TABLE + " VALUES (?, ?, ?, ?);",
"SELECT SUBTITLES WITH PATH":
"SELECT sub." + SUB_MD_FK_KEY + ", sub." + SUB_LOCATION_KEY + ", sub." \
+ SUB_LANG_KEY + ", sub." + SUB_CHECKSUM_KEY \
+ ", m." + MD_PUBLISHER_KEY + ", m." + MD_INFOHASH_KEY \
+ " FROM " + METADATA_TABLE + " AS m " \
+"INNER JOIN " + SUBTITLES_TABLE + " AS sub "\
+ "ON m." + MD_ID_KEY + " = " + " sub." + SUB_MD_FK_KEY \
+ " WHERE " \
+ SUB_LOCATION_KEY + " IS NOT NULL;",
"SELECT SUBTITLES WITH PATH BY CHN INFO":
"SELECT sub." + SUB_LOCATION_KEY + ", sub." \
+ SUB_LANG_KEY + ", sub." + SUB_CHECKSUM_KEY \
+ " FROM " + METADATA_TABLE + " AS m " \
+"INNER JOIN " + SUBTITLES_TABLE + " AS sub "\
+ "ON m." + MD_ID_KEY + " = " + " sub." + SUB_MD_FK_KEY \
+ " WHERE sub." \
+ SUB_LOCATION_KEY + " IS NOT NULL" \
+ " AND m." + MD_PUBLISHER_KEY + " = ?"\
+ " AND m." + MD_INFOHASH_KEY + " = ?;" ,
"INSERT HAVE MASK":
"INSERT INTO " + SUBTITLES_HAVE_TABLE + " VALUES " \
+ "(?, ?, ?, ?);",
"GET ALL HAVE MASK":
"SELECT sh." + SH_PEER_ID_KEY + ", sh." + SH_HAVE_MASK_KEY \
+ ", sh." + SH_TIMESTAMP \
+ " FROM " + MD_SH_JOIN_CLAUSE + " WHERE md." + MD_PUBLISHER_KEY \
+ " = ? AND md." + MD_INFOHASH_KEY + " = ? "\
+ "ORDER BY sh." + SH_TIMESTAMP + " DESC" \
+ " LIMIT " + str(SH_RESULTS_LIMIT) + ";",
"GET ONE HAVE MASK":
"SELECT sh." + SH_HAVE_MASK_KEY \
+ ", sh." + SH_TIMESTAMP \
+ " FROM " + MD_SH_JOIN_CLAUSE + " WHERE md." + MD_PUBLISHER_KEY \
+ " = ? AND md." + MD_INFOHASH_KEY + " = ? AND sh." + SH_PEER_ID_KEY \
+ " = ?;",
"UPDATE HAVE MASK":
"UPDATE " + SUBTITLES_HAVE_TABLE \
+ " SET " + SH_HAVE_MASK_KEY + " = ?, " \
+ SH_TIMESTAMP + " = ?" \
+ " WHERE " + SH_PEER_ID_KEY + " = ?" \
+ " AND " + SH_MD_FK_KEY + " IN " \
+ "( SELECT + " + MD_ID_KEY+ " FROM " \
+ METADATA_TABLE + " WHERE + "\
+ MD_PUBLISHER_KEY + " = ?"\
+ " AND " + MD_INFOHASH_KEY + " = ? );",
"DELETE HAVE":
"DELETE FROM " + SUBTITLES_HAVE_TABLE \
+ " WHERE " + SH_PEER_ID_KEY + " = ?" \
+ " AND " + SH_MD_FK_KEY + " IN " \
+ "( SELECT + " + MD_ID_KEY+ " FROM " \
+ METADATA_TABLE + " WHERE + "\
+ MD_PUBLISHER_KEY + " = ?"\
+ " AND " + MD_INFOHASH_KEY + " = ? );",
"CLEANUP OLD HAVE":
"DELETE FROM " + SUBTITLES_HAVE_TABLE \
+ " WHERE " + SH_TIMESTAMP + " < ? " \
+ " AND " + SH_PEER_ID_KEY + " NOT IN " \
+ "( SELECT md." + MD_PUBLISHER_KEY + " FROM " \
+ METADATA_TABLE + " AS md WHERE md." + MD_ID_KEY \
+ " = " + SH_MD_FK_KEY + " );"
}
class MetadataDBHandler (object, BasicDBHandler):
"""
Data Access Layer for the subtitles database.
"""
__single = None # used for multithreaded singletons pattern
_lock = threading.RLock()
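    # Typical usage (sketch; only methods defined below are used):
    #   db = MetadataDBHandler.getInstance()
    #   db.insertMetadata(metadata_dto)                # store a signed MetadataDTO
    #   subs = db.getAllSubtitles(channel, infohash)   # -> {lang: SubtitleInfo}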
@staticmethod
def getInstance(*args, **kw):
if MetadataDBHandler.__single is None:
MetadataDBHandler._lock.acquire()
try:
if MetadataDBHandler.__single is None:
MetadataDBHandler(*args, **kw)
finally:
MetadataDBHandler._lock.release()
return MetadataDBHandler.__single
def __init__(self, db=SQLiteCacheDB.getInstance()):
# notice that singleton pattern is not enforced.
# This way the code is more easy
# to test.
try:
MetadataDBHandler._lock.acquire()
MetadataDBHandler.__single = self
finally:
MetadataDBHandler._lock.release()
try:
self._db = db
            # Not sure whether the BasicDBHandler initialisers below are needed;
            # assuming they are not until proven otherwise. (Ask Nitin)
# BasicDBHandler.__init__(self,db,METADATA_TABLE)
# BasicDBHandler.__init__(self,db,SUBTITLES_TABLE)
print >> sys.stderr, "Metadata: DB made"
except:
print >> sys.stderr, "Metadata: couldn't make the tables"
print >> sys.stderr, "Metadata DB Handler initialized"
def commit(self):
self._db.commit()
# Commented for the sake of API simplicity
# But then uncommented for coding simplicity :P
def getAllSubtitles(self, channel, infohash):
"""
Get all the available subtitles for a channel and infohash.
        Returns a dictionary describing the subtitles that are available for
        a given channel and infohash.
@param channel: the perm_id of the channel owner (binary)
        @param infohash: the infohash of a channel element as it
is announced in ChannelCast (binary)
@return: a dictionary of { lang : SubtitleInfo instance}
"""
query = QUERIES["SELECT SUBS JOIN HASH ALL"]
infohash = bin2str(infohash)
channel = bin2str(channel)
results = self._db.fetchall(query, (infohash, channel))
subsDict = {}
for entry in results:
subsDict[entry[1]] = SubtitleInfo(entry[1], entry[2], entry[3])
return subsDict
def _deleteSubtitleByChannel(self, channel, infohash, lang):
'''
Remove a subtitle for a channel infohash
@param channel: the channel where the subtitle is (binary)
@param infohash: the infohash of the torrent referred by the subtitle
(binary)
@param lang: ISO-639-2 language code of the subtitle to remove
'''
query = QUERIES["DELETE ONE SUBTITLE JOIN"]
infohash = bin2str(infohash)
channel = bin2str(channel)
self._db.execute_write(query,(channel, infohash, lang))
def _getAllSubtitlesByKey(self, metadataKey):
'''
        Retrieves every subtitle associated with a given Metadata table key
Given an instance of the Metadata table artificial key, retrieves
every subtitle instance associated to that key
@param metadataKey: a value of an artificial key in the Metadata table
@return : a dictionary of type {lang : SubtitleInfo}, empty if no results
'''
query = QUERIES["SELECT SUBS FK ALL"]
results = self._db.fetchall(query, (metadataKey,))
subsDict = {}
for entry in results:
subsDict[entry[1]] = SubtitleInfo(entry[1], entry[2], str2bin(entry[3]))
return subsDict
# commented for the sake of API simplicity
# def hasSubtitleInLang(self,channel,infohash, lang):
# """
# Checks whether an item in a channel as available subitltles.
#
# @param channel: a perm_id identifying the owner of the channel.
# @param infohash: the infohash of an item, as announced in channelcast
# messages.
# @param lang: a 3 characters ISO 639-2 language code, identifying
# the desired subtitle langugage
# @return: bool
# """
# sub = self.getSubtitle(channel, infohash, lang)
# return sub is not None
#
# commented for the sake of api simplicity
# But then uncommented for coding simplicity :P
def getSubtitle(self, channel, infohash, lang):
"""
Get a subtitle for a language for a given item in a given channel.
        Returns the details regarding a subtitle in a given language for a
given item in a given channel, if it exists. Otherwise it returns
None.
@param channel: a perm_id identifying the owner of the channel.
@param infohash: the infohash of an item, as announced in channelcast
messages.
@param lang: a 3 characters ISO 639-2 language code, identifying
            the desired subtitle language
@return: a SubtitleInfo instance
"""
query = QUERIES["SELECT SUBS JOIN HASH ONE"]
infohash = bin2str(infohash)
channel = bin2str(channel)
res = self._db.fetchall(query, (infohash, channel, lang))
if len(res) == 0 :
return None
elif len(res) == 1 :
checksum = str2bin(res[0][3])
return SubtitleInfo(res[0][1], res[0][2], checksum)
else :
            # This should not be possible due to the database constraints
            raise MetadataDBException("Metadata DB Constraint violated!")
def _getSubtitleByKey(self, metadata_fk, lang):
"""
Return a subtitle in a given language for a key of the Metadata table.
Given an instance of the artificial key in the metadata table,
retrieves a SubtitleInfo instance for that key and the language passed in.
@param metadata_fk: a key in the metadata table
@param lang: a language code for the subtitle to be retrieved
@return: a SubtitleInfo instance, or None
"""
query = QUERIES["SELECT SUBS FK ONE"]
res = self._db.fetchall(query, (metadata_fk, lang))
if len(res) == 0 :
return None
elif len(res) == 1 :
checksum = str2bin(res[0][3])
return SubtitleInfo(res[0][1], res[0][2], checksum)
else :
            # This should not be possible due to the database constraints
            raise MetadataDBException("Metadata DB Constraint violated!")
def getMetadata(self, channel, infohash):
"""
Returns a MetadataDTO instance for channel/infohash if available in DB
        Given a channel/infohash couple, returns a MetadataDTO instance built
        with the values retrieved from the Metadata and Subtitles DB. If
        there is no result, returns None.
@param channel: the permid of the channel's owner (binary)
@param infohash: the infohash of the item the metadata refers to
(binary)
@return: a MetadataDTO instance comprehensive of subtitles if any
metadata is found in the DB. None otherwise.
"""
query = QUERIES["SELECT METADATA"]
infohash = bin2str(infohash)
channel = bin2str(channel)
res = self._db.fetchall(query, (infohash, channel))
if len(res) == 0:
return None
if len(res) > 1:
raise MetadataDBException("Metadata DB Constraint violated")
metaTuple = res[0]
subsDictionary = self._getAllSubtitlesByKey(metaTuple[0])
publisher = str2bin(metaTuple[1])
infohash = str2bin(metaTuple[2])
timestamp = int(metaTuple[4])
description = unicode(metaTuple[3])
signature = str2bin(metaTuple[5])
toReturn = MetadataDTO(publisher, infohash,
timestamp, description, None,
signature)
for sub in subsDictionary.itervalues():
toReturn.addSubtitle(sub)
return toReturn
def getNrMetadata(self, channel):
"""
Returns the number of metadata objects inside a channel
"""
query = QUERIES['SELECT NRMETADATA']
channel = bin2str(channel)
return self._db.fetchone(query, (channel, ))
def getAllMetadataForInfohash(self, infohash):
"""
Returns a list of MetadataDTO instances for a given infohash
Given a torrent infohash returns a list of MetadataDTO instances for
that infohash. Each one of the MetadataDTO refers to a different
channel.
@param infohash: the infohash for the torrent (binary)
        @return: a list of MetadataDTO instances (or empty list if nothing
is found)
"""
assert infohash is not None
strinfohash = bin2str(infohash)
query = QUERIES["SELECT PUBLISHERS FROM INFOHASH"]
channels = self._db.fetchall(query, (strinfohash,))
return [self.getMetadata(str2bin(entry[0]), infohash) for entry in channels]
def hasMetadata(self, channel, infohash):
"""
Checks whether there exists some metadata for an item in a channel.
@param channel: a perm_id identifying the owner of the channel.
@param infohash: the infohash of an item, as announced in channelcast
messages.
@return boolean
"""
query = QUERIES["SELECT METADATA"]
infohash = bin2str(infohash)
channel = bin2str(channel)
res = self._db.fetchall(query, (infohash, channel))
return len(res) != 0
def insertMetadata(self, metadata_dto):
'''
Insert the metadata contained in a Metadata DTO in the database.
If an entry relative to the same channel and infohash of the provided
dto already exists in the db, the db is updated only if the timestamp
        of the new dto is newer than the entry in the database.
        If there is no such entry, a new row in the Metadata DB is created
along with the required entries in the SubtitleInfo DB
@type metadata_dto: MetadataDTO
        @param metadata_dto: an instance of MetadataDTO describing metadata
        @return True if an existing entry was updated, false if a new entry
        was inserted. Otherwise None.
'''
assert metadata_dto is not None
assert isinstance(metadata_dto, MetadataDTO)
        #try to retrieve a corresponding record for channel, infohash
        #won't do anything if the metadata_dto is not correctly signed
if not metadata_dto.verifySignature():
raise SignatureException("Metadata to insert is not properly" \
"signed")
select_query = QUERIES["SELECT METADATA"]
signature = bin2str(metadata_dto.signature)
infohash = bin2str(metadata_dto.infohash)
channel = bin2str(metadata_dto.channel)
res = self._db.fetchall(select_query,
(infohash, channel))
isUpdate = False
if len(res) != 0 :
            #update only if the new message is newer
if metadata_dto.timestamp > res[0][4] :
query = QUERIES["UPDATE METADATA"]
self._db.execute_write(query,
(metadata_dto.description,
metadata_dto.timestamp,
signature,
infohash,
channel,),
False) #I don't want the transaction to commit now
fk_key = res[0][0]
isUpdate = True
else:
return
else: #if is this a whole new metadata item
query = QUERIES["INSERT METADATA"]
self._db.execute_write(query,
(channel,
infohash,
metadata_dto.description,
metadata_dto.timestamp,
signature,
),
True)
if DEBUG:
print >> sys.stderr, "Performing query on db: " + query
newRows = self._db.fetchall(select_query,
(infohash, channel))
if len(newRows) == 0 :
raise IOError("No results, while there should be one")
fk_key = newRows[0][0]
self._insertOrUpdateSubtitles(fk_key, metadata_dto.getAllSubtitles(), \
False)
self._db.commit() #time to commit everything
return isUpdate
def _insertOrUpdateSubtitles(self, fk_key, subtitles, commitNow=True):
"""
        Given a dictionary of subtitles, updates the corresponding entries.
This method takes as input a foreign key for the Metadata table,
and a dictionary of type {lang : SubtitleInfo}. Then it updates the
SubtitleInfo table, updating existing entries, deleting entries that are
in the db but not in the passed dictionary, and inserting entries
that are in the dictionary but not in the db.
@param fk_key: a foreign key from the Metadata table. Notice that
sqlite does not enforce the fk constraint. Be careful!
@param subtitles: a dictionary {lang : SubtitleInfo} (subtitle must be
an instance of SubtitleInfo)
@param commitNow: if False the transaction is not committed
"""
allSubtitles = self._getAllSubtitlesByKey(fk_key)
oldSubsSet = frozenset(allSubtitles.keys())
newSubsSet = frozenset(subtitles.keys())
commonLangs = oldSubsSet & newSubsSet
newLangs = newSubsSet - oldSubsSet
toDelete = oldSubsSet - newSubsSet
#update existing subtitles
for lang in commonLangs:
self._updateSubtitle(fk_key, subtitles[lang], False)
#remove subtitles that are no more in the set
for lang in toDelete:
self._deleteSubtitle(fk_key, lang, False)
#insert new subtitles
for lang in newLangs:
self._insertNewSubtitle(fk_key, subtitles[lang], False)
if commitNow:
self._db.commit()
def _updateSubtitle(self, metadata_fk, subtitle, commitNow=True):
"""
Update an entry in the Subtitles database.
If the entry identified by metadata_fk, subtitle.lang does not exist
in the subtitle database this method does nothing.
@param metadata_fk: foreign key of the metadata table
@param subtitle: instance of Subitle containing the data to insert
@param commitNow: if False, this method does not commit the changes to
the database
"""
assert metadata_fk is not None
assert subtitle is not None
assert isinstance(subtitle, SubtitleInfo)
toUpdate = self._getSubtitleByKey(metadata_fk, subtitle.lang)
if toUpdate is None:
return
query = QUERIES["UPDATE SUBTITLES"]
checksum = bin2str(subtitle.checksum)
self._db.execute_write(query, (subtitle.path,
checksum, metadata_fk, subtitle.lang),
commitNow)
def updateSubtitlePath(self, channel, infohash, lang, newPath, commitNow=True):
"""
Updates a subtitle entry in the database if it exists.
Given the channel, the infohash, and a SubtitleInfo instance,
        the entry relative to that subtitle is updated according
to the details in the SubtitleInfo instance.
If an instance for the provided channel, infohash, and language
does not already exist in the db, nothing is done.
@param channel: the channel id (permid) of the channel for the
subtitle (binary)
        @param infohash: the infohash of the item the subtitle refers to
(binary)
@param lang: the language of the subtitle to update
@param path: the new path of the subtitle. None to indicate that the
subtitle is not available
@return True if an entry was updated in the db. False if nothing
got written on the db
@precondition: subtitle.lang is not None
"""
query = QUERIES["SELECT SUBS JOIN HASH ONE"]
channel = bin2str(channel)
infohash = bin2str(infohash)
res = self._db.fetchall(query, (infohash, channel, lang))
if len(res) > 1 :
raise MetadataDBException("Metadata DB constraint violated")
elif len(res) == 0 :
if DEBUG:
print >> sys.stderr, "Nothing to update for channel %s, infohash %s, lang"\
" %s. Doing nothing." % (channel[-10:],\
infohash, lang)
return False
else:
query = QUERIES["UPDATE SUBTITLES"]
self._db.execute_write(query, (newPath,
res[0][3], res[0][0], lang),
commitNow)
return True
def _deleteSubtitle(self, metadata_fk, lang, commitNow=True):
"""
Delete an entry from the subtitles table.
        Given a foreign key from the metadata table and a language, deletes
the corresponding entry in the subtitle table. If the entry
is not found, it does nothing.
@param metadata_fk: a foreign key from the Metadata table
@param lang: a 3 characters language code
@param commitNow: if False does not commit the transaction
"""
assert metadata_fk is not None
assert lang is not None
query = QUERIES["DELETE ONE SUBTITLES"]
self._db.execute_write(query, (metadata_fk, lang), commitNow)
def _insertNewSubtitle(self, metadata_fk, subtitle, commitNow=True) :
"""
Insert a new subtitle entry in the Subtitles table.
Given a foreign key from the Metadata table, and a SubtitleInfo instance
        describing the subtitle to insert, adds it to the Subtitles table.
This method assumes that that entry does not already exist in the
table.
NOTICE that sqlite does not enforce the foreign key constraint,
so be careful about integrity
"""
assert metadata_fk is not None
assert subtitle is not None
assert isinstance(subtitle, SubtitleInfo)
query = QUERIES["INSERT SUBTITLES"]
checksum = bin2str(subtitle.checksum)
self._db.execute_write(query, (metadata_fk, subtitle.lang,
subtitle.path, checksum),
commitNow)
def deleteMetadata(self, channel, infohash):
"""
Removes all the metadata associated to a channel/infohash.
Everything is dropped from both the Metadata and Subtitles db.
@param channel: the permid of the channel's owner
        @param infohash: the infohash of the entry
"""
assert channel is not None
assert infohash is not None
channel = bin2str(channel)
infohash = bin2str(infohash)
query = QUERIES["SELECT METADATA"]
if DEBUG:
print >> sys.stderr, "Performing query on db: " + query
res = self._db.fetchall(query, (infohash, channel))
if len(res) == 0 :
return
if len(res) > 1 :
raise IOError("Metadata DB constraint violated")
metadata_fk = res[0][0]
self._deleteAllSubtitles(metadata_fk, False)
query = QUERIES["DELETE METADATA PK"]
self._db.execute_write(query, (metadata_fk,), False)
self._db.commit()
def _deleteAllSubtitles(self, metadata_fk, commitNow):
query = QUERIES["DELETE ALL SUBTITLES"]
self._db.execute_write(query, (metadata_fk,), commitNow)
def getAllLocalSubtitles(self):
'''
Returns a structure containing all the subtitleInfos that are pointing
to a local path
@return a dictionary like this:
{ ...
channel1 : { infohash1 : [ SubtitleInfo1, ...] }
...
}
'''
query = QUERIES["SELECT SUBTITLES WITH PATH"]
res = self._db.fetchall(query)
result = {}
for entry in res:
# fk = entry[0]
path = entry[1]
lang = entry[2]
checksum = str2bin(entry[3])
channel = str2bin(entry[4])
infohash = str2bin(entry[5])
s = SubtitleInfo(lang, path, checksum)
if channel not in result:
result[channel] = {}
if infohash not in result[channel]:
result[channel][infohash] = []
result[channel][infohash].append(s)
return result
def getLocalSubtitles(self, channel, infohash):
'''
Returns a dictionary containing all the subtitles pointing
        to a local path for the given channel and infohash
@param channel: binary channel_id(permid)
@param infohash: binary infohash
@rtype: dict
@return: a dictionary like this:
{
...
langCode : SubtitleInfo,
...
}
The dictionary will be empty if no local subtitle
is available.
'''
query = QUERIES["SELECT SUBTITLES WITH PATH BY CHN INFO"]
channel = bin2str(channel)
infohash = bin2str(infohash)
res = self._db.fetchall(query,(channel,infohash))
result = {}
for entry in res:
location = entry[0]
language = entry[1]
checksum = str2bin(entry[2])
subInfo = SubtitleInfo(language, location, checksum)
result[language] = subInfo
return result
def insertHaveMask(self, channel, infohash, peer_id, havemask, timestamp=None):
'''
Store a received have mask in the db
Each inserted rows represent a delcaration of subtitle
availability from peer_id, for some subtitles for
a torrent identified by infohash in a channel identified
by channel.
@type channel: str
@param channel: channel_id (binary)
@type infohash: str
@param infohash: the infohash of a torrent (binary)
@type peer_id: str
@param peer_id: peer from whom the infomask was received.(ie its binary permid)
@type havemask: int
@param havemask: a non-negative integer. It must be smaller
                         than 2**32.
@precondition: an entry for (channel, infohash) must already
exist in the database
'''
query = QUERIES["SELECT METADATA"]
if timestamp is None:
timestamp = int(time.time())
channel = bin2str(channel)
infohash = bin2str(infohash)
peer_id = bin2str(peer_id)
res = self._db.fetchall(query, (infohash, channel))
if len(res) != 1:
raise MetadataDBException("No entry in the MetadataDB for %s, %s" %\
(channel[-10:],infohash))
metadata_fk = res[0][0]
insertQuery = QUERIES["INSERT HAVE MASK"]
try:
self._db.execute_write(insertQuery, (metadata_fk, peer_id, havemask, timestamp))
except sqlite3.IntegrityError,e:
raise MetadataDBException(str(e))
def updateHaveMask(self,channel,infohash,peer_id, newMask, timestamp=None):
'''
Store a received have mask in the db
(See insertHaveMask for description)
@type channel: str
@param channel: channel_id (binary)
@type infohash: str
@param infohash: the infohash of a torrent (binary)
@type peer_id: str
@param peer_id: peer from whom the infomask was received.(ie its binary permid)
        @type havemask: int
        @param havemask: a non-negative integer. It must be smaller
                         than 2**32.
'''
channel = bin2str(channel)
infohash = bin2str(infohash)
peer_id = bin2str(peer_id)
updateQuery = QUERIES["UPDATE HAVE MASK"]
if timestamp is None:
timestamp = int(time.time())
self._db.execute_write(updateQuery,
(newMask,timestamp,peer_id, channel, infohash))
def deleteHaveEntry(self, channel, infohash, peer_id):
'''
Delete a row from the SubtitlesHave db.
If the row is not in the db nothing happens.
@type channel: str
@param channel: channel_id (binary)
@type infohash: str
@param infohash: the infohash of a torrent (binary)
@type peer_id: str
@param peer_id: peer from whom the infomask was received.(ie its binary permid)
@postcondition: if a row identified by channel, infohash, peer_id
was in the database, it will no longer be there
at the end of this method call
'''
channel = bin2str(channel)
infohash = bin2str(infohash)
peer_id = bin2str(peer_id)
deleteQuery = QUERIES["DELETE HAVE"]
self._db.execute_write(deleteQuery,
(peer_id,channel,infohash))
def getHaveMask(self, channel, infohash, peer_id):
'''
Returns the have mask for a single peer if available.
@type channel: str
@param channel: channel_id (binary)
@type infohash: str
@param infohash: the infohash of a torrent (binary)
@type peer_id: str
@param peer_id: peer from whom the infomask was received.(ie its binary permid)
@rtype: int
@return: the have mask relative to channel, infohash, and peer.
If not available returns None
@postcondition: the return value is either None or a non-negative
integer smaller then 2**32
'''
query = QUERIES["GET ONE HAVE MASK"]
channel = bin2str(channel)
infohash = bin2str(infohash)
peer_id = bin2str(peer_id)
res = self._db.fetchall(query,(channel,infohash,peer_id))
if len(res) <= 0:
return None
elif len(res) > 1:
raise AssertionError("channel,infohash,peer_id should be unique")
else:
return res[0][0]
def getHaveEntries(self, channel, infohash):
'''
Return a list of have entries for subtitles for a torrent
in a channel.
This method returns a list of tuple, like:
[
...
(peer_id, haveMask, timestamp),
...
]
(peer_id) is the perm_id of a Tribler
Peer, haveMask is an integer value representing a
bitmask of subtitles owned by that peer.
Timestamp is the timestamp at the time the havemask
was received.
The results are ordered by descending timestamp.
If there are no
        entries for the given channel, infohash pair, the returned
list will be empty
@type channel: str
@param channel: channel_id (binary)
@type infohash: str
@param infohash: the infohash of a torrent (binary)
@rtype: list
@return: see description
'''
query = QUERIES["GET ALL HAVE MASK"]
channel = bin2str(channel)
infohash = bin2str(infohash)
res = self._db.fetchall(query,(channel,infohash))
returnlist = list()
for entry in res:
peer_id = str2bin(entry[0])
haveMask = entry[1]
timestamp = entry[2]
returnlist.append((peer_id, haveMask, timestamp))
return returnlist
def cleanupOldHave(self, limit_ts):
'''
Remove from the SubtitlesHave database every entry
        received at a timestamp that is (strictly) less than limit_ts
This method does not remove have messages sent by
the publisher of the channel.
@type limit_ts: int
@param limit_ts: a timestamp. All the entries in the
                         database having timestamp less than
                         limit_ts will be removed, except if
they were received by the publisher
of the channel
'''
cleanupQuery = QUERIES["CLEANUP OLD HAVE"]
self._db.execute_write(cleanupQuery,(limit_ts,))
def insertOrUpdateHave(self, channel, infohash, peer_id, havemask, timestamp=None):
'''
Store a received have mask in the db
        Each inserted row represents a declaration of subtitle
availability from peer_id, for some subtitles for
a torrent identified by infohash in a channel identified
by channel.
        If a row for the given (channel, infohash, peer_id) already exists, it
        is updated according to the parameters. Otherwise
a new row is added to the db
@type channel: str
@param channel: channel_id (binary)
@type infohash: str
@param infohash: the infohash of a torrent (binary)
@type peer_id: str
@param peer_id: peer from whom the infomask was received.(ie its binary permid)
@type havemask: int
@param havemask: a non-negative integer. It must be smaller
                         than 2**32.
@precondition: an entry for (channel, infohash) must already
exist in the database
'''
if timestamp is None:
timestamp = int(time.time())
if self.getHaveMask(channel, infohash, peer_id) is not None:
self.updateHaveMask(channel, infohash, peer_id, havemask, timestamp)
else:
self.insertHaveMask(channel, infohash, peer_id, havemask, timestamp)
| egbertbouman/tribler-g | Tribler/Core/CacheDB/MetadataDBHandler.py | Python | lgpl-2.1 | 40,875 |
__author__ = 'fpena'
import numpy as np
import lda
import lda.datasets
def run():
# document-term matrix
X = lda.datasets.load_reuters()
print("type(X): {}".format(type(X)))
print("shape: {}\n".format(X.shape))
# the vocab
vocab = lda.datasets.load_reuters_vocab()
print("type(vocab): {}".format(type(vocab)))
print("len(vocab): {}\n".format(len(vocab)))
# titles for each story
titles = lda.datasets.load_reuters_titles()
print("type(titles): {}".format(type(titles)))
print("len(titles): {}\n".format(len(titles)))
doc_id = 0
word_id = 3117
print("doc id: {} word id: {}".format(doc_id, word_id))
print("-- count: {}".format(X[doc_id, word_id]))
print("-- word : {}".format(vocab[word_id]))
print("-- doc : {}".format(titles[doc_id]))
model = lda.LDA(n_topics=20, n_iter=500, random_state=1)
model.fit(X)
topic_word = model.topic_word_
print("type(topic_word): {}".format(type(topic_word)))
print("shape: {}".format(topic_word.shape))
for n in range(5):
sum_pr = sum(topic_word[n,:])
print("topic: {} sum: {}".format(n, sum_pr))
n = 5
for i, topic_dist in enumerate(topic_word):
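        # argsort is ascending, so slicing from the end picks the n most probable words for this topic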
topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n+1):-1]
print('*Topic {}\n- {}'.format(i, ' '.join(topic_words)))
doc_topic = model.doc_topic_
print("type(doc_topic): {}".format(type(doc_topic)))
print("shape: {}".format(doc_topic.shape))
for n in range(5):
sum_pr = sum(doc_topic[n,:])
print("document: {} sum: {}".format(n, sum_pr))
for n in range(10):
topic_most_pr = doc_topic[n].argmax()
print("doc: {} topic: {}\n{}...".format(n,
topic_most_pr,
titles[n][:50]))
reuters_dataset = lda.datasets.load_reuters()
vocab = lda.datasets.load_reuters_vocab()
titles = lda.datasets.load_reuters_titles()
print('Dataset shape', reuters_dataset.shape)
print(reuters_dataset[0].shape)
print('Vocab shape', len(vocab))
print(vocab[0])
print('Titles shape', len(titles))
print(titles[0])
print(titles[1])
print(titles[100])
for word in reuters_dataset[0]:
if word > 1:
print(word)
| melqkiades/yelp | source/python/topicmodeling/external/lda_testing.py | Python | lgpl-2.1 | 2,284 |
#####################################################################
# linktest_rsp_header.py
#
# (c) Copyright 2021, Benjamin Parzella. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#####################################################################
"""Header for the hsms linktest response."""
from .header import HsmsHeader
class HsmsLinktestRspHeader(HsmsHeader):
"""
Header for Linktest Response.
Header for message with SType 6.
"""
def __init__(self, system):
"""
Initialize a hsms linktest response.
:param system: message ID
:type system: integer
**Example**::
>>> import secsgem.hsms
>>>
>>> secsgem.hsms.HsmsLinktestRspHeader(10)
HsmsLinktestRspHeader({sessionID:0xffff, stream:00, function:00, pType:0x00, sType:0x06, \
system:0x0000000a, requireResponse:False})
"""
HsmsHeader.__init__(self, system, 0xFFFF)
self.requireResponse = False
self.stream = 0x00
self.function = 0x00
self.pType = 0x00
self.sType = 0x06
| bparzella/secsgem | secsgem/hsms/linktest_rsp_header.py | Python | lgpl-2.1 | 1,587 |
#! /bin/env python
import sys, time, os
import pymedia.muxer as muxer
import pymedia.video.vcodec as vcodec
import pymedia.audio.acodec as acodec
import pymedia.audio.sound as sound
if os.environ.has_key( 'PYCAR_DISPLAY' ) and os.environ[ 'PYCAR_DISPLAY' ]== 'directfb':
import pydfb as pygame
YV12= pygame.PF_YV12
else:
import pygame
YV12= pygame.YV12_OVERLAY
def videoDecodeBenchmark( inFile, opt ):
pygame.init()
pygame.display.set_mode( (800,600), 0 )
ovl= None
dm= muxer.Demuxer( inFile.split( '.' )[ -1 ] )
f= open( inFile, 'rb' )
s= f.read( 400000 )
r= dm.parse( s )
v= filter( lambda x: x[ 'type' ]== muxer.CODEC_TYPE_VIDEO, dm.streams )
if len( v )== 0:
raise 'There is no video stream in a file %s' % inFile
v_id= v[ 0 ][ 'index' ]
print 'Assume video stream at %d index: ' % v_id
a= filter( lambda x: x[ 'type' ]== muxer.CODEC_TYPE_AUDIO, dm.streams )
if len( a )== 0:
print 'There is no audio stream in a file %s. Ignoring audio.' % inFile
opt= 'noaudio'
else:
a_id= a[ 0 ][ 'index' ]
t= time.time()
vc= vcodec.Decoder( dm.streams[ v_id ] )
print dm.streams[ v_id ]
if opt!= 'noaudio':
ac= acodec.Decoder( dm.streams[ a_id ] )
resampler= None
frames= 0
q= []
while len( s )> 0:
for fr in r:
if fr[ 0 ]== v_id:
d= vc.decode( fr[ 1 ] )
if d and d.data:
frames+= 1
#ff= open( 'c:\\test', 'wb' )
#ff.write( d.data[ 0 ] )
#ff.close()
if not ovl:
ovl= pygame.Overlay( YV12, d.size )
q.append( d )
if len( q )> 4:
try:
ovl.set_data( q[0].data )
ovl.display()
except:
ovl.display(q[0].data)
del( q[0] )
elif opt!= 'noaudio' and fr[ 0 ]== a_id:
d= ac.decode( fr[ 1 ] )
if resampler== None:
if d and d.channels> 2:
resampler= sound.Resampler( (d.sample_rate,d.channels), (d.sample_rate,2) )
else:
data= resampler.resample( d.data )
s= f.read( 400000 )
r= dm.parse( s )
tt= time.time()- t
print '%d frames in %d secs( %.02f fps )' % ( frames, tt, float(frames)/tt )
ev= pygame.event.get()
for e in ev:
if e.type== pygame.KEYDOWN and e.key== pygame.K_ESCAPE:
s= ''
break
if __name__== '__main__':
if len( sys.argv )< 2 or len( sys.argv )> 3:
print "Usage: video_bench <in_file> [ noaudio ]"
else:
s= ''
if len( sys.argv )> 2:
if sys.argv[ 2 ] not in ( 'noaudio' ):
print "Option %s not recognized. Should be 'noaudio'. Ignored..." % sys.argv[ 2 ]
else:
s= sys.argv[ 2 ]
videoDecodeBenchmark( sys.argv[ 1 ], s )
| pymedia/pymedia | examples/video_bench_ovl.py | Python | lgpl-2.1 | 2,778 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RProtgenerics(RPackage):
"""S4 generic functions needed by Bioconductor proteomics packages."""
homepage = "https://bioconductor.org/packages/ProtGenerics/"
url = "https://git.bioconductor.org/packages/ProtGenerics"
list_url = homepage
version('1.8.0', git='https://git.bioconductor.org/packages/ProtGenerics', commit='b2b3bb0938e20f58fca905f6870de7dbc9dfd7a3')
depends_on('[email protected]:3.4.9', when='@1.8.0')
| lgarren/spack | var/spack/repos/builtin/packages/r-protgenerics/package.py | Python | lgpl-2.1 | 1,699 |
#
# Copyright (C) 2008 Cournapeau David <[email protected]>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the author nor the names of any contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import warnings
import numpy as np
if sys.platform[:5] == 'linux':
BACKEND = 'ALSA'
elif sys.platform[:6] == 'darwin':
BACKEND = 'CoreAudio'
else:
BACKEND = None
if BACKEND == 'ALSA':
try:
from audiolab.soundio._alsa_backend import AlsaDevice
except ImportError, e:
warnings.warn("Could not import alsa backend; most probably, "
"you did not have alsa headers when building audiolab")
def _play(input, fs):
if input.ndim == 1:
input = input[np.newaxis, :]
nc = 1
elif input.ndim == 2:
nc = input.shape[0]
else:
raise ValueError, \
"Only input of rank 1 and 2 supported for now."
dev = AlsaDevice(fs=fs, nchannels=nc)
dev.play(input)
elif BACKEND == 'CoreAudio':
try:
from audiolab.soundio.macosx_backend import CoreAudioDevice
except ImportError, e:
print e
warnings.warn("Could not import CoreAudio backend; most probably, "
"you did not have CoreAudio headers when building audiolab")
def _play(input, fs):
if input.ndim == 1:
input = input[np.newaxis, :]
nc = 1
elif input.ndim == 2:
nc = input.shape[0]
else:
raise ValueError, \
"Only input of rank 1 and 2 supported for now."
dev = CoreAudioDevice(fs=fs, nchannels=nc)
dev.play(input)
else:
def _play(input, fs):
raise NotImplementedError, \
"No Backend implemented for your platform " \
"(detected platform is: %s)" % sys.platform
def play(input, fs=44100):
"""Play the signal in vector input to the default output device.
Only floating point input are supported: input is assumed to be in the
range [-1.0, 1.0]. Any values outside this range will be clipped by the
device.
Parameters
----------
input : array
        input signal of rank 1 or 2. Each row is assumed to be one channel.
fs : int
sampling rate (in Hz)
Notes
-----
It will fail if the sampling rate is not supported by your device. In
particular, no automatic resampling is done. Mono signals are doubled for
fake stereo for the CoreAudio framework, as it seems CoreAudio does not
handle mono on its own.
"""
return _play(input, fs)
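# Example (sketch): play one second of a 440 Hz tone at the default 44.1 kHz rate
#   import numpy as np
#   t = np.linspace(0., 1., 44100, endpoint=False)
#   play(0.5 * np.sin(2 * np.pi * 440. * t))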
| cournape/audiolab | audiolab/soundio/play.py | Python | lgpl-2.1 | 4,020 |
#!/usr/bin/env python
## \file config.py
# \brief python package for config
# \author T. Lukaczyk, F. Palacios
# \version 3.2.9 "eagle"
#
# SU2 Lead Developers: Dr. Francisco Palacios ([email protected]).
# Dr. Thomas D. Economon ([email protected]).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
#
# Copyright (C) 2012-2015 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
import numpy as np
from ..util import bunch, ordered_bunch, switch
from .tools import *
from config_options import *
try:
from collections import OrderedDict
except ImportError:
from ..util.ordered_dict import OrderedDict
inf = 1.0e20
# ----------------------------------------------------------------------
# Configuration Class
# ----------------------------------------------------------------------
class Config(ordered_bunch):
""" config = SU2.io.Config(filename="")
Starts a config class, an extension of
ordered_bunch()
use 1: initialize by reading config file
config = SU2.io.Config('filename')
use 2: initialize from dictionary or bunch
config = SU2.io.Config(param_dict)
use 3: initialize empty
config = SU2.io.Config()
Parameters can be accessed by item or attribute
ie: config['MESH_FILENAME'] or config.MESH_FILENAME
Methods:
read() - read from a config file
write() - write to a config file (requires existing file)
dump() - dump a raw config file
unpack_dvs() - unpack a design vector
diff() - returns the difference from another config
dist() - computes the distance from another config
"""
_filename = 'config.cfg'
def __init__(self,*args,**kwarg):
# look for filename in inputs
if args and isinstance(args[0],str):
filename = args[0]
args = args[1:]
elif kwarg.has_key('filename'):
filename = kwarg['filename']
del kwarg['filename']
else:
filename = ''
# initialize ordered bunch
super(Config,self).__init__(*args,**kwarg)
# read config if it exists
if filename:
try:
self.read(filename)
except:
raise IOError , 'Could not find config file: %s' % filename
self._filename = filename
def read(self,filename):
""" reads from a config file """
konfig = read_config(filename)
self.update(konfig)
def write(self,filename=''):
""" updates an existing config file """
if not filename: filename = self._filename
assert os.path.exists(filename) , 'must write over an existing config file'
write_config(filename,self)
def dump(self,filename=''):
""" dumps all items in the config bunch, without comments """
if not filename: filename = self._filename
dump_config(filename,self)
def __getattr__(self,k):
try:
return super(Config,self).__getattr__(k)
except AttributeError:
raise AttributeError , 'Config parameter not found'
def __getitem__(self,k):
try:
return super(Config,self).__getitem__(k)
except KeyError:
raise KeyError , 'Config parameter not found: %s' % k
def unpack_dvs(self,dv_new,dv_old=None):
""" updates config with design variable vectors
will scale according to each DEFINITION_DV scale parameter
Modifies:
DV_KIND
DV_MARKER
DV_PARAM
DV_VALUE_OLD
DV_VALUE_NEW
Inputs:
dv_new - list or array of new dv values
dv_old - optional, list or array of old dv values, defaults to zeros
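            Example (sketch, assuming two design variables are defined):
                config.unpack_dvs([0.01, -0.02])   # dv_old defaults to zeros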
"""
dv_new = copy.deepcopy(dv_new)
dv_old = copy.deepcopy(dv_old)
# handle unpacking cases
def_dv = self['DEFINITION_DV']
n_dv = len(def_dv['KIND'])
if not dv_old: dv_old = [0.0]*n_dv
assert len(dv_new) == len(dv_old) , 'unexpected design vector length'
# handle param
param_dv = self['DV_PARAM']
# apply scale
dv_scales = def_dv['SCALE']
dv_new = [ dv_new[i]*dv_scl for i,dv_scl in enumerate(dv_scales) ]
dv_old = [ dv_old[i]*dv_scl for i,dv_scl in enumerate(dv_scales) ]
# Change the parameters of the design variables
self['DV_KIND'] = def_dv['KIND']
param_dv['PARAM'] = def_dv['PARAM']
param_dv['FFDTAG'] = def_dv['FFDTAG']
self.update({ 'DV_MARKER' : def_dv['MARKER'][0] ,
'DV_VALUE_OLD' : dv_old ,
'DV_VALUE_NEW' : dv_new })
def __eq__(self,konfig):
return super(Config,self).__eq__(konfig)
def __ne__(self,konfig):
return super(Config,self).__ne__(konfig)
def local_files(self):
""" removes path prefix from all *_FILENAME params
"""
for key,value in self.iteritems():
if key.split('_')[-1] == 'FILENAME':
self[key] = os.path.basename(value)
def diff(self,konfig):
""" compares self to another config
Inputs:
konfig - a second config
Outputs:
config_diff - a config containing only the differing
keys, each with values of a list of the different
config values.
for example:
config_diff.MATH_PROBLEM = ['DIRECT','ADJOINT']
"""
keys = set([])
keys.update( self.keys() )
keys.update( konfig.keys() )
konfig_diff = Config()
for key in keys:
value1 = self.get(key,None)
value2 = konfig.get(key,None)
if not value1 == value2:
konfig_diff[key] = [value1,value2]
return konfig_diff
def dist(self,konfig,keys_check='ALL'):
""" calculates a distance to another config
Inputs:
konfig - a second config
keys_check - optional, a list of keys to check
Outputs:
distance - a float
Currently only works for DV_VALUE_NEW and DV_VALUE_OLD
Returns a large value otherwise
"""
konfig_diff = self.diff(konfig)
if keys_check == 'ALL':
keys_check = konfig_diff.keys()
distance = 0.0
for key in keys_check:
if konfig_diff.has_key(key):
val1 = konfig_diff[key][0]
val2 = konfig_diff[key][1]
if key in ['DV_VALUE_NEW',
'DV_VALUE_OLD']:
val1 = np.array( val1 )
val2 = np.array( val2 )
this_diff = np.sqrt( np.sum( (val1-val2)**2 ) )
else:
print 'Warning, unexpected config difference'
this_diff = inf
distance += this_diff
#: if key different
#: for each keys_check
return distance
def __repr__(self):
#return '<Config> %s' % self._filename
return self.__str__()
def __str__(self):
output = 'Config: %s' % self._filename
for k,v in self.iteritems():
output += '\n %s= %s' % (k,v)
return output
#: class Config
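# Illustrative usage sketch (not part of the original module; the file name
# below is only an example -- any SU2 config file with a DEFINITION_DV section
# behaves the same way):
#
#   config = Config('inv_NACA0012.cfg')           # parse an existing config file
#   print config.MATH_PROBLEM                     # attribute-style access
#   print config['MESH_FILENAME']                 # item-style access
#   n_dv = len(config['DEFINITION_DV']['KIND'])
#   config.unpack_dvs([0.001]*n_dv)               # scale and store a new design vector
#   config.write()                                # update the file it was read from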
# -------------------------------------------------------------------
# Get SU2 Configuration Parameters
# -------------------------------------------------------------------
def read_config(filename):
""" reads a config file """
# initialize output dictionary
data_dict = OrderedDict()
input_file = open(filename)
# process each line
while 1:
# read the line
line = input_file.readline()
if not line:
break
# remove line returns
line = line.strip('\r\n')
# make sure it has useful data
if (not "=" in line) or (line[0] == '%'):
continue
# split across equals sign
line = line.split("=",1)
this_param = line[0].strip()
this_value = line[1].strip()
assert not data_dict.has_key(this_param) , ('Config file has multiple specifications of %s' % this_param )
for case in switch(this_param):
# comma delimited lists of strings with or without paren's
if case("MARKER_EULER") : pass
if case("MARKER_FAR") : pass
if case("MARKER_PLOTTING") : pass
if case("MARKER_MONITORING") : pass
if case("MARKER_SYM") : pass
if case("DV_KIND") :
# remove white space
this_value = ''.join(this_value.split())
# remove parens
this_value = this_value.strip('()')
# split by comma
data_dict[this_param] = this_value.split(",")
break
# semicolon delimited lists of comma delimited lists of floats
if case("DV_PARAM"):
# remove white space
info_General = ''.join(this_value.split())
# split by semicolon
info_General = info_General.split(';')
# build list of dv params, convert string to float
dv_Parameters = []
dv_FFDTag = []
for this_dvParam in info_General:
this_dvParam = this_dvParam.strip('()')
this_dvParam = this_dvParam.split(",")
# if FFD change the first element to work with numbers and float(x)
if data_dict["DV_KIND"][0] in ['FFD_SETTING','FFD_CONTROL_POINT','FFD_DIHEDRAL_ANGLE','FFD_TWIST_ANGLE','FFD_ROTATION','FFD_CAMBER','FFD_THICKNESS','FFD_CONTROL_POINT_2D','FFD_CAMBER_2D','FFD_THICKNESS_2D']:
this_dvFFDTag = this_dvParam[0]
this_dvParam[0] = '0'
else:
this_dvFFDTag = []
this_dvParam = [ float(x) for x in this_dvParam ]
dv_FFDTag = dv_FFDTag + [this_dvFFDTag]
dv_Parameters = dv_Parameters + [this_dvParam]
# store in a dictionary
dv_Definitions = { 'FFDTAG' : dv_FFDTag ,
'PARAM' : dv_Parameters }
data_dict[this_param] = dv_Definitions
break
# comma delimited lists of floats
if case("DV_VALUE_OLD") : pass
if case("DV_VALUE_NEW") : pass
if case("DV_VALUE") :
# remove white space
this_value = ''.join(this_value.split())
# split by comma, map to float, store in dictionary
data_dict[this_param] = map(float,this_value.split(","))
break
# float parameters
if case("MACH_NUMBER") : pass
if case("AoA") : pass
if case("FIN_DIFF_STEP") : pass
if case("CFL_NUMBER") : pass
if case("WRT_SOL_FREQ") :
data_dict[this_param] = float(this_value)
break
# int parameters
if case("NUMBER_PART") : pass
if case("AVAILABLE_PROC") : pass
if case("EXT_ITER") : pass
if case("TIME_INSTANCES") : pass
if case("UNST_ADJOINT_ITER") : pass
if case("ITER_AVERAGE_OBJ") : pass
if case("ADAPT_CYCLES") :
data_dict[this_param] = int(this_value)
break
# unitary design variable definition
if case("DEFINITION_DV"):
# remove white space
this_value = ''.join(this_value.split())
# split into unitary definitions
info_Unitary = this_value.split(";")
# process each Design Variable
dv_Kind = []
dv_Scale = []
dv_Markers = []
dv_FFDTag = []
dv_Parameters = []
for this_General in info_Unitary:
if not this_General: continue
# split each unitary definition into one general definition
info_General = this_General.strip("()").split("|") # check for needed strip()?
# split information for dv Kinds
info_Kind = info_General[0].split(",")
# pull processed dv values
this_dvKind = get_dvKind( int( info_Kind[0] ) )
this_dvScale = float( info_Kind[1] )
this_dvMarkers = info_General[1].split(",")
if this_dvKind=='MACH_NUMBER' or this_dvKind=='AOA':
this_dvParameters = []
else:
this_dvParameters = info_General[2].split(",")
# if FFD change the first element to work with numbers and float(x), save also the tag
if this_dvKind in ['FFD_SETTING','FFD_CONTROL_POINT','FFD_DIHEDRAL_ANGLE','FFD_TWIST_ANGLE','FFD_ROTATION','FFD_CAMBER','FFD_THICKNESS','FFD_CONTROL_POINT_2D','FFD_CAMBER_2D','FFD_THICKNESS_2D']:
this_dvFFDTag = this_dvParameters[0]
this_dvParameters[0] = '0'
else:
this_dvFFDTag = []
this_dvParameters = [ float(x) for x in this_dvParameters ]
# add to lists
dv_Kind = dv_Kind + [this_dvKind]
dv_Scale = dv_Scale + [this_dvScale]
dv_Markers = dv_Markers + [this_dvMarkers]
dv_FFDTag = dv_FFDTag + [this_dvFFDTag]
dv_Parameters = dv_Parameters + [this_dvParameters]
# store in a dictionary
dv_Definitions = { 'KIND' : dv_Kind ,
'SCALE' : dv_Scale ,
'MARKER' : dv_Markers ,
'FFDTAG' : dv_FFDTag ,
'PARAM' : dv_Parameters }
# save to output dictionary
data_dict[this_param] = dv_Definitions
break
# unitary objective definition
if case('OPT_OBJECTIVE'):
# remove white space
this_value = ''.join(this_value.split())
# split by scale
this_value = this_value.split("*")
this_name = this_value[0]
this_scale = 1.0
if len(this_value) > 1:
this_scale = float( this_value[1] )
this_def = { this_name : {'SCALE':this_scale} }
# save to output dictionary
data_dict[this_param] = this_def
break
# unitary constraint definition
if case('OPT_CONSTRAINT'):
# remove white space
this_value = ''.join(this_value.split())
# check for none case
if this_value == 'NONE':
data_dict[this_param] = {'EQUALITY':OrderedDict(), 'INEQUALITY':OrderedDict()}
break
# split definitions
this_value = this_value.split(';')
this_def = OrderedDict()
for this_con in this_value:
if not this_con: continue # if no definition
# defaults
this_obj = 'NONE'
this_sgn = '='
this_scl = 1.0
this_val = 0.0
# split scale if present
this_con = this_con.split('*')
if len(this_con) > 1:
this_scl = float( this_con[1] )
this_con = this_con[0]
# find sign
for this_sgn in ['<','>','=']:
if this_sgn in this_con: break
# split sign, store objective and value
this_con = this_con.strip('()').split(this_sgn)
assert len(this_con) == 2 , 'incorrect constraint definition'
this_obj = this_con[0]
this_val = float( this_con[1] )
# store in dictionary
this_def[this_obj] = { 'SIGN' : this_sgn ,
'VALUE' : this_val ,
'SCALE' : this_scl }
#: for each constraint definition
# sort constraints by type
this_sort = { 'EQUALITY' : OrderedDict() ,
'INEQUALITY' : OrderedDict() }
for key,value in this_def.iteritems():
if value['SIGN'] == '=':
this_sort['EQUALITY'][key] = value
else:
this_sort['INEQUALITY'][key] = value
#: for each definition
# save to output dictionary
data_dict[this_param] = this_sort
break
# otherwise
# string parameters
if case():
data_dict[this_param] = this_value
break
#: if case DEFINITION_DV
#: for case
#: for line
#hack - twl
if not data_dict.has_key('DV_VALUE_NEW'):
data_dict['DV_VALUE_NEW'] = [0]
if not data_dict.has_key('DV_VALUE_OLD'):
data_dict['DV_VALUE_OLD'] = [0]
if not data_dict.has_key('OPT_ITERATIONS'):
data_dict['OPT_ITERATIONS'] = 100
if not data_dict.has_key('OPT_ACCURACY'):
data_dict['OPT_ACCURACY'] = 1e-10
if not data_dict.has_key('BOUND_DV'):
data_dict['BOUND_DV'] = 1e10
return data_dict
#: def read_config()
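# Example of the structures read_config() builds (illustrative; the values are
# made up and the kind name assumes get_dvKind(1) maps to HICKS_HENNE).
# A line such as
#   DEFINITION_DV= ( 1 , 0.001 | airfoil | 0 , 0.05 ); ( 1 , 0.001 | airfoil | 0 , 0.10 )
# is parsed into
#   data_dict['DEFINITION_DV'] = { 'KIND'  : ['HICKS_HENNE','HICKS_HENNE'],
#                                  'SCALE' : [0.001, 0.001],
#                                  'MARKER': [['airfoil'], ['airfoil']],
#                                  'FFDTAG': [[], []],
#                                  'PARAM' : [[0.0, 0.05], [0.0, 0.10]] }
# and  OPT_CONSTRAINT= ( MOMENT_Z = 0.0 ) * 0.001  becomes
#   data_dict['OPT_CONSTRAINT'] = { 'EQUALITY'  : {'MOMENT_Z': {'SIGN':'=', 'VALUE':0.0, 'SCALE':0.001}},
#                                   'INEQUALITY': {} }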
# -------------------------------------------------------------------
# Set SU2 Configuration Parameters
# -------------------------------------------------------------------
def write_config(filename,param_dict):
""" updates an existing config file """
temp_filename = "temp.cfg"
shutil.copy(filename,temp_filename)
output_file = open(filename,"w")
# break pointers
param_dict = copy.deepcopy(param_dict)
for raw_line in open(temp_filename):
# remove line returns
line = raw_line.strip('\r\n')
# make sure it has useful data
if not "=" in line:
output_file.write(raw_line)
continue
# split across equals sign
line = line.split("=")
this_param = line[0].strip()
old_value = line[1].strip()
# skip if parameter unwanted
if not param_dict.has_key(this_param):
output_file.write(raw_line)
continue
# start writing parameter
new_value = param_dict[this_param]
output_file.write(this_param + "= ")
# handle parameter types
for case in switch(this_param):
# comma delimited list of floats
if case("DV_VALUE_NEW") : pass
if case("DV_VALUE_OLD") : pass
if case("DV_VALUE") :
n_lists = len(new_value)
for i_value in range(n_lists):
output_file.write("%s" % new_value[i_value])
if i_value+1 < n_lists:
output_file.write(", ")
break
# comma delimited list of strings no paren's
if case("DV_KIND") : pass
if case("TASKS") : pass
if case("GRADIENTS") :
if not isinstance(new_value,list):
new_value = [ new_value ]
n_lists = len(new_value)
for i_value in range(n_lists):
output_file.write(new_value[i_value])
if i_value+1 < n_lists:
output_file.write(", ")
break
# comma delimited list of strings inside paren's
if case("MARKER_EULER") : pass
if case("MARKER_FAR") : pass
if case("MARKER_PLOTTING") : pass
if case("MARKER_MONITORING") : pass
if case("MARKER_SYM") : pass
if case("DV_MARKER") :
if not isinstance(new_value,list):
new_value = [ new_value ]
output_file.write("( ")
n_lists = len(new_value)
for i_value in range(n_lists):
output_file.write(new_value[i_value])
if i_value+1 < n_lists:
output_file.write(", ")
output_file.write(" )")
break
# semicolon delimited lists of comma delimited lists
if case("DV_PARAM") :
assert isinstance(new_value['PARAM'],list) , 'incorrect specification of DV_PARAM'
if not isinstance(new_value['PARAM'][0],list): new_value = [ new_value ]
for i_value in range(len(new_value['PARAM'])):
output_file.write("( ")
this_param_list = new_value['PARAM'][i_value]
this_ffd_list = new_value['FFDTAG'][i_value]
n_lists = len(this_param_list)
if this_ffd_list != []:
output_file.write("%s, " % this_ffd_list)
for j_value in range(1,n_lists):
output_file.write("%s" % this_param_list[j_value])
if j_value+1 < n_lists:
output_file.write(", ")
else:
for j_value in range(n_lists):
output_file.write("%s" % this_param_list[j_value])
if j_value+1 < n_lists:
output_file.write(", ")
output_file.write(") ")
if i_value+1 < len(new_value['PARAM']):
output_file.write("; ")
break
# int parameters
if case("NUMBER_PART") : pass
if case("ADAPT_CYCLES") : pass
if case("TIME_INSTANCES") : pass
if case("AVAILABLE_PROC") : pass
if case("UNST_ADJOINT_ITER") : pass
if case("EXT_ITER") :
output_file.write("%i" % new_value)
break
if case("DEFINITION_DV") :
n_dv = len(new_value['KIND'])
if not n_dv:
output_file.write("NONE")
for i_dv in range(n_dv):
this_kind = new_value['KIND'][i_dv]
output_file.write("( ")
output_file.write("%i , " % get_dvID(this_kind) )
output_file.write("%s " % new_value['SCALE'][i_dv])
output_file.write("| ")
# markers
n_mark = len(new_value['MARKER'][i_dv])
for i_mark in range(n_mark):
output_file.write("%s " % new_value['MARKER'][i_dv][i_mark])
if i_mark+1 < n_mark:
output_file.write(", ")
#: for each marker
if not this_kind in ['AOA','MACH_NUMBER']:
output_file.write(" | ")
# params
if this_kind in ['FFD_SETTING','FFD_CONTROL_POINT','FFD_DIHEDRAL_ANGLE','FFD_TWIST_ANGLE','FFD_ROTATION','FFD_CAMBER','FFD_THICKNESS','FFD_CONTROL_POINT_2D','FFD_CAMBER_2D','FFD_THICKNESS_2D']:
n_param = len(new_value['PARAM'][i_dv])
output_file.write("%s , " % new_value['FFDTAG'][i_dv])
for i_param in range(1,n_param):
output_file.write("%s " % new_value['PARAM'][i_dv][i_param])
if i_param+1 < n_param:
output_file.write(", ")
else:
n_param = len(new_value['PARAM'][i_dv])
for i_param in range(n_param):
output_file.write("%s " % new_value['PARAM'][i_dv][i_param])
if i_param+1 < n_param:
output_file.write(", ")
#: for each param
output_file.write(" )")
if i_dv+1 < n_dv:
output_file.write("; ")
#: for each dv
break
if case("OPT_OBJECTIVE"):
assert len(new_value.keys())==1 , 'only one OPT_OBJECTIVE is currently supported'
i_name = 0
for name,value in new_value.iteritems():
if i_name>0: output_file.write("; ")
output_file.write( "%s * %s" % (name,value['SCALE']) )
i_name += 1
break
if case("OPT_CONSTRAINT"):
i_con = 0
for con_type in ['EQUALITY','INEQUALITY']:
this_con = new_value[con_type]
for name,value in this_con.iteritems():
if i_con>0: output_file.write("; ")
output_file.write( "( %s %s %s ) * %s"
% (name, value['SIGN'], value['VALUE'], value['SCALE']) )
i_con += 1
#: for each constraint
#: for each constraint type
if not i_con: output_file.write("NONE")
break
# default, assume string, integer or unformatted float
if case():
output_file.write('%s' % new_value)
break
#: for case
# remove from param dictionary
del param_dict[this_param]
# next line
output_file.write("\n")
#: for each line
# check that all params were used
for this_param in param_dict.keys():
if not this_param in ['JOB_NUMBER']:
print ( 'Warning: Parameter %s not found in config file and was not written' % (this_param) )
output_file.close()
os.remove( temp_filename )
#: def write_config()
def dump_config(filename,config):
''' dumps a raw config file with all options in config
and no comments
'''
# HACK - twl
if config.has_key('DV_VALUE_NEW'):
config.DV_VALUE = config.DV_VALUE_NEW
config_file = open(filename,'w')
# write dummy file
for key in config.keys():
config_file.write( '%s= 0 \n' % key )
config_file.close()
# dump data
write_config(filename,config)
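# Typical round trip (illustrative sketch; the file names are examples only):
#
#   config = Config('turb_ONERAM6.cfg')   # read a template configuration
#   config.EXT_ITER = 250                 # tweak a parameter
#   config.dump('config_CFD.cfg')         # write a comment-free copy, original untouched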
| chenbojian/SU2 | SU2_PY/SU2/io/config.py | Python | lgpl-2.1 | 30,110 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
# Author: Milton Woods <[email protected]>
# Date: March 22, 2017
# Author: George Hartzell <[email protected]>
# Date: July 21, 2016
# Author: Justin Too <[email protected]>
# Date: September 6, 2015
#
import re
import os
from contextlib import contextmanager
from llnl.util.lang import match_predicate
from spack import *
class Perl(Package): # Perl doesn't use Autotools, it should subclass Package
"""Perl 5 is a highly capable, feature-rich programming language with over
27 years of development."""
homepage = "http://www.perl.org"
# URL must remain http:// so Spack can bootstrap curl
url = "http://www.cpan.org/src/5.0/perl-5.24.1.tar.gz"
executables = [r'^perl(-?\d+.*)?$']
# see http://www.cpan.org/src/README.html for
# explanation of version numbering scheme
# Maintenance releases (even numbers, recommended)
version('5.32.0', sha256='efeb1ce1f10824190ad1cadbcccf6fdb8a5d37007d0100d2d9ae5f2b5900c0b4')
# Development releases (odd numbers)
version('5.31.7', sha256='d05c4e72128f95ef6ffad42728ecbbd0d9437290bf0f88268b51af011f26b57d')
version('5.31.4', sha256='418a7e6fe6485cc713a86d1227ef112f0bb3f80322e3b715ffe42851d97804a5')
# Maintenance releases (even numbers, recommended)
version('5.30.3', sha256='32e04c8bb7b1aecb2742a7f7ac0eabac100f38247352a73ad7fa104e39e7406f', preferred=True)
version('5.30.2', sha256='66db7df8a91979eb576fac91743644da878244cf8ee152f02cd6f5cd7a731689')
version('5.30.1', sha256='bf3d25571ff1ee94186177c2cdef87867fd6a14aa5a84f0b1fb7bf798f42f964')
version('5.30.0', sha256='851213c754d98ccff042caa40ba7a796b2cee88c5325f121be5cbb61bbf975f2')
# End of life releases
version('5.28.0', sha256='7e929f64d4cb0e9d1159d4a59fc89394e27fa1f7004d0836ca0d514685406ea8')
version('5.26.2', sha256='572f9cea625d6062f8a63b5cee9d3ee840800a001d2bb201a41b9a177ab7f70d')
version('5.24.1', sha256='e6c185c9b09bdb3f1b13f678999050c639859a7ef39c8cad418448075f5918af')
version('5.22.4', sha256='ba9ef57c2b709f2dad9c5f6acf3111d9dfac309c484801e0152edbca89ed61fa')
version('5.22.3', sha256='1b351fb4df7e62ec3c8b2a9f516103595b2601291f659fef1bbe3917e8410083')
version('5.22.2', sha256='81ad196385aa168cb8bd785031850e808c583ed18a7901d33e02d4f70ada83c2')
version('5.22.1', sha256='2b475d0849d54c4250e9cba4241b7b7291cffb45dfd083b677ca7b5d38118f27')
version('5.22.0', sha256='0c690807f5426bbd1db038e833a917ff00b988bf03cbf2447fa9ffdb34a2ab3c')
version('5.20.3', sha256='3524e3a76b71650ab2f794fd68e45c366ec375786d2ad2dca767da424bbb9b4a')
version('5.18.4', sha256='01a4e11a9a34616396c4a77b3cef51f76a297e1a2c2c490ae6138bf0351eb29f')
version('5.16.3', sha256='69cf08dca0565cec2c5c6c2f24b87f986220462556376275e5431cc2204dedb6')
extendable = True
depends_on('gdbm')
depends_on('berkeley-db')
# there has been a long fixed issue with 5.22.0 with regard to the ccflags
# definition. It is well documented here:
# https://rt.perl.org/Public/Bug/Display.html?id=126468
patch('protect-quotes-in-ccflags.patch', when='@5.22.0')
# Fix build on Fedora 28
# https://bugzilla.redhat.com/show_bug.cgi?id=1536752
patch('https://src.fedoraproject.org/rpms/perl/raw/004cea3a67df42e92ffdf4e9ac36d47a3c6a05a4/f/perl-5.26.1-guard_old_libcrypt_fix.patch', level=1, sha256='0eac10ed90aeb0459ad8851f88081d439a4e41978e586ec743069e8b059370ac', when='@:5.26.2')
# Installing cpanm alongside the core makes it safe and simple for
# people/projects to install their own sets of perl modules. Not
# having it in core increases the "energy of activation" for doing
# things cleanly.
variant('cpanm', default=True,
description='Optionally install cpanm with the core packages.')
variant('shared', default=True,
description='Build a shared libperl.so library')
variant('threads', default=True,
description='Build perl with threads support')
resource(
name="cpanm",
url="http://search.cpan.org/CPAN/authors/id/M/MI/MIYAGAWA/App-cpanminus-1.7042.tar.gz",
sha256="9da50e155df72bce55cb69f51f1dbb4b62d23740fb99f6178bb27f22ebdf8a46",
destination="cpanm",
placement="cpanm"
)
phases = ['configure', 'build', 'install']
@classmethod
def determine_version(cls, exe):
perl = spack.util.executable.Executable(exe)
output = perl('--version', output=str, error=str)
if output:
match = re.search(r'perl.*\(v([0-9.]+)\)', output)
if match:
return match.group(1)
return None
@classmethod
def determine_variants(cls, exes, version):
for exe in exes:
perl = spack.util.executable.Executable(exe)
output = perl('-V', output=str, error=str)
variants = ''
if output:
match = re.search(r'-Duseshrplib', output)
if match:
variants += '+shared'
else:
variants += '~shared'
match = re.search(r'-Duse.?threads', output)
if match:
variants += '+threads'
else:
variants += '~threads'
path = os.path.dirname(exe)
if 'cpanm' in os.listdir(path):
variants += '+cpanm'
else:
variants += '~cpanm'
return variants
# On a lustre filesystem, patch may fail when files
# aren't writeable so make pp.c user writeable
# before patching. This should probably walk the
# source and make everything writeable in the future.
def do_stage(self, mirror_only=False):
# Do Spack's regular stage
super(Perl, self).do_stage(mirror_only)
# Add write permissions on file to be patched
filename = join_path(self.stage.source_path, 'pp.c')
perm = os.stat(filename).st_mode
os.chmod(filename, perm | 0o200)
def configure_args(self):
spec = self.spec
prefix = self.prefix
config_args = [
'-des',
'-Dprefix={0}'.format(prefix),
'-Dlocincpth=' + self.spec['gdbm'].prefix.include,
'-Dloclibpth=' + self.spec['gdbm'].prefix.lib,
]
# Extensions are installed into their private tree via
# `INSTALL_BASE`/`--install_base` (see [1]) which results in a
# "predictable" installation tree that sadly does not match the
# Perl core's @INC structure. This means that when activation
# merges the extension into the extendee[2], the directory tree
# containing the extensions is not on @INC and the extensions can
# not be found.
#
# This bit prepends @INC with the directory that is used when
# extensions are activated [3].
#
# [1] https://metacpan.org/pod/ExtUtils::MakeMaker#INSTALL_BASE
# [2] via the activate method in the PackageBase class
# [3] https://metacpan.org/pod/distribution/perl/INSTALL#APPLLIB_EXP
config_args.append('-Accflags=-DAPPLLIB_EXP=\\"' +
self.prefix.lib.perl5 + '\\"')
# Discussion of -fPIC for Intel at:
# https://github.com/spack/spack/pull/3081 and
# https://github.com/spack/spack/pull/4416
if spec.satisfies('%intel'):
config_args.append('-Accflags={0}'.format(
self.compiler.cc_pic_flag))
if '+shared' in spec:
config_args.append('-Duseshrplib')
if '+threads' in spec:
config_args.append('-Dusethreads')
if spec.satisfies('@5.31'):
config_args.append('-Dusedevel')
return config_args
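    # Illustrative sketch (not authoritative): for a spec like
    # 'perl@5.30.3 +shared +threads %gcc', configure_args() returns roughly
    #   ['-des', '-Dprefix=<prefix>',
    #    '-Dlocincpth=<gdbm-prefix>/include', '-Dloclibpth=<gdbm-prefix>/lib',
    #    '-Accflags=-DAPPLLIB_EXP=\"<prefix>/lib/perl5\"',
    #    '-Duseshrplib', '-Dusethreads']
    # The concrete paths depend on the prefixes Spack assigns at install time.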
def configure(self, spec, prefix):
configure = Executable('./Configure')
configure(*self.configure_args())
def build(self, spec, prefix):
make()
@run_after('build')
@on_package_attributes(run_tests=True)
def test(self):
make('test')
def install(self, spec, prefix):
make('install')
@run_after('install')
def install_cpanm(self):
spec = self.spec
if '+cpanm' in spec:
with working_dir(join_path('cpanm', 'cpanm')):
perl = spec['perl'].command
perl('Makefile.PL')
make()
make('install')
def _setup_dependent_env(self, env, dependent_spec, deptypes):
"""Set PATH and PERL5LIB to include the extension and
any other perl extensions it depends on,
assuming they were installed with INSTALL_BASE defined."""
perl_lib_dirs = []
for d in dependent_spec.traverse(deptype=deptypes):
if d.package.extends(self.spec):
perl_lib_dirs.append(d.prefix.lib.perl5)
if perl_lib_dirs:
perl_lib_path = ':'.join(perl_lib_dirs)
env.prepend_path('PERL5LIB', perl_lib_path)
def setup_dependent_build_environment(self, env, dependent_spec):
self._setup_dependent_env(env, dependent_spec,
deptypes=('build', 'run'))
def setup_dependent_run_environment(self, env, dependent_spec):
self._setup_dependent_env(env, dependent_spec, deptypes=('run',))
def setup_dependent_package(self, module, dependent_spec):
"""Called before perl modules' install() methods.
In most cases, extensions will only need to have one line:
perl('Makefile.PL','INSTALL_BASE=%s' % self.prefix)
"""
# If system perl is used through packages.yaml
# there cannot be extensions.
if dependent_spec.package.is_extension:
# perl extension builds can have a global perl
# executable function
module.perl = self.spec['perl'].command
# Add variables for library directory
module.perl_lib_dir = dependent_spec.prefix.lib.perl5
# Make the site packages directory for extensions,
# if it does not exist already.
mkdirp(module.perl_lib_dir)
@run_after('install')
def filter_config_dot_pm(self):
"""Run after install so that Config.pm records the compiler that Spack
built the package with. If this isn't done, $Config{cc} will
be set to Spack's cc wrapper script. These files are read-only, which
frustrates filter_file on some filesystems (NFSv4), so make them
temporarily writable.
"""
kwargs = {'ignore_absent': True, 'backup': False, 'string': False}
# Find the actual path to the installed Config.pm file.
perl = self.spec['perl'].command
config_dot_pm = perl('-MModule::Loaded', '-MConfig', '-e',
'print is_loaded(Config)', output=str)
with self.make_briefly_writable(config_dot_pm):
match = 'cc *=>.*'
substitute = "cc => '{cc}',".format(cc=self.compiler.cc)
filter_file(match, substitute, config_dot_pm, **kwargs)
# And the path Config_heavy.pl
d = os.path.dirname(config_dot_pm)
config_heavy = join_path(d, 'Config_heavy.pl')
with self.make_briefly_writable(config_heavy):
match = '^cc=.*'
substitute = "cc='{cc}'".format(cc=self.compiler.cc)
filter_file(match, substitute, config_heavy, **kwargs)
match = '^ld=.*'
substitute = "ld='{ld}'".format(ld=self.compiler.cc)
filter_file(match, substitute, config_heavy, **kwargs)
match = "^ccflags='"
substitute = "ccflags='%s " % ' '\
.join(self.spec.compiler_flags['cflags'])
filter_file(match, substitute, config_heavy, **kwargs)
@contextmanager
def make_briefly_writable(self, path):
"""Temporarily make a file writable, then reset"""
perm = os.stat(path).st_mode
os.chmod(path, perm | 0o200)
yield
os.chmod(path, perm)
# ========================================================================
# Handle specifics of activating and deactivating perl modules.
# ========================================================================
def perl_ignore(self, ext_pkg, args):
"""Add some ignore files to activate/deactivate args."""
ignore_arg = args.get('ignore', lambda f: False)
# Many perl packages describe themselves in a perllocal.pod file,
# so the files conflict when multiple packages are activated.
# We could merge the perllocal.pod files in activated packages,
# but this is unnecessary for correct operation of perl.
# For simplicity, we simply ignore all perllocal.pod files:
patterns = [r'perllocal\.pod$']
return match_predicate(ignore_arg, patterns)
def activate(self, ext_pkg, view, **args):
ignore = self.perl_ignore(ext_pkg, args)
args.update(ignore=ignore)
super(Perl, self).activate(ext_pkg, view, **args)
extensions_layout = view.extensions_layout
exts = extensions_layout.extension_map(self.spec)
exts[ext_pkg.name] = ext_pkg.spec
def deactivate(self, ext_pkg, view, **args):
ignore = self.perl_ignore(ext_pkg, args)
args.update(ignore=ignore)
super(Perl, self).deactivate(ext_pkg, view, **args)
extensions_layout = view.extensions_layout
exts = extensions_layout.extension_map(self.spec)
# Make deactivate idempotent
if ext_pkg.name in exts:
del exts[ext_pkg.name]
@property
def command(self):
"""Returns the Perl command, which may vary depending on the version
of Perl. In general, Perl comes with a ``perl`` command. However,
development releases have a ``perlX.Y.Z`` command.
Returns:
Executable: the Perl command
"""
for ver in ('', self.spec.version):
path = os.path.join(self.prefix.bin, '{0}{1}'.format(
self.spec.name, ver))
if os.path.exists(path):
return Executable(path)
else:
msg = 'Unable to locate {0} command in {1}'
raise RuntimeError(msg.format(self.spec.name, self.prefix.bin))
| rspavel/spack | var/spack/repos/builtin/packages/perl/package.py | Python | lgpl-2.1 | 14,548 |
import http.server
import urllib.parse
class BaseServer(http.server.BaseHTTPRequestHandler):
pass
HTTPServer = http.server.HTTPServer
urllib_urlparse = urllib.parse.urlparse
| cslarsen/vev | vev/server_py3.py | Python | lgpl-2.1 | 180 |
# -*- coding: utf-8 -*-
"""
An x11 bridge provides a secure/firewalled link between a desktop application and the host x11 server. In this case, we use XPRA to do the bridging.
::.
------------- -------------
|desktop app| <--/tmp/.X11-unix--> |xpra server| Untrusted
------------- -------------
^
| ~/.xpra
v
------------- -------------
| host | <--/tmp/.X11-unix--> |xpra client| Trusted
------------- -------------
This configuration involves 3 containers.
1) contains the untrusted desktop application
2) contains an untrusted xpra server
3) contains a trusted xpra client
An up-to-date version of xpra can be used; xpra need not be installed on the host.
"""
#external imports
import os
import time
import shutil
import errno
import sys
import hashlib
#internal imports
from subuserlib.classes.service import Service
from collections import OrderedDict
import subuserlib.verify
import subuserlib.subuser
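# Illustrative usage sketch (hypothetical `user`/`subuser` objects): the
# X11Bridge factory at the bottom of this module is the intended entry point.
#
#   bridge = X11Bridge(user, subuser)   # XpraX11Bridge when config["x11-bridge"] == "xpra"
#   new_service_subusers = bridge.setup()
#   status = bridge.start({})           # launches the xpra server and client containers
#   bridge.stop(status)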
class XpraX11Bridge(Service):
def __init__(self,user,subuser):
self.subuser = subuser
Service.__init__(self,user,subuser)
self.name = "xpra"
def isSetup(self):
clientSubuserInstalled = self.getClientSubuserName() in self.user.registry.subusers
serverSubuserInstalled = self.getServerSubuserName() in self.user.registry.subusers
return clientSubuserInstalled and serverSubuserInstalled
def getSubuserSpecificServerPermissions(self):
"""
Get the dictionary of permissions that are specific to this particular subuser and therefore are not packaged in the xpra server image source.
"""
permissions = OrderedDict()
permissions["system-dirs"] = OrderedDict(
[ (self.getXpraHomeDir(),"/home/subuser")
, (self.getServerSideX11Path(),"/tmp/.X11-unix") ])
return permissions
def getSubuserSpecificClientPermissions(self):
"""
Get the dictionary of permissions that are specific to this particular subuser and therefore are not packaged in the xpra client image source.
"""
permissions = OrderedDict()
permissions["system-dirs"] = OrderedDict(
[ (self.getXpraSocket()
,os.path.join(self.getClientSubuser().dockersideHome,".xpra","server-100"))
, (self.getXpraTmpDir()
,os.path.join(self.getClientSubuser().dockersideHome,"tmp"))])
return permissions
def setupServerPermissions(self):
permissions = self.getServerSubuser().permissions
for key,value in self.getSubuserSpecificServerPermissions().items():
permissions[key] = value
permissions.save()
def setupClientPermissions(self):
permissions = self.getClientSubuser().permissions
for key,value in self.getSubuserSpecificClientPermissions().items():
permissions[key] = value
permissions.save()
def arePermissionsUpToDate(self):
areClientPermissionsUpToDate = isSubDict(self.getSubuserSpecificClientPermissions(),self.getClientSubuser().permissions)
if not areClientPermissionsUpToDate:
self.user.registry.log("Client permissions:\n"+str(self.getClientSubuser().permissions)+ "\n differ from defaults:\n"+str(self.getSubuserSpecificClientPermissions()),verbosityLevel=4)
areServerPermissionsUpToDate = isSubDict(self.getSubuserSpecificServerPermissions(),self.getServerSubuser().permissions)
if not areServerPermissionsUpToDate:
self.user.registry.log("Server permissions:\n"+str(self.getServerSubuser().permissions)+ "\n differ from defaults:\n"+str(self.getSubuserSpecificServerPermissions()),verbosityLevel=4)
return areClientPermissionsUpToDate and areServerPermissionsUpToDate
def setup(self):
"""
Do any setup required in order to create a functional bridge: Creating subusers building images ect.
"""
self.user.registry.log("Ensuring x11 bridge setup for subuser "+self.subuser.name,verbosityLevel=5)
newSubuserNames = []
if not self.isSetup():
self.addServerSubuser()
self.addClientSubuser()
newSubuserNames = [self.getServerSubuserName(),self.getClientSubuserName()]
if not self.arePermissionsUpToDate():
self.user.registry.log("Updating x11 bridge permissions for subuser "+self.subuser.name,verbosityLevel=4)
self.setupServerPermissions()
self.setupClientPermissions()
newSubuserNames = [self.getServerSubuserName(),self.getClientSubuserName()]
newSubusers = []
for newSubuserName in newSubuserNames:
newSubusers.append(self.user.registry.subusers[newSubuserName])
return newSubusers
def getXpraVolumePath(self):
return os.path.join(self.user.config["volumes-dir"],"xpra",self.subuser.name)
def getServerSideX11Path(self):
return os.path.join(self.getXpraVolumePath(),"tmp",".X11-unix")
def getXpraHomeDir(self):
return os.path.join(self.getXpraVolumePath(),"xpra-home")
def getXpraTmpDir(self):
return os.path.join(self.getXpraHomeDir(),"tmp")
def getXpraSocket(self):
return os.path.join(self.getXpraHomeDir(),".xpra",self.getServerSubuserHostname()+"-100")
def getServerSubuserHostname(self):
longHostName = "xpra-server"+hashlib.sha256(self.subuser.name.encode("utf-8")).hexdigest()
return longHostName[:63]
def getServerSubuserName(self):
return "!service-subuser-"+self.subuser.name+"-xpra-server"
def getServerSubuser(self):
return self.user.registry.subusers[self.getServerSubuserName()]
def _getPermissionsAccepter(self):
from subuserlib.classes.permissionsAccepters.acceptPermissionsAtCLI import AcceptPermissionsAtCLI
return AcceptPermissionsAtCLI(self.user,alwaysAccept=True)
def addServerSubuser(self):
try:
subuserlib.subuser.addFromImageSourceNoVerify(self.user,self.getServerSubuserName(),self.user.registry.repositories["default"]["subuser-internal-xpra-server"])
except KeyError:
sys.exit("Your default repository does not provide a subuser-internal-xpra-server. This means we cannot use the xpra-x11 bridge. Please fix the default repository or file a bug report.")
self.subuser.serviceSubuserNames.append(self.getServerSubuserName())
self.getServerSubuser().createPermissions(self.getServerSubuser().imageSource.permissions)
def getClientSubuserName(self):
return "!service-subuser-"+self.subuser.name+"-xpra-client"
def getClientSubuser(self):
return self.user.registry.subusers[self.getClientSubuserName()]
def addClientSubuser(self):
subuserlib.subuser.addFromImageSourceNoVerify(self.user,self.getClientSubuserName(),self.user.registry.repositories["default"]["subuser-internal-xpra-client"])
self.subuser.serviceSubuserNames.append(self.getClientSubuserName())
self.getClientSubuser().createPermissions(self.getClientSubuser().imageSource.permissions)
def cleanUp(self):
"""
Clear special volumes. This ensures statelessness of stateless subusers.
"""
self.user.registry.log("Cleaning up old bridge volume files.",verbosityLevel=4)
try:
shutil.rmtree(os.path.join(self.user.config["volumes-dir"],"xpra",self.subuser.name))
except OSError as e:
# We need to clean this up.
# Unfortunately, the X11 socket may still exist and will be owned by root.
# So we cannot do the clean up as a normal user.
# Fortunately, being a member of the docker group is the same as having root access.
if not e.errno == errno.ENOENT:
        self.user.registry.log("An error occurred while setting up the xpra X11 socket.",verbosityLevel=3)
self.user.registry.log(str(e),verbosityLevel=3)
self.user.dockerDaemon.execute(["run","--rm","--volume",os.path.join(self.user.config["volumes-dir"],"xpra")+":/xpra-volume","--entrypoint","/bin/rm",self.getServerSubuser().imageId,"-rf",os.path.join("/xpra-volume/",self.subuser.name)])
def createAndSetupSpecialVolumes(self,errorCount=0):
def clearAndTryAgain():
if errorCount >= 5:
sys.exit("Failed to setup XPRA bridge volumes. You have some permissions errors with your subuser volumes directory."+self.getXpraVolumePath()+" Look at the output above and try to resolve the problem yourself. Possible causes are simple ownership problems, apparmor, SELinux. If you fail to find a simple explanation for your permissions problems. Please file a bug report.")
self.cleanUp()
self.createAndSetupSpecialVolumes(errorCount=errorCount+1)
def mkdirs(directory):
self.user.registry.log("Creating the "+directory+" directory.",verbosityLevel=4)
try:
self.user.endUser.makedirs(directory)
except OSError as e:
if e.errno == errno.EEXIST or e.errno == errno.EACCES:
self.user.registry.log(str(e),verbosityLevel=3)
self.user.registry.log("Clearing xpra X11 socket.",verbosityLevel=3)
clearAndTryAgain()
else:
raise e
self.user.registry.log("Setting up XPRA bridge volumes.",verbosityLevel=4)
mkdirs(self.getServerSideX11Path())
mkdirs(self.getXpraHomeDir())
mkdirs(self.getXpraTmpDir())
try:
os.chmod(self.getServerSideX11Path(),1023)
except OSError as e:
if e.errno == errno.EPERM:
        self.user.registry.log("X11 bridge permission error, clearing and trying again.")
clearAndTryAgain()
def start(self,serviceStatus):
"""
Start the bridge.
"""
if not self.arePermissionsUpToDate():
sys.exit("The configuration of the xpra bridge has changed in a recent version. You must update the xpra bridge configuration by running\n\n$subuser repair")
self.cleanUp()
self.createAndSetupSpecialVolumes()
permissionDict = {
"system-tray": ("--system-tray" , "--no-system-tray"),
"cursors": ("--cursors", "--no-cursors"),
"clipboard": ("--clipboard","--no-clipboard")}
permissionArgs = []
for guiPermission,(on,off) in permissionDict.items():
if self.subuser.permissions["gui"][guiPermission]:
permissionArgs.append(on)
else:
permissionArgs.append(off)
commonArgs = ["--no-daemon", "--no-notifications", "--mmap", "--opengl=no"]
commonEnvVars = { "XPRA_CLIPBOARD_LIMIT" : "45"
, "XPRA_CLIPBOARDS" : "CLIPBOARD" }
# Launch xpra server
serverArgs = ["start","--no-pulseaudio","--no-mdns","--encoding=rgb"]
suppressOutput = not "SUBUSER_DEBUG_XPRA" in os.environ
serverArgs.extend(commonArgs)
serverArgs.extend(permissionArgs)
serverArgs.append(":100")
serverRuntime = self.getServerSubuser().getRuntime(os.environ)
serverRuntime.logIfInteractive("Starting xpra server...")
serverRuntime.hostname = self.getServerSubuserHostname()
self.user.registry.log("Hostname set.",verbosityLevel=4)
serverRuntime.setEnvVar("TMPDIR",os.path.join("/home/subuser","tmp"))
for arg, value in commonEnvVars.items():
serverRuntime.setEnvVar(arg, value)
serverRuntime.background = True
serverRuntime.backgroundSuppressOutput = suppressOutput
serverRuntime.setBackgroundCollectOutput(False,True)
    self.user.registry.log("Entering run subroutine.",verbosityLevel=4)
(serverContainer, serverProcess) = serverRuntime.run(args=serverArgs)
self.user.registry.log("Getting server CID",verbosityLevel=4)
serviceStatus["xpra-server-service-cid"] = serverContainer.id
self.waitForContainerToLaunch("xpra is ready", serverProcess, suppressOutput)
# Launch xpra client
try:
borderColor = self.subuser.permissions["gui"]["border-color"]
if "-" in borderColor:
borderColor = "red"
except:
borderColor = "red"
clientArgs = ["attach","--no-tray","--compress=0","--encoding=rgb","--border",borderColor]
clientArgs.extend(commonArgs)
clientArgs.extend(permissionArgs)
clientRuntime = self.getClientSubuser().getRuntime(os.environ)
clientRuntime.logIfInteractive("Starting xpra client...")
clientRuntime.setEnvVar("XPRA_SOCKET_HOSTNAME","server")
clientRuntime.setEnvVar("TMPDIR",os.path.join("/home/subuser","tmp"))
for arg, value in commonEnvVars.items():
clientRuntime.setEnvVar(arg, value)
clientRuntime.background = True
clientRuntime.backgroundSuppressOutput = suppressOutput
(clientContainer, clientProcess) = clientRuntime.run(args=clientArgs)
serviceStatus["xpra-client-service-cid"] = clientContainer.id
return serviceStatus
def waitForContainerToLaunch(self, readyString, process, suppressOutput):
while True:
where = process.stderr_file.tell()
line = process.stderr_file.readline()
while (not line):# or (line[-1:] != '\n'):
time.sleep(0.1)
process.stderr_file.seek(where)
line = process.stderr_file.readline()
if not suppressOutput:
subuserlib.print.printWithoutCrashing(line[:-1])
if readyString in line:
break
process.stderr_file.close()
def stop(self,serviceStatus):
"""
Stop the bridge.
"""
self.user.dockerDaemon.getContainer(serviceStatus["xpra-client-service-cid"]).stop()
self.user.dockerDaemon.getContainer(serviceStatus["xpra-server-service-cid"]).stop()
if not "SUBUSER_DEBUG_XPRA" in os.environ:
self.cleanUp()
def isRunning(self,serviceStatus):
def isContainerRunning(cid):
container = self.user.dockerDaemon.getContainer(cid)
containerStatus = container.inspect()
if containerStatus is None:
return False
else:
if not containerStatus["State"]["Running"]:
#Clean up left over container
container.remove(force=True)
return False
else:
return True
return isContainerRunning(serviceStatus["xpra-client-service-cid"]) and isContainerRunning(serviceStatus["xpra-server-service-cid"])
def X11Bridge(user,subuser):
return bridges[user.config["x11-bridge"]](user,subuser)
bridges = {"xpra":XpraX11Bridge}
########################################################################
# Helper functions
def isSubDict(subDict,dictionary):
for key in subDict.keys():
if (not key in dictionary) or (not subDict[key] == dictionary[key]):
if (key in dictionary) and (type(subDict[key]) == type(dictionary[key]) == OrderedDict):
for innerDictKey in subDict[key].keys():
if not innerDictKey in dictionary[key]:
pass
elif not subDict[key][innerDictKey] == dictionary[key][innerDictKey]:
print(key+"."+innerDictKey+" "+str(subDict[key][innerDictKey])+" != "+str(dictionary[key][innerDictKey]))
return False
return True
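# Behaviour sketch for isSubDict as written above (illustrative): False is only
# returned when a key shared by both arguments maps to OrderedDicts on both
# sides and some inner entry present in both differs; other mismatches, and
# top-level keys missing from `dictionary`, pass the check.
#
#   a = OrderedDict([("system-dirs", OrderedDict([("/a", "/tmp/.X11-unix")]))])
#   b = OrderedDict([("system-dirs", OrderedDict([("/a", "/somewhere/else")]))])
#   isSubDict(a, b)               # -> False (shared inner key differs)
#   isSubDict(a, OrderedDict())   # -> True  (missing top-level keys are tolerated)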
| subuser-security/subuser | subuserlib/classes/subuserSubmodules/run/x11Bridge.py | Python | lgpl-3.0 | 14,679 |
"""
Contains functionality related to interfacing wiggly and puq
"""
import binascii,os,re
import numpy as np
import matplotlib.pyplot as plt
import shapely.geometry
import CrispObjects,FuzzyObjects,fuzz,utilities
import _distributions as distributions
import puq
class ObjectManager(object):
"""
Manages Wiggly objects.
- *ignore_certain_vertices*: Used in the SA module to limit the number of runs required
when there are partially certain objects.
If True, then certain vertices (or edges) which would otherwise be added to puq as
ConstantParameters are not added at all.
================ =============================================================================
Object Type Behavior
================ =============================================================================
Rigid No effect. x,y,theta are included in the SA even if they are certain.
deformable Non-uncertain vertices are not included in the SA
vertex-defined Non-uncertain vertices are not included in the SA (todo)
edge-defined Non-uncertain edges are not included in the SA (todo)
================ =============================================================================
"""
def __init__(self,ignore_certain_vertices=False):
self._objects={} #keys are object names, values are dictionaries with keys:values
# 'obj':FObject 'group':the group obj belongs to
# 'tag':any additional information about obj
self._groups={} #keys are group names, values are lists of object names
self._classes={} #keys are class names, values are lists of object names
self._shapesFileFirstWrite=True
self._ignore_certain_vertices=ignore_certain_vertices
@staticmethod
def isShapeParam(paramname):
"""
Given a parameter name, checks whether this parameter indicates that it belongs
to a shape.
- *paramname*: a string indicating the parameter name to check
Returns: true if this parameter name indicates that it is part of a shape
"""
if paramname!=None:
if str(paramname).startswith("wXY__"):
return True
else:
return False
else:
return False
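    # Illustrative examples of the naming convention (hypothetical parameter names):
    #   ObjectManager.isShapeParam('wXY__building1__x__0')     -> True
    #   ObjectManager.isShapeParam('wXY__building1__t__0__5')  -> True  (trailing field: alpha cut * 10)
    #   ObjectManager.isShapeParam('porosity')                 -> False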
@staticmethod
def isShapeParamLegacy(paramname):
"""
Given a legacy parameter name, checks whether this parameter indicates that it belongs
to a shape.
- *paramname*: a string indicating the parameter name to check
Returns: true if this parameter name indicates that it is part of a shape.
After changing the wiggly parameter separator to __ instead of _, this
function is needed to check whether the separators should be __ instead of _
when parsing the parameters into shapes.
"""
if paramname!=None:
if str(paramname).startswith("wXY_"):
return True
else:
return False
else:
return False
@staticmethod
def getBaseShapes(shapesFileName='shapes.json'):
"""
Constructs shapely shapes out of a previously saved shapes file generated by
:func:`ObjectManager.crispObjects2PuqParams` or :func:`ObjectManager.fuzzyObjects2PuqParams`.
Returns a dictionary in the same format as :func:`ObjectManager.puq2Shapes`.
"""
if not os.path.isfile(shapesFileName):
utilities.msg('getBaseShapes:{} not found. No shapes generated'.format(shapesFileName),'w')
return {}
f=open(shapesFileName,'r')
baseShapesDict=puq.unpickle(f.read())
shapes={}
#search for all params corresponding to this shape
for shpName,shpItem in baseShapesDict.iteritems():
shp_rt=None
if len(shpItem['pts_x'])==1:
#point
shp_rt=shapely.geometry.Point(shpItem['pts_x'],shpItem['pts_y'])
elif shpItem['isClosed']:
#polygon
shp_rt=shapely.geometry.Polygon(np.vstack((shpItem['pts_x'],shpItem['pts_y'])).T)
else:
#linestring
shp_rt=shapely.geometry.LineString(np.vstack((shpItem['pts_x'],shpItem['pts_y'])).T)
if shp_rt!=None:
shapes[shpName]={'shp':shp_rt,'desc':'base shape','type':shpItem['type']}
return shapes
@staticmethod
def puq2Shapes(shapesFileName='shapes.json',params=None,paramsFileName=None):
"""
Constructs distorted shapes out of a puq parameters, using a set of base shapes.
- *shapesFileName*: a file name containing the base shapes. See :func:`crispObjects2PuqParams`
for more information.
- *params*: a list of dictionaries. Each dictionary contains the keys 'name',
'desc', 'value'. Key 'name' is the puq param name constructed using the \*2puqParams methods.
'desc' (may be None) is the description and 'value' is a single value
of the parameter.
[ {'name':<string>, 'desc':<string>, 'value':<float>}, ... ]
See :func:`crispObjects2PuqParams` for details on the convention for 'name'.
- *paramsFileName*: If set, *params* is read from file. See the paramsByFile option of
the puq TestProgram documentation.
Returns: a dictionary with keys equal to the parameter name. The parameter name is the
original name, extracted from the name generated by \*2puqParams. Values are dictionaries
with the following key:value pairs -- 'shp':Shapely shape 'desc':description.
{<paramName string>, {'shp':<Shapely object>, 'desc':<string>,
'type':<string>} 'alphacut':<float or None>, ...}
'type' is the type of object: 'D'=deformable, 'R'=rigid, 'V'=vertex-defined, 'E'=edge-defined.
Any parameters which weren't constructed with :func:`crispObjects2PuqParams` or
:func:`fuzzyObjects2PuqParams` are ignored.
"""
shapes={}
if paramsFileName!=None:
if params!=None:
print('paramsFileName set. Ignoring params')
#paramvalues is a list of tuples.
paramValues=np.loadtxt(paramsFileName,
dtype={"names":("p_name","p_val","p_desc"),
"formats":(np.object,np.float,np.object)})
params=[]
for tupleParam in paramValues:
params.append({'name':tupleParam[0], 'value':tupleParam[1],
'desc':tupleParam[2]})
else:
if params==None:
raise Exception("Must specify either params or paramsFileName")
#Parse object metadata from the puq parameter name.
#data contains the parsed information:
#key=name value=dictionary with
# 'index':list of int
# 'varType':list of string
# 'alphacut':list of float. (Empty list for probabilistic parameters)
# 'desc': list of string
# 'value': list of float
data={}
for param in params:
if param['name']!=None:
if ObjectManager.isShapeParam(param['name']) or ObjectManager.isShapeParamLegacy(param['name']):
if ObjectManager.isShapeParam(param['name']):
parse=param['name'].split('__')
else:
#for files which were made before the switch to double underscores
parse=param['name'].split('_')
#the parameter has been verified to come from *2PuqParams.
#do more validation
skipped=False
if len(parse)<4 or len(parse)>5:
print("Error: parameter {} isn't in the proper format.".format(param['name']))
skipped=True
elif parse[2]!='x' and parse[2]!='y' and parse[2]!='t' and parse[2]!='e':
                        print("Error: 'varType' must be x, y, t, or e. Got {}".format(parse[2]))
skipped=True
else:
objName=parse[1]
varType=parse[2]
index=int(parse[3])
acut=None
#get the alpha cut for objects that have it
if len(parse)==5:
acut=float(parse[4])/10.
if not skipped:
if not objName in data.keys():
data[objName]={'index':[],'varType':[],'value':[],'desc':[], 'alphacut':[]}
data[objName]['index'].append(index)
data[objName]['varType'].append(varType)
data[objName]['desc'].append(param['desc'])
data[objName]['value'].append(param['value'])
data[objName]['alphacut'].append(acut)
if skipped:
print("Skipped {}".format(param['name']))
#end if ObjectManager.isShapeParam(param['name'])
#end if param['name']!=None
#end for param in params
#open the file containing the base shapes to distort
#baseShapes is a dict with keys equal to objectname
#and values of dictionaries of properties. see shapes.json
if not os.path.isfile(shapesFileName):
utilities.msg('puq2shapes:{} not found. No shapes generated'.format(shapesFileName),'w')
return {}
f=open(shapesFileName,'r')
baseShapesDict=puq.unpickle(f.read())
#search for all params corresponding to this shape
for shpName,shpItem in baseShapesDict.iteritems():
shp_rt=None
if shpItem['type']=='R':
                #rigid object. we're looking for 3 variables: x, y, theta
if shpName in data.keys():
shp_rt=ObjectManager._puq2Shapes_rigidObjectSingle(shpName,data[shpName],shpItem)
elif shpItem['type']=='D':
#deformable object. we're looking for x,y variables for each vertex.
#building the shape is different since we don't need to use the base shape
#all the information for constructing a shape is present in the puq parameter
if shpName in data.keys():
shp_rt=ObjectManager._puq2Shapes_deformableObjectSingle(shpName,data[shpName],shpItem)
elif shpItem['type']=='V':
#vertex defined object. Same as deformable object for the purpose of
#building a single shapely shape.
if shpName in data.keys():
shp_rt=ObjectManager._puq2Shapes_vertexObjectSingle(shpName,data[shpName],shpItem)
elif shpItem['type']=='E':
#edge-defined object
if shpName in data.keys():
shp_rt=ObjectManager._puq2Shapes_edgeObjectSingle(shpName,data[shpName],shpItem)
else:
print('Type {} not supported. Ignoring'.format(shpItem['type']))
if shp_rt!=None:
shapes[shpName]={'shp':shp_rt,'desc':data[shpName]['desc'][0],
'type':shpItem['type'],'alphacut':data[shpName]['alphacut'][0]}
return shapes
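    # Minimal usage sketch for puq2Shapes (illustrative; the parameter names and
    # values are hypothetical, and shapes.json must have been written beforehand
    # by one of the *2PuqParams methods):
    #   params = [{'name':'wXY__building1__x__0', 'desc':'x of vertex 0', 'value':10.2},
    #             {'name':'wXY__building1__y__0', 'desc':'y of vertex 0', 'value': 4.7}]
    #   shapes = ObjectManager.puq2Shapes('shapes.json', params=params)
    #   shapes['building1']['shp']    # Shapely geometry for this realization
    #   shapes['building1']['type']   # 'D', 'R', 'V' or 'E'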
@staticmethod
def _indices(lst, element):
result = []
offset = -1
while True:
try:
offset = lst.index(element, offset+1)
except ValueError:
return result
result.append(offset)
@staticmethod
def _puq2Shapes_deformableObjectSingle(objName,objData,baseShapeData):
"""
Builds a single Shapely shape out of deformable object data.
See :func:`ObjectManager._puq2Shapes_rigidObjectSingle`
"""
shp_rt=None
#do some more validation. The length of all lists must be the same
#for each object.
len1=len(objData['index'])
lens=np.r_[len(objData['varType']),len(objData['value'])]
if np.any(lens!=len1):
#the length of the vertex indices must be the same as the length of
#all the other lists
print("Error. Can't create object {}. The data was corrupt".format(objName))
else:
#ensure that we have the same number of x and y entries using generator comprehension
count_x=sum(xy=='x' for xy in objData['varType'])
count_y=sum(xy=='y' for xy in objData['varType'])
if count_x!=count_y:
print("Error for {}. There must be the same number of x and y values".format(objName))
elif count_x!=(len1/2):
                print("Error for {}. unexpected count for 'varType'.".format(objName))
else:
pts_x,pts_y=ObjectManager._puq2shapes_deformableVertex_buildobject_helper(baseShapeData,
objData,
objName)
#we now can build the shape
do=CrispObjects.DeformableObjectFromValues(baseShapeData['pts_x'],baseShapeData['pts_y'],
baseShapeData['uncert_pts'],baseShapeData['isClosed'],
pts_x,pts_y)
shp_rt= do.realizationsPolygons[0]
return shp_rt
@staticmethod
def _puq2Shapes_rigidObjectSingle(objName,objData,baseShapeData):
"""
Builds a single Shapely shape out of rigid object data.
- *objName*: the name of the object
- *objData*: a dictionary
{'index':[int], 'varType':[string], 'desc':[string], 'value':[float]}
- *baseShapeData*: a dictionary.
Returns a Shapely shape. If there is an error, returns None (unless an exception is raised).
"""
shp_rt=None
lens=np.r_[len(objData['varType']),len(objData['value'])]
if np.any(lens!=3):
#the length of the vertex indices must be the same as the length of
#all the other lists
print("Warning. Can't create object {}. The data was corrupt".format(objName))
else:
new_x=np.nan
new_y=np.nan
theta=np.nan
for i in range(3):
xyt=objData['varType'][i]
if xyt=='x':
new_x=objData['value'][i]
elif xyt=='y':
new_y=objData['value'][i]
elif xyt=='t':
theta=objData['value'][i]
else:
raise Exception('Unexpected value for varType ({}) for rigid object {}'.format(xyt,objName))
if new_x==np.nan or new_y==np.nan or theta==np.nan:
raise Exception('rigid object {} is corrupted'.format(objName))
#now build a skeleton rigid object to hold this realization
ro=CrispObjects.RigidObjectFromValues(baseShapeData['pts_x'],baseShapeData['pts_y'],
baseShapeData['origin'][0],baseShapeData['origin'][1],
baseShapeData['uncert_pts'],baseShapeData['isClosed'],
np.r_[new_x],np.r_[new_y],np.r_[theta])
#Get the shape. there should only be 1 anyways
shp_rt=ro.realizationsPolygons[0]
return shp_rt
@staticmethod
def _puq2Shapes_edgeObjectSingle(objName,objData,baseShapeData):
"""
Builds a single Shapely shape out of edge-defined object data.
- *objName*: the name of the object
- *objData*: a dictionary
{'index':[int], 'varType':[string], 'desc':[string], 'value':[float], 'alphacut':[float]}
- *baseShapeData*: a dictionary.
Returns a Shapely shape. If there is an error, returns None (unless an exception is raised).
"""
shp_rt=None
#do some more validation. The length of all lists must be the same
#for each object.
len1=len(objData['index'])
lens=np.r_[len(objData['varType']),len(objData['value']), len(objData['alphacut'])]
if np.any(lens!=len1):
#the length of the edge indices must be the same as the length of
#all the other lists
print("Warning. Can't create object {}. The data was corrupt".format(objName))
else:
if baseShapeData['isClosed']:
numedges=len(baseShapeData['pts_x'])+1
else:
numedges=len(baseShapeData['pts_x'])
edge_offsets=np.zeros(numedges)*np.nan
for i in range(numedges):
idxs=ObjectManager._indices(objData['index'],i)
if len(idxs)==0:
edge_offsets[i]=0
elif len(idxs)!=1:
                    raise Exception('You should not see this error. Edge-defined object {} had more than one variable associated with index {}'.format(objName,i,))
else:
xyt=objData['varType'][idxs[0]]
if xyt=='e':
edge_offsets[i]=objData['value'][idxs[0]]
else:
raise Exception('Unexpected value for varType ({}) for edge-defined object {}'.format(xyt,objName))
#end if
#verify that all edges have the same alpha cut
acut=objData['alphacut'][0] #single alpha cut value
if any([a!=acut for a in objData['alphacut']]):
raise Exception('The edges must all have the same alpha cuts')
#now build a skeleton rigid object to hold this realization
#since this is a single realization, it only has 1 shape at a single alpha cut.
samples_dict={acut:edge_offsets}
edo=FuzzyObjects.EdgeDefinedObjectFromValues(baseShapeData['pts_x'],baseShapeData['pts_y'],
baseShapeData['isClosed'],
samples_dict)
#Get the shape. there should only be 1
shp_rt=edo.realizationsPolygons[acut][0]
return shp_rt
@staticmethod
def _puq2Shapes_vertexObjectSingle(objName,objData,baseShapeData):
"""
Builds a single Shapely shape out of vertex-defined object data.
- *objName*: the name of the object
- *objData*: a dictionary
{'index':[int], 'varType':[string], 'desc':[string], 'value':[float], 'alphacut':[float]}
- *baseShapeData*: a dictionary.
Returns a Shapely shape. If there is an error, returns None (unless an exception is raised).
"""
shp_rt=None
#do some more validation. The length of all lists must be the same
#for each object.
len1=len(objData['index'])
lens=np.r_[len(objData['varType']),len(objData['value']), len(objData['alphacut'])]
if np.any(lens!=len1):
#the length of the vertex indices must be the same as the length of
#all the other lists
print("Warning. Can't create object {}. The data was corrupt".format(objName))
else:
#ensure that we have the same number of x and y entries using generator comprehension
count_x=sum(xy=='x' for xy in objData['varType'])
count_y=sum(xy=='y' for xy in objData['varType'])
if count_x!=count_y:
                print("Error for {}. There must be the same number of x and y values".format(objName))
elif count_x!=(len1/2):
print("Error for {}. unexpected count for 'varType'.".format(objName))
else:
pts_x,pts_y=ObjectManager._puq2shapes_deformableVertex_buildobject_helper(baseShapeData,
objData,
objName)
#verify that all edges have the same alpha cut
acut=objData['alphacut'][0] #single alpha cut value
if any([a!=acut for a in objData['alphacut']]):
raise Exception('The edges must all have the same alpha cuts')
#now build a skeleton object to hold this realization
#since this is a single realization, it only has 1 shape at a single alpha cut.
samples_dict={acut:np.hstack((pts_x,pts_y))}
vdo=FuzzyObjects.VertexDefinedObjectFromValues(baseShapeData['pts_x'],baseShapeData['pts_y'],
baseShapeData['isClosed'],
samples_dict)
#Get the shape. there should only be 1
shp_rt=vdo.realizationsPolygons[acut][0]
#end if
return shp_rt
@staticmethod
def _puq2shapes_deformableVertex_buildobject_helper(baseShapeData, objData,objName):
#make sure the lists are sorted according to the index so that we build
#the object in the right order. http://stackoverflow.com/questions/6618515
# ##DISABLED## not needed anymore
# objData_sorted=sorted(zip(objData['index'],objData['xORy'],
# objData['isClosed'],objData['value']),key=lambda pair:pair[0])
numvert=len(baseShapeData['pts_x'])
#build arrays which will hold the vertices
pts_x=np.zeros(numvert)*np.nan
pts_y=np.zeros(numvert)*np.nan
for i in range(numvert):
idxs=ObjectManager._indices(objData['index'],i)
if len(idxs)==0:
#if we're here, the ith vertex of baseshape was not found in the list
#of uncertain vertices. Therefore it must be certain. replace it with
#the corresponding vertex of baseshape
pts_x[i]=baseShapeData['pts_x'][i]
pts_y[i]=baseShapeData['pts_y'][i]
elif len(idxs)!=2:
raise Exception('You should not see this error. Deformable object {} had more than two variables associated with index {}'.format(objName,i,))
for idx in idxs:
#insert the x or y value into the correct location in the list.
#due to the above checks, there is guaranteed to be the same
#number of x and y values
xyt=objData['varType'][idx]
if xyt=='x':
pts_x[i]=objData['value'][idx]
elif xyt=='y':
pts_y[i]=objData['value'][idx]
else:
raise Exception('Unexpected value for varType ({}) for deformable object {}'.format(xyt,objName))
#end for
return pts_x,pts_y
def addObject(self,obj,name=None,data=None):
"""
Adds a CrispObject or FuzzyObject to the manager.
- *obj*: The object to add. The object must have been previously initialized
and realizations generated.
        - *name*: A string which identifies the object. Must consist of letters, numbers and
          single underscores only (double underscores are reserved) and must follow the
          restrictions for valid Python variable names. If not specified, a name will be
          generated automatically. If specified, it must be unique, else an error will
          be raised.
- *data*: Any optional extra data to be stored with the object.
When adding multiple fuzzy objects (by subsequent calls to this function), all
objects must have the same alpha cuts.
"""
if name!=None and name!="":
#don't allow name with special characters. Also
reg=re.compile(r'^[^a-zA-Z_]')
if bool(reg.search(name)):
raise Exception('{} is not valid. Name must start with a letter'.format(name))
reg=re.compile(r'__+')
if bool(reg.search(name)):
raise Exception('{} is not valid. Double underscores are reserved.'.format(name))
reg=re.compile(r'\W')
if bool(reg.search(name)):
raise Exception('{} is not valid. Name can only contain letters, numbers and _'.format(name))
name=self._getNextKey(self._objects) if name==None or name=="" else name
if obj._lastDistr==None:
raise Exception('Object not initialized. Must generate realizations first')
if utilities.isCrispObject(obj):
cls='probabilistic'
elif utilities.isFuzzyObject(obj):
cls='fuzzy'
#check to make sure they all have the same alpha cuts
if cls in self._classes.keys():
for fuzzyobjname,managedfuzzyobj in self._objects.iteritems():
if managedfuzzyobj['class']==cls:
if not all([cut in managedfuzzyobj['obj'].realizations.keys() for cut in obj.realizations.keys()]):
raise Exception('All fuzzy objects must have the same alpha cuts')
else:
raise Exception ("unrecognized type " + str(type(obj)))
try:
#add to dictionary. object is stored by reference
self._objects[name]={'obj':obj, 'class':cls, 'data':data}
if cls in self._classes.keys():
#if group exists, add obj to it
self._classes[cls].append(name)
else:
self._classes[cls]=[name]
except Exception,e:
raise Exception('Error adding object of type:{} '.format(cls) + str(e))
@property
def objects(self):
"""
Returns the objects currently managed by ObjectManager.
Objects are returned in the form of a dictionary with keys equal to the
object names and values containing dictionaries with keys 'obj' (the Wiggly
object being managed), 'class' (the group assigned to obj), and 'data'.
        {$name$: {'obj': <Wiggly object>, 'class': <string>, 'data': <object>}}
'class' can be 'probabilistic' or 'fuzzy'.
"""
return self._objects
@property
def probabilisticObjects(self):
"""
Returns a list of all the CrispObjects that have been added via :func:`addObject`.
To get the object's extra data use :attr:`objects`
"""
if not 'probabilistic' in self._classes:
return []
return [self._objects[obj]['obj'] for obj in self._classes['probabilistic']]
@property
def fuzzyObjects(self):
"""
Returns a list of all the FuzzyObjects that have been added via :func:`addObject`.
To get the object's extra data use :attr:`objects`
"""
if not 'fuzzy' in self._classes:
return []
return [self._objects[obj]['obj'] for obj in self._classes['fuzzy']]
def crispObjects2PuqParams(self,shapesFileName='shapes.json',use_samples=True):
"""
Converts crisp (probabilistic) objects into puq Parameter objects.
- *shapesFileName*: a file name which holds the base shapes.
        - *use_samples*: If True, uses the FObject's previously generated realizations (default).
          If the object element (i.e., vertex, edge, etc.) is not a constant, a puq CustomParameter
          object is returned with its values property set to the realizations.
          If False, an appropriate puq Parameter object is returned, with its values property set
          to an empty list. E.g., for a normally distributed DeformableObject, NormalParameter
          objects are returned. Unlike the CustomParameters generated when this flag is True,
          these Parameter objects will be sampled by puq.
Returns: a list of puq CustomParameter or ConstantParameter objects.
The name of each Parameter is based on the name given to the associated
object when it was added in addObject. For example, for an object named
'house', the x-coordinate of
the third vertex of the object is encoded using the following convention:
wXY__house__x__2
All parameters constructed with this function are identified by the first 5 characters:
'wXY\_\_'
The total number of Parameters contributed to the returned list can
be determined from the table:
=================== =====================
Object Type Num. Parameters
contributed
=================== =====================
DeformableObject 2N - one for each of
the x,y coordinate
pairs for the N
vertices.
RigidObject 3 - x, y, and theta
=================== =====================
For example, if the ObjectManager contains a single RigidObject, calling
this function will return a list of 3 custom parameters and the base shape
of the rigid object will be written to *shapesFileName*
"""
if shapesFileName==None:
raise Exception("must specify a file name which will hold the shapes to process")
if len(self._objects)==0:
raise Exception("There are no objects to convert")
shapes={}
puq_params=[]
for objName in self._classes['probabilistic']:
managedObj=self._objects[objName]
obj=managedObj['obj']
isClosed=obj.isClosed
uncert_pts=obj.uncertainVertices
objtype=None
origin=None
if utilities.isRigidObject(obj):
desc='rigid object coord'
objtype='R'
#get the data from the rigid object
x_samples,y_samples,t_samples=obj.realizationsParams
origin=obj.origin
data={'x':x_samples, 'y':y_samples, 't':t_samples}
var=['x','y','t']
for i,v in enumerate(var):
name='wXY__{}__{}__{}'.format(objName,v,0)
if np.all(data[v]==data[v][0]):
#if all the samples for this variable are the same, it is a constant
param=puq.ConstantParameter(name,'[C] ' + desc,attrs=[('uncert_type','prob-const')],
value=np.r_[data[v][0]])
else:
if use_samples:
param=puq.CustomParameter(name,desc,attrs=[('uncert_type','prob')],
pdf=data[v],use_samples_val=True)
else:
param=self._element2SpecificPuqParam(name,desc,obj,i)
if param!=None:
puq_params.append(param)
elif utilities.isDeformableObject(obj):
desc='deformable object coord.'
objtype='D'
#get the data from the object
pts_x_samples,pts_y_samples=obj.realizations
for i in range(np.size(pts_x_samples,1)):
#for the x,y coord of each vertex (column of pts_<>_samples),
#create a parameter. set the parameter values to the
#samples (the rows)
name='wXY__{}__x__{}'.format(objName,i)
if np.all(pts_x_samples[:,i]==pts_x_samples[0,i]):
#if all the samples for this variable are the same, it is a constant
param=puq.ConstantParameter(name,'[C] ' + desc,attrs=[('uncert_type','prob-const')],
value=np.r_[pts_x_samples[0,i]])
if self._ignore_certain_vertices:
param=None
else:
if use_samples:
param=puq.CustomParameter(name,desc,attrs=[('uncert_type','prob')],
pdf=pts_x_samples[:,i],use_samples_val=True)
else:
param=self._element2SpecificPuqParam(name,desc,obj,i)
if param!=None:
puq_params.append(param)
name='wXY__{}__y__{}'.format(objName,i)
if np.all(pts_y_samples[:,i]==pts_y_samples[0,i]):
param=puq.ConstantParameter(name,'[C] ' + desc,attrs=[('uncert_type','prob-const')],
value=np.r_[pts_y_samples[0,i]])
if self._ignore_certain_vertices:
param=None
else:
if use_samples:
param=puq.CustomParameter(name,desc,attrs=[('uncert_type','prob')],
pdf=pts_y_samples[:,i],use_samples_val=True)
else:
param=self._element2SpecificPuqParam(name,desc,obj,np.size(pts_x_samples,1)+i)
if param!=None:
puq_params.append(param)
else:
raise Exception("object of type {} not supported".format(str(type(obj))))
shapes[objName]={'type':objtype,'pts_x':obj.coords[0], 'pts_y':obj.coords[1],
'isClosed':isClosed,'origin':origin, 'uncert_pts':uncert_pts}
#end for: objName in self._classes['probabilistic']
self._writeShapesFile(shapes,shapesFileName)
return puq_params
def fuzzyObjects2PuqParams(self,shapesFileName='shapes.json',use_samples=True):
"""
Converts fuzzy objects into puq Parameter objects.
- *shapesFileName*: a file name to which the base shapes will be written.
        - *use_samples*: If True, uses the FObject's previously generated realizations (default).
          If the object element (i.e., vertex, edge, etc.) is not a constant, a puq CustomParameter
          object is returned with its values property set to the realizations.
          If False, an appropriate puq Parameter object is returned, with its values property set
          to an empty list. E.g., for a normally distributed DeformableObject, NormalParameter
          objects are returned. Unlike the CustomParameters generated when this flag is True,
          these Parameter objects will be sampled by puq.
Returns a dictionary with keys equal to alpha cuts of the objects. The values
are dictionaries with keys 'params', 'num_realizations'
{ $a-cut$:{'params':[object], 'num_realizations':integer}, ...}
'params' is a list of puq CustomParameter or ConstantParameter objects each containing
samples of vertex coordinates or edges at that
particular alpha cut. **All non-constant parameters at a given a-cut are guaranteed to have
        the same number of realizations.** This number is the maximum number of realizations of
all objects at that alpha cut and is given in the
'num_realizations' entry. If a parameter is a constant, it will have 1 realization.
For example at alpha-cut 0.2, O1 has 10 realizations and O2 has 8.
The generated puq parameters for O1 will all have 10 realizations. For O2, the number of
realizations will be increased to 10 by repeating the first two samples. If O2 is a constant
instead, the number of realizations in the puq parameter is 1.
All fuzzy objects are guaranteed to have the same alpha cuts.
The name of each Parameter is based on the name given to the associated
object when it was added in addObject. For example, for an object named
'house', the x-coordinate of
the third vertex of the object at alpha cut 1 is encoded using the following convention:
wXY__house__x__2__10
For an edge defined object, third edge alpha cut 0.8,
wXY__house__e__2__8
All parameters constructed with this function are identified by the first 5 characters
'wXY\_\_'.
The total number of Parameters contributed to the returned dictionary for a single alpha-cut
can be determined from the table:
=================== =====================
Object Type Num. Parameters
contributed
=================== =====================
VertexDefinedObject 2N - one for each of
                            the x,y coordinate
pairs for the N
vertices.
EdgeDefinedObject N - one for each of
the N edges of the
object.
=================== =====================
"""
if shapesFileName==None:
raise Exception("must specify a file name which will hold the shapes to process")
if len(self._objects)==0:
raise Exception("There are no objects to convert")
shapes={}
puq_params={}
#in addObject we verified that all fuzzy objects have the same alpha cuts.
#Get a list of alpha cuts from the first object
acuts=self.fuzzyObjects[0].realizations.keys()
#loop over the alpha cuts. for each alpha cut, determine which object
#has the most realizations. For objects which have fewer realizations than
#this number, replicate the realizations so that they all have the same number.
for acut in acuts:
maxrealizations=0
for objName in self._classes['fuzzy']:
managedObj=self._objects[objName] #dictionary. see addObject
obj=managedObj['obj'] #FObject
if len(obj.getRealizations4Sim(acut))>maxrealizations:
maxrealizations=len(obj.getRealizations4Sim(acut))
#print(acut,objName,len(obj.getRealizations4Sim(acut)))
#print('maxrealizations',maxrealizations)
#holds the puq params at this alpha level
puq_params_alpha=[]
for objName in self._classes['fuzzy']:
managedObj=self._objects[objName] #dictionary. see addObject
obj=managedObj['obj'] #FObject
isClosed=obj.isClosed
objtype=None
#list of numpy arrays
#Each array is guaranteed to have the same length
realizations=obj.getRealizations4Sim(acut)
#fuzzyVars: columns are variables, rows realizations
#the order is defined in the constructor of VertexDefinedObject.
#The order is first all the x variables, then the y variables.
fuzzyVars=np.vstack(realizations)
nFuzzyVars=np.size(fuzzyVars,1)
numrealizations_this=np.size(fuzzyVars,0)
#print(acut,objName,np.size(fuzzyVars,0))
if numrealizations_this<maxrealizations:
#if this object has fewer realizations than the maximum out of
#all fuzzy objects, expand the fuzzyVars matrix to make up the difference
#by copying the required values from the top
fuzzyVars=np.take(fuzzyVars,range(maxrealizations),axis=0,mode='wrap')
print('Fuzzy var {} had n={} realiz. at a-cut {}. Adjusted to n={}'.format(
objName,numrealizations_this,acut,maxrealizations) )
#this fuzzy variable now has the same number of realizations as the others.
#We can now process it.
if utilities.isVertexDefinedObject(obj):
desc='vertex-defined obj coord @a-cut {}'.format(acut)
objtype='V'
var=['x','y']
if nFuzzyVars%2!=0:
raise Exception('Vertex-defined object must have the same number of x and y variables')
for i in range(nFuzzyVars/2):
for k in range(len(var)):
name='wXY__{}__{}__{}__{}'.format(objName,var[k],i,format(acut*10,'.0f'))
samples=fuzzyVars[:,i+nFuzzyVars/2*k]
if np.all(samples==samples[0]):
#if all the samples for this variable are the same, it is a constant
param=puq.ConstantParameter(name,'[C] ' + desc,attrs=[('uncert_type','fuzzy-const')],
value=np.r_[samples[0]])
if self._ignore_certain_vertices:
param=None
else:
if use_samples:
param=puq.CustomParameter(name,desc,attrs=[('uncert_type','fuzzy')],
pdf=samples,use_samples_val=True)
else:
#print(name)
param=self._element2SpecificPuqParam(name,desc,obj,(nFuzzyVars/2)*k+i,acut)
if param!=None:
puq_params_alpha.append(param)
#end for k in range(len(var))
#end for i in range(nFuzzyVars/2)
elif utilities.isEdgeDefinedObject(obj):
desc='edge-defined obj edge offset @a-cut {}'.format(acut)
objtype='E'
for i in range(nFuzzyVars):
name='wXY__{}__e__{}__{}'.format(objName,i,format(acut*10,'.0f'))
samples=fuzzyVars[:,i]
if np.all(samples==samples[0]):
#if all the samples for this variable are the same, it is a constant
param=puq.ConstantParameter(name,'[C] ' + desc,attrs=[('uncert_type','fuzzy-const')],
value=np.r_[samples[0]])
if self._ignore_certain_vertices:
param=None
else:
if use_samples:
param=puq.CustomParameter(name,desc,attrs=[('uncert_type','fuzzy')],
pdf=samples,use_samples_val=True)
else:
param=self._element2SpecificPuqParam(name,desc,obj,i,acut)
if param!=None:
puq_params_alpha.append(param)
else:
raise Exception("object of type {} not supported".format(str(type(obj))))
shapes[objName]={'type':objtype,'pts_x':obj.coords[0], 'pts_y':obj.coords[1],
'isClosed':isClosed,'origin':None, 'uncert_pts':None}
#end for: objName in self._classes['fuzzy']
puq_params[acut]={'params':puq_params_alpha, 'num_realizations':maxrealizations}
#end for: acut in acuts
self._writeShapesFile(shapes,shapesFileName)
return puq_params
def _element2SpecificPuqParam(self,name,desc,obj,i,alpha=None):
"""
Helper function to convert an FObject variable to a puq parameter of a certain type.
The returned puq parameter can be fed to puq which will sample it.
- *name,desc*: the parameter name and description
- *obj*: the object to which the element to be converted belongs
- *i*: the index of the element to convert. This varies depending on the object type
e.g., for a deformable object, index 0 converts the x coordinate of the first vertex.
- *alpha*: the alpha cut to process (only for fuzzy *obj*)
Note this function does not take into consideration elements that are certain. Such
elements are treated as uncertain. Therefore the conversion of these elements must
take place outside this function.
"""
if utilities.isCrispObject(obj):
if obj._lastDistr==distributions.DIST_NORM:
param=puq.NormalParameter(name,desc,attrs=[('uncert_type','prob')],
mean=obj._means[i],dev=np.sqrt(obj._variances[i]))
param.values=[]
elif obj._lastDistr==distributions.DIST_UNIF:
param=puq.UniformParameter(name,desc,attrs=[('uncert_type','prob')],
min=obj._bound_lower[i],max=obj._bound_upper[i])
param.values=[]
else:
raise Exception('Distribution of type {} not supported'.format(obj._lastDistr))
elif utilities.isFuzzyObject(obj):
bound_lower=obj._fuzzyVariables[i].alpha(alpha)[0]
bound_upper=obj._fuzzyVariables[i].alpha(alpha)[1]
param=puq.UniformParameter(name,desc,attrs=[('uncert_type','fuzzy')],
min=bound_lower,max=bound_upper)
param.values=[]
else:
raise Exception('_element2SpecificPuqParam: obj type not supported')
return param
def _writeShapesFile(self,shapes,shapesFileName):
"""
writes the auxiliary shapes file used to reconstruct the shapes.
- *shapes*: a dictionary.
- *shapesFileName*: the file to write
"""
existingshapes={}
try:
if not self._shapesFileFirstWrite:
#only try to read from an existing file if we've already written to it.
#This avoids reading from a file left over from a previous run
f=open(shapesFileName,'r')
existingshapes=puq.unpickle(f.read())
f.close()
except IOError:
pass
f=open(shapesFileName,'w')
existingshapes.update(shapes)
s=puq.jpickle.pickle(existingshapes)
f.write(s)
f.close()
self._shapesFileFirstWrite=False
def _getNextKey(self,d):
"""
Gets a unique key for the dictionary *d* consisting of a 16 character hex value.
"""
newkey=binascii.hexlify(os.urandom(8))
i=0
while newkey in d.keys():
newkey=binascii.hexlify(os.urandom(8))
i+=1
if i>1000:
raise Exception("Couldn't find unique id")
return newkey
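
# Illustrative helper (not part of the original class API): decodes the 'wXY__'
# parameter naming convention documented in crispObjects2PuqParams and
# fuzzyObjects2PuqParams. It assumes names of the form
# wXY__<objName>__<var>__<index> (crisp) or wXY__<objName>__<var>__<index>__<acut*10> (fuzzy).
def _example_decode_wxy_name(param_name):
    parts = param_name.split('__')
    decoded = {'objName': parts[1], 'var': parts[2], 'index': int(parts[3])}
    if len(parts) > 4:
        # fuzzy parameters carry the alpha cut scaled by 10 as the last field
        decoded['alphacut'] = float(parts[4]) / 10.
    return decoded
# e.g., _example_decode_wxy_name('wXY__house__x__2__10')
# returns {'objName': 'house', 'var': 'x', 'index': 2, 'alphacut': 1.0}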
def test_addFuzzyObjects():
plt.close('all')
n=10
np.random.seed(96931694)
method='random'
#method='reducedtransformation' #only for plotting. setting this will fail the assertion below
#n=None
#acuts=np.linspace(0,1,num=11)
acuts=np.r_[0,0.5,1] #fewer for clarity
#define a polygon
pt_x=np.r_[480.,485,520,510,520]
pt_y=np.r_[123.,100,105,110,117]
#######
#edge defined
#define fuzzy numbers for all the edges.
#trapezoidal fuzzy numbers are in the form
# (kernel_lower,kernel_upper), (support_lower,support_upper)
edgeMembFcn=[fuzz.TrapezoidalFuzzyNumber((0, 0), (0, 0)),
fuzz.TrapezoidalFuzzyNumber((0, 0), (-3, 3)),
fuzz.TrapezoidalFuzzyNumber((-1.5, 1.5), (-5, 7)),
fuzz.TrapezoidalFuzzyNumber((-1, 1), (-3, 3)),
fuzz.TrapezoidalFuzzyNumber((-.75, .75), (-1, 1))]
edo=FuzzyObjects.EdgeDefinedObject(pt_x,pt_y,edgeMembFcn,isClosed=True)
edo.generateRealizations(n,acuts,method)
#edo.plot()
#edo.plotFuzzyNumbers()
###########
#vertex defined
membFcn_x=[ fuzz.TrapezoidalFuzzyNumber((0, 0), (0, 0)),
fuzz.TrapezoidalFuzzyNumber((0, 0), (-2, 2)),
fuzz.TrapezoidalFuzzyNumber((-2, 2), (-2, 2)),
fuzz.TrapezoidalFuzzyNumber((-1, 1), (-1.5, 3)),
fuzz.TrapezoidalFuzzyNumber((-0.5, 0.5), (-2, 1))]
#test a point
membFcn_x=[ fuzz.TrapezoidalFuzzyNumber((-0.5, 0.5), (-1.5, 1.5))]
pt_x=np.r_[480.]
pt_y=np.r_[123.]
membFcn_y=membFcn_x
vdo=FuzzyObjects.VertexDefinedObject(pt_x+50,pt_y,membFcn_x,membFcn_y,isClosed=False)
vdo.generateRealizations(n,acuts,method)
vdo.plot()
vdo.plotFuzzyNumbers()
manager=ObjectManager(ignore_certain_vertices=True)
#manager.addObject(edo,name='edo1')
manager.addObject(vdo,name='vdo1')
print('objects: ' + str(manager.objects))
print('convert to puq params:')
baseshapesfile='shapes.json'
print('base shapes in {}'.format(baseshapesfile))
s_all={}
puqparams_all=manager.fuzzyObjects2PuqParams(baseshapesfile)
#puqparams_all=manager.fuzzyObjects2PuqParams(baseshapesfile,use_samples=False)
for acut,puqparams_data in puqparams_all.iteritems():
print("alpha cut {}".format(acut))
puqparams=puqparams_data['params']
s=[]
oldname=''
for puqparam in puqparams:
name=puqparam.name.split('__')[1]
vertex=int(puqparam.name.split('__')[3])
var=puqparam.name.split('__')[2]
if name!=oldname:
print(' {}'.format(name))
oldname=name
print('\tn:{} nm:{} desc:{} \n\t\t{}'.format(np.size(puqparam.values),puqparam.name,puqparam.description,type(puqparam)))
#add the name, desc, and values
s.append({'name':puqparam.name,'desc':puqparam.description,'value':puqparam.values})
#check to make sure the order of the realizations matches after converting to puq params
#see issue #78. The check only works if method=random
if name=='edo1':
assert method=='random','only random realizations supported'
#use the min since puqparams.values can have more or less entries than getrealizations4sim
# see documentation of fuzzyObjects2puqParams
for i in range(min(np.size(puqparam.values),np.size(edo.getRealizations4Sim(acut)[:,vertex]))):
assert(puqparam.values[i]==edo.getRealizations4Sim(acut)[i,vertex])
if name=='vdo1':
assert method=='random','only random realizations supported'
rlz=vdo.getRealizations4Sim(acut)
x=rlz[:,vertex]
y=rlz[:,(np.size(rlz,1)/2)+vertex]
if var=='x':
for i in range(min(np.size(puqparam.values),np.size(x))):
assert(puqparam.values[i]==x[i])
if var=='y':
for i in range(min(np.size(puqparam.values),np.size(y))):
assert(puqparam.values[i]==y[i])
s_all[acut]=s
plt.show()
plt.figure()
return s_all,baseshapesfile,n,[edo,vdo]
def test_fuzzyObjectsFromPuq():
#tests re-building shapely shapes from puq parameters
params_all_acuts,baseshapesfile,n,objects=test_addFuzzyObjects()
print('')
cm=plt.get_cmap('jet')
sorted_acuts=sorted(params_all_acuts.keys())
for acut in sorted_acuts:
params_all=params_all_acuts[acut]
ac=0.05 if acut==0 else acut
clr=cm(ac*0.999999)
maxrealizations=0
for param in params_all:
if np.size(param['value'])>maxrealizations:
maxrealizations= np.size(param['value'])
for i in range(maxrealizations):
params=[]
for j,param in enumerate(params_all):
#the % is needed for 'value' in order to handle constants which only have 1 realization.
params.append({'name':param['name'], 'desc':param['desc'],
'value':param['value'][i%np.size(param['value'])]})
shapes=ObjectManager.puq2Shapes(baseshapesfile,params=params)
print("acut {} realization {}, number of shapes: {}".format(acut,i,len(shapes)))
for shpName,shpData in shapes.iteritems():
shp=shpData['shp']
shpDesc=shpData['desc']
if utilities.isShapelyPoint(shp):
xy=shp.coords
plt.plot(xy[0][0],xy[0][1],'o',color=clr)
elif utilities.isShapelyLineString(shp):
xy=np.asarray(shp)
plt.plot(xy[:,0],xy[:,1],'-',color=clr)
elif utilities.isShapelyPolygon(shp):
xy=np.asarray(shp.exterior)
plt.plot(xy[:,0],xy[:,1],'-',color=clr)
#testing issue #73. i%l is needed because when combining objects, the maxrealizations
                    #is the largest number of realizations out of all the objects.
if shpName=='edo1':
l=len(objects[0].realizationsPolygons[acut])
assert(np.all(xy==np.asarray(objects[0].realizationsPolygons[acut][i%l].exterior)))
elif shpName=='vdo1':
pass
else:
print("error: unsupported shapely type: {}".format(str(type(shp))))
print("\tshpname {}\n\t\tpuq param desc: {}".format(shpName,shpDesc))
print('----')
print('\n\n')
plt.axis('equal')
plt.show()
def test_addCrispObjects():
n=7
plt.close('all')
########
#build rigid object
#define a polygon
pt_x=np.r_[480.,485,520,510,520]
pt_y=np.r_[123.,100,105,110,117]
uncertain_pts=np.r_[1,1,1,0,1]
centroid_x=np.mean(pt_x)
centroid_y=np.mean(pt_y)
centroid_xv=2.
centroid_yv=2.
theta_mn=0.
theta_v=10
variances=np.r_[centroid_xv,centroid_yv,theta_v]
#x,y correlated but uncorrelated to theta
cor3=np.r_[[
#x y theta
[1, 0.8, 0], #x
[0.8, 1, 0], #y
[0, 0, 1] #theta
]]
cov=utilities.cor2cov(cor3,variances)
ro=CrispObjects.RigidObject(pt_x,pt_y,centroid_x,centroid_y,theta_mn,cov,uncertain_pts)
ro.generateNormal(n,translate=False,rotate=True)
#ro.plot(1)
#########
#########
#build deformable object
#define a point
pt_x=np.r_[495]
pt_y=np.r_[110]
uncertain_pts=[1]
xv=20
yv=10
variances=np.r_[xv,yv]
cor=np.r_[[
#x0 #y0
[1, 0], #x0
[0, 1], #x1
]]
cov=utilities.cor2cov(cor,variances)
do1=CrispObjects.DeformableObject(pt_x,pt_y,cov,uncertain_pts,isClosed=False)
do1.generateUniform(n)
#do1.plot(1,points=True)
#define a line
pt_x=np.r_[488,502]
pt_y=np.r_[107,107]
uncertain_pts=[0,1]
variances=[5,5,5,5]
cor=np.r_[[
#x0 x1 y0 y1
[1, 0, 0, 0], #x0
[0, 1, 0, 0], #x1
[0, 0, 1, 0], #y0
[0, 0, 0, 1], #y1
]]
cov=utilities.cor2cov(cor,variances)
do2=CrispObjects.DeformableObject(pt_x,pt_y,cov,uncertain_pts,isClosed=False)
do2.generateUniform(n)
#do2.plot(1)
#plt.show()
#########
#manager=ObjectManager()
manager=ObjectManager(ignore_certain_vertices=True)
manager.addObject(ro,'ro')
manager.addObject(do1,'do1')
manager.addObject(do2,'do2')
print('objects: ' + str(manager.objects))
print('convert to puq params:')
baseshapesfile='shapes.json'
print('base shapes in {}'.format(baseshapesfile))
s=[]
oldname=''
puqparams=manager.crispObjects2PuqParams(baseshapesfile)
#puqparams=manager.crispObjects2PuqParams(baseshapesfile,use_samples=False)
for puqparam in puqparams:
name=puqparam.name.split('__')[1]
if name!=oldname:
print(' {}'.format(name))
oldname=name
print('\tn:{} nm:{} desc:{} \n\t\t{}'.format(np.size(puqparam.values),puqparam.name,puqparam.description,type(puqparam)))
#print('\t{}'.format(puqparam.values))
#add the name, desc, and values
s.append({'name':puqparam.name,'desc':puqparam.description,'value':puqparam.values})
#check to make sure the order of the generated realuzations is the same after
#converting to puq parameters
#see issue #78
for puqparam in puqparams:
name=puqparam.name.split('__')[1]
var=puqparam.name.split('__')[2]
if name=='ro':
x,y,t=ro.realizationsParams
if var=='x':
for i in range(np.size(puqparam.values)):
assert(x[i]==puqparam.values[i])
if var=='y':
for i in range(np.size(puqparam.values)):
assert(y[i]==puqparam.values[i])
if var=='t':
for i in range(np.size(puqparam.values)):
assert(t[i]==puqparam.values[i])
if name=='do1' or name=='do2':
vertex=int(puqparam.name.split('__')[3])
if name=='do1':
x=do1.realizations[0]
y=do1.realizations[1]
else:
#print(do2.realizations)
x=do2.realizations[0]
y=do2.realizations[1]
if var=='x':
for i in range(np.size(puqparam.values)):
assert(x[i,vertex]==puqparam.values[i])
if var=='y':
for i in range(np.size(puqparam.values)):
assert(y[i,vertex]==puqparam.values[i])
return s,baseshapesfile,n,[ro,do1,do2]
def test_crispObjectsFromPuq():
#tests re-building shapely shapes from puq parameters
#note: this function only works when manager.crispObjects2PuqParams use_samples=True
# in test_addCrispObjects
plt.close('all')
params_all,baseshapesfile,n,objects=test_addCrispObjects()
print('')
for i in range(n): #n is number of realizations.
params=[]
for j,param in enumerate(params_all):
#the % is needed for 'value' in order to handle constants which only have 1 realization.
params.append({'name':param['name'], 'desc':param['desc'],
'value':param['value'][i%np.size(param['value'])]})
print(params)
shapes=ObjectManager.puq2Shapes(baseshapesfile,params=params)
print("realization {}, number of shapes: {}".format(i,len(shapes)))
for shpName,shpData in shapes.iteritems():
shp=shpData['shp']
shpDesc=shpData['desc']
if utilities.isShapelyPoint(shp):
xy=shp.coords
plt.plot(xy[0][0],xy[0][1],'o')
assert(np.all(xy==np.asarray(objects[1].realizationsPolygons[i].coords)))
elif utilities.isShapelyLineString(shp):
xy=np.asarray(shp)
plt.plot(xy[:,0],xy[:,1],'-')
#assert(np.all(xy==np.asarray(objects[2].realizationsPolygons[i]))) #assert will fail when use_samples=True in test_addCrispObjects
elif utilities.isShapelyPolygon(shp):
xy=np.asarray(shp.exterior)
plt.plot(xy[:,0],xy[:,1],'-')
#check that the realizations after using puq2shapes are in the same order
#as the original object. See #78
# object[0] is the rigid object polygon
assert(np.all(xy==np.asarray(objects[0].realizationsPolygons[i].exterior)))
else:
print("error: unsupported shapely type: {}".format(str(type(shp))))
print("\tshpname {}\n\t\tdesc {}".format(shpName,shpDesc))
plt.show()
#check if we're executing this script as the main script
if __name__ == '__main__':
np.random.seed(93113488)
#test_addCrispObjects()
#test_crispObjectsFromPuq()
#test_addFuzzyObjects()
test_fuzzyObjectsFromPuq() | zoidy/wiggly | objectmanager.py | Python | lgpl-3.0 | 60,303 |
#Created by Dmytro Konobrytskyi, 2013 (github.com/Akson)
import logging
import json
import struct
import numpy as np
def ParseBinaryData(binaryData, binaryDataFormat, dimensions):
elementSize = struct.calcsize(binaryDataFormat)
elementsNumber = len(binaryData) / elementSize
#Single element case
if elementsNumber == 1:
return struct.unpack(binaryDataFormat, binaryData)[0]
#It looks like we have an array, parse it with NumPy
if dimensions == None:
return np.frombuffer(binaryData, binaryDataFormat)
#And it is actually a multi-dimensional array
return np.ndarray(shape=dimensions, dtype=binaryDataFormat, buffer=binaryData)
def ParseDimensionsString(dimensionsString):
dimensionsString = dimensionsString.lower()
dimensionsString = dimensionsString.replace("(", "")
dimensionsString = dimensionsString.replace(")", "")
dimensionsString = dimensionsString.replace("[", "")
dimensionsString = dimensionsString.replace("]", "")
dimensionsString = dimensionsString.replace(" ", "")
dimensionsString = dimensionsString.replace("x", ",")
dimensionsString = dimensionsString.replace(";", ",")
dimensions = [int(ds) for ds in dimensionsString.split(",")]
return dimensions
def ParseMessage(message):
processedMessage = dict()
processedMessage["Stream"] = message["Stream"]
processedMessage["Info"] = message["Info"]
#Parse data based on format. String is a default format
dataType = message["Info"].get("DataType", "String")
if dataType == "String":
processedMessage["Data"] = message["Data"]
if dataType == "JSON":
jsonObj = json.loads(message["Data"])
processedMessage["Data"] = jsonObj.get("_Value", jsonObj)
if dataType == "Binary":
if not "BinaryDataFormat" in message["Info"]:
logging.warning("Cannot parse binary data, no format data available")
return None
binaryDataFormat = message["Info"]["BinaryDataFormat"]
#We may have multi-dimensional data
dimensions = None
if "Dimensions" in message["Info"]:
dimensions = ParseDimensionsString(message["Info"]["Dimensions"])
processedMessage["Data"] = ParseBinaryData(message["Data"], binaryDataFormat, dimensions)
return processedMessage
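
# Minimal usage sketch: the dictionary layout ('Stream', 'Info', 'Data' keys and the
# 'DataType' field) mirrors what ParseMessage expects above; the stream value itself is
# made up for the example.
if __name__ == "__main__":
    example = {
        "Stream": "DemoStream",
        "Info": {"DataType": "JSON"},
        "Data": json.dumps({"_Value": 42}),
    }
    print(ParseMessage(example)["Data"])  # prints 42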
| Akson/RemoteConsolePlus3 | RemoteConsolePlus3/RCP3/DefaultParser.py | Python | lgpl-3.0 | 2,359 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-04 19:14
from __future__ import unicode_literals
import archives.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('archives', '0003_attachment'),
]
operations = [
migrations.CreateModel(
name='PostImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.ImageField(blank=True, upload_to='images/%Y/%m/%d', validators=[archives.models.check_image_extension], verbose_name='图片')),
('created_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('last_modify_time', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('uploaded_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='上传者')),
],
),
]
| phith0n/mooder | archives/migrations/0004_postimage.py | Python | lgpl-3.0 | 1,192 |
# Copyright 2017 The Tangram Developers. See the AUTHORS file at the
# top-level directory of this distribution and at
# https://github.com/renatoGarcia/tangram/blob/master/AUTHORS.
#
# This file is part of Tangram.
#
# Tangram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tangram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Tangram in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from .imshow import imshow
| renatoGarcia/tangram | tangram/recipes/__init__.py | Python | lgpl-3.0 | 927 |
#!/usr/bin/env python2.7
# Uploads Scrypt mining farm status to CouchDB database for detailed logging.
# Written by Vishakh.
# https://github.com/vishakh/valkyrie
# Based on open source code by etkeh <https://github.com/setkeh>
import datetime
import json
import logging
import socket
import subprocess
import sys
import time
import couchdb
def readHostsFile(filename):
hosts = []
json_data = open( filename )
data = json.load(json_data)
for name in data:
info = data[name]
host = info['host']
port = info['port']
hosts.append([host, port, name])
return hosts
def readConfigFile(filename):
json_data = open( filename )
data = json.load(json_data)
couchdb_server = data['couchdb_server']
couchdb_database = data['couchdb_database']
socket_timeout = int(data['socket_timeout'])
log_interval = int(data['log_interval'])
temperature_script = None
if 'temperature_script' in data:
temperature_script = data['temperature_script']
return couchdb_server, couchdb_database, socket_timeout, log_interval, temperature_script
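
# Illustrative input files (the field names match what readHostsFile/readConfigFile
# above expect; all values are made up):
#
# hosts file:
# { "rig1": {"host": "192.168.1.10", "port": 4028},
#   "rig2": {"host": "192.168.1.11", "port": 4028} }
#
# config file:
# { "couchdb_server": "http://localhost:5984/",
#   "couchdb_database": "valkyrie",
#   "socket_timeout": 10,
#   "log_interval": 60,
#   "temperature_script": "/opt/valkyrie/read_temp.sh" }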
def linesplit(socket):
buffer = socket.recv(4096)
done = False
while not done:
more = socket.recv(4096)
if not more:
done = True
else:
buffer = buffer + more
if buffer:
return buffer
def makerpccall(api_command, api_ip, api_port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(socket_timeout)
s.connect((api_ip, int(api_port)))
if len(api_command) == 2:
s.send(json.dumps({"command": api_command[0], "parameter": api_command[1]}))
else:
s.send(json.dumps({"command": api_command[0]}))
resp = linesplit(s)
resp = resp.replace('\x00', '')
resp = json.loads(resp)
s.close()
return resp
def tryDBConnect():
server = couchdb.Server(url=couchdb_server)
db = server[couchdb_database]
return db, server
def connectToDB():
log.info("Connecting to DB.")
while True:
try:
db, server = tryDBConnect()
log.info("DB connect successful.")
return server, db
except:
e = sys.exc_info()[0]
log.error("Could not connect to DB.")
log.info(e)
log.info("Will retry after sleep..")
time.sleep(log_interval)
def runIteration():
log.info('Running iteration')
try:
utctime = str(datetime.datetime.utcnow())
unix_time = str(time.time())
miners = {}
total_hashrate = 0.0
total_miners = 0
total_gpus = 0
temperature = None
for host, port, name in hosts:
try:
log.info('Querying %s at %s:%s' % (name, host, port))
currenthost = {}
command = 'summary'
response = makerpccall([command], host, port)
summary = response['SUMMARY'][0]
currenthost[command] = summary
command = 'config'
response = makerpccall([command], host, port)
config = response['CONFIG']
currenthost[command] = config
command = 'pools'
response = makerpccall([command], host, port)
pools = response['POOLS']
currenthost[command] = pools
command = 'devs'
response = makerpccall([command], host, port)
devices = response['DEVS']
currenthost[command] = devices
command = 'coin'
response = makerpccall([command], host, port)
devdetails = response['COIN']
currenthost[command] = devdetails
miners[name] = currenthost
temperature = None
try:
if temperature_script is not None:
temperature = subprocess.check_output(temperature_script).strip()
temperature = temperature.replace('\r', '').replace('\n', '')
else:
log.info('Skipping temperature recording as no script is provided.')
except:
log.warn('Could not get farm temperature.')
e = sys.exc_info()[0]
log.info(e)
# Cumulative statistics
hashrate = summary['MHS 5s']
                # 'MHS 5s' may come back as a string (e.g. in scientific notation); convert it
                if isinstance(hashrate, str) and ('E' in hashrate or 'e' in hashrate):
hashrate = float(hashrate[:-1])/10
total_hashrate += hashrate
total_miners += 1
gpus = len(devices)
total_gpus += gpus
except:
log.error("Could not fetch data from host " + name + " at host " + host + " and port " + port)
e = sys.exc_info()
log.info(e)
record = {'_id': unix_time, 'unixtime': unix_time, 'utctime': utctime, 'total_hashrate': total_hashrate,
'total_miners': total_miners,
'total_gpus': total_gpus, 'temperature': temperature, 'miners': miners}
try:
db[unix_time] = record
db.commit()
except:
log.warn('Could not write to database. Attempting to reconnect for next iteration..')
connectToDB()
except:
e = sys.exc_info()
log.error("Error during iteration")
logging.exception(e)
log.info('Done with iteration.')
log = logging.getLogger('Valkyrie')
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s %(filename)s %(lineno)d - %(levelname)s - %(message)s"))
log.addHandler(handler)
config_file = sys.argv[1]
hosts_file = sys.argv[2]
hosts = readHostsFile(hosts_file)
couchdb_server, couchdb_database, socket_timeout, log_interval, temperature_script = readConfigFile(config_file)
server, db = connectToDB()
while True:
runIteration()
log.info('Sleeping for %s seconds.'%log_interval)
time.sleep(log_interval)
| vishakh/valkyrie | valkyrie.py | Python | lgpl-3.0 | 6,166 |
# coding=utf8
from __future__ import print_function
import re
import sys
import socket
from untwisted.mode import Mode
from untwisted.network import Work
from untwisted.event import DATA, BUFFER, FOUND, CLOSE, RECV_ERR
from untwisted.utils import std
from untwisted.utils.common import append, shrug
from untwisted.magic import sign
import util
import debug
import runtime
from util import NotInstalled, AlreadyInstalled
SOCKET_ADDRESS = 'state/chess'
RECONNECT_DELAY_SECONDS = 1
ch_work = []
ch_mode = Mode()
ch_mode.domain = 'ch'
ch_link = util.LinkSet()
ch_link.link_module(std)
ch_link.link(DATA, append)
ch_link.link(BUFFER, shrug, '\n')
if '--debug' in sys.argv: ch_link.link_module(debug)
ab_mode = None
ab_link = util.LinkSet()
@ab_link(('HELP', 'chess'))
def h_help(bot, reply, args):
reply('chess start',
'Starts a new game of chess.')
reply('chess rf RF',
'Moves the piece at rank r file f to rank R file F.')
reply('chess M [r|f|rf] RF',
'Moves a piece of type M to rank R file F'
' (moving from rank r and/or file f, if specified).')
reply('chess [r|f] RF',
'Moves a pawn to rank R file F'
' (moving from rank r or file f, if specified).')
reply('chess stop',
'Cancels the current game of chess.')
def init_work(address):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
work = Work(ch_mode, sock)
work.address = address
ch_work.append(work)
work.setblocking(0)
work.connect_ex(address)
def kill_work(work):
work.destroy()
work.shutdown(socket.SHUT_RDWR)
work.close()
ch_work.remove(work)
def install(bot):
global ab_mode
if ab_mode is not None: raise AlreadyInstalled
ab_mode = bot
ab_link.install(ab_mode)
ch_link.install(ch_mode)
init_work(SOCKET_ADDRESS)
def uninstall(bot):
global ab_mode
if ab_mode is None: raise NotInstalled
ch_link.uninstall(ch_mode)
while len(ch_work):
kill_work(ch_work[0])
ab_link.uninstall(ab_mode)
ab_mode = None
@ab_link('!chess')
def h_chess(bot, id, target, args, full_msg):
if not target: return
for work in ch_work:
work.dump('%s <%s> %s\n' % (target, id.nick, args))
@ch_link(FOUND)
def ch_found(work, line):
match = re.match(r'(#\S+) (.*)', line.strip())
if not match: return
ab_mode.send_msg(match.group(1), match.group(2))
@ch_link(CLOSE)
@ch_link(RECV_ERR)
def ch_close_recv_error(work, *args):
kill_work(work)
yield runtime.sleep(RECONNECT_DELAY_SECONDS)
init_work(work.address)
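
# Wire-format sketch (illustrative): each '!chess' command is forwarded to the UNIX
# socket as a single line of the form "#channel <nick> args", e.g.
#   #chess <alice> e2 e4
# and every reply line of the form "#channel text" read back from the socket is
# relayed to that IRC channel.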
| joodicator/PageBot | page/chess.py | Python | lgpl-3.0 | 2,574 |
# Written in Python 3.5
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
librosCodigo = {"Francés":[13735,13808],"Español":[24925,15027],"Portugés":[14904,16384],"Inglés":[10422,1013]}
dic_idiomas={}
#hola dos
for idioma in librosCodigo.keys():
diccionario_largo_palabras={}
for indeCo in librosCodigo[idioma]:
texto= strip_headers(load_etext(indeCo))
dic_idiomas[idioma]= diccionario_largo_palabras
for caracter_especial in ['"',"...","¿","?","=","_","[","]","(",")",",",".",":",";","!","¡","«","»","*","~","' "," '","- "," -","--"]:
texto=texto.replace(caracter_especial," ")
palabras=texto.split()
for palabra in palabras:
largo_palabra = len(palabra)
if largo_palabra in diccionario_largo_palabras:
diccionario_largo_palabras[largo_palabra] = diccionario_largo_palabras[largo_palabra]+1
else:
diccionario_largo_palabras[largo_palabra]= 1
print (dic_idiomas)
| DiplomadoACL/problemasenclase | Problema2/problema2crocha.py | Python | lgpl-3.0 | 1,040 |
by_ext = [
('py.png', 'py'),
('python.png', 'pyc'),
('page_white_text_width.png', ['md', 'markdown', 'rst', 'rtf']),
('page_white_text.png', 'txt'),
('page_white_code.png', ['html', 'htm', 'cgi']),
('page_white_visualstudio.png', ['asp', 'vb']),
('page_white_ruby.png', 'rb'),
('page_code.png', 'xhtml'),
('page_white_code_red.png', ['xml', 'xsl', 'xslt', 'yml']),
('script.png', ['js', 'json', 'applescript', 'htc']),
('layout.png', ['css', 'less']),
('page_white_php.png', 'php'),
('page_white_c.png', 'c'),
('page_white_cplusplus.png', 'cpp'),
('page_white_h.png', 'h'),
('database.png', ['db', 'sqlite', 'sqlite3']),
('page_white_database.png', 'sql'),
('page_white_gear.png', ['conf', 'cfg', 'ini', 'reg', 'sys']),
('page_white_zip.png', ['zip', 'tar', 'gz', 'tgz', '7z', 'alz', 'rar',
'bin', 'cab']),
('cup.png', 'jar'),
('page_white_cup.png', ['java', 'jsp']),
('application_osx_terminal.png', 'sh'),
('page_white_acrobat.png', 'pdf'),
('package.png', ['pkg', 'dmg']),
('shape_group.png', ['ai', 'svg', 'eps']),
('application_osx.png', 'app'),
('cursor.png', 'cur'),
('feed.png', 'rss'),
('cd.png', ['iso', 'vcd', 'toast']),
('page_white_powerpoint.png', ['ppt', 'pptx']),
('page_white_excel.png', ['xls', 'xlsx', 'csv']),
('page_white_word.png', ['doc', 'docx']),
('page_white_flash.png', 'swf'),
('page_white_actionscript.png', ['fla', 'as']),
('comment.png', 'smi'),
('disk.png', ['bak', 'bup']),
('application_xp_terminal.png', ['bat', 'com']),
('application.png', 'exe'),
('key.png', 'cer'),
('cog.png', ['dll', 'so']),
('pictures.png', 'ics'),
('picture.png', ['gif', 'png', 'jpg', 'jpeg', 'bmp', 'ico']),
('film.png', ['avi', 'mkv']),
('error.png', 'log'),
('music.png', ['mpa', 'mp3', 'off', 'wav']),
('font.png', ['ttf', 'eot']),
('vcard.png', 'vcf')
]
ICONS_BY_NAME = dict(
Makefile='page_white_gear.png',
Rakefile='page_white_gear.png',
README='page_white_text_width.png',
LICENSE='shield.png',
)
ICONS_BY_EXT = dict()
for icon, exts in by_ext:
if not isinstance(exts, list):
exts = [exts]
for e in exts:
ICONS_BY_EXT[e] = icon
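
# Illustrative lookup helper (not part of the original module): resolve an icon for a
# file name by exact name first, then by extension; the fallback icon name is an
# assumption.
import os.path

def example_icon_for(filename, default='page_white.png'):
    name = os.path.basename(filename)
    if name in ICONS_BY_NAME:
        return ICONS_BY_NAME[name]
    ext = name.rsplit('.', 1)[-1].lower() if '.' in name else ''
    return ICONS_BY_EXT.get(ext, default)

# example_icon_for('README') -> 'page_white_text_width.png'
# example_icon_for('photo.JPG') -> 'picture.png'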
| klen/pyserve | pyserve/icons.py | Python | lgpl-3.0 | 2,305 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions that act on molecule objects."""
from __future__ import absolute_import
import math
import re
from psi4 import core
from psi4.driver.p4util import constants, filter_comments
from psi4.driver.inputparser import process_pubchem_command, pubchemre
def extract_clusters(mol, ghost=True, cluster_size=0):
"""Function to return all subclusters of the molecule *mol* of
real size *cluster_size* and all other atoms ghosted if *ghost*
equals true, all other atoms discarded if *ghost* is false. If
*cluster_size* = 0, returns all possible combinations of cluster size.
"""
# How many levels of clusters are possible?
nfrag = mol.nfragments()
# Initialize the cluster array
clusters = []
# scope the arrays
reals = []
ghosts = []
# counter
counter = 0
# loop over all possible cluster sizes
for nreal in range(nfrag, 0, -1):
# if a specific cluster size size is requested, only do that
if (nreal != cluster_size and cluster_size > 0):
continue
# initialize the reals list
reals = []
# setup first combination [3,2,1] lexical ordering
# fragments indexing is 1's based, bloody hell
for index in range(nreal, 0, -1):
reals.append(index)
# start loop through lexical promotion
while True:
counter = counter + 1
# Generate cluster from last iteration
if (ghost):
ghosts = []
for g in range(nfrag, 0, -1):
if (g not in reals):
ghosts.append(g)
clusters.append(mol.extract_subsets(reals, ghosts))
else:
clusters.append(mol.extract_subsets(reals))
# reset rank
rank = 0
# look for lexical promotion opportunity
# i.e.: [4 2 1] has a promotion opportunity at
# index 1 to produce [4 3 1]
for k in range(nreal - 2, -1, -1):
if (reals[k] != reals[k + 1] + 1):
rank = k + 1
break
# do the promotion
reals[rank] = reals[rank] + 1
# demote the right portion of the register
val = 1
for k in range(nreal - 1, rank, -1):
reals[k] = val
val = val + 1
# boundary condition is promotion into
# [nfrag+1 nfrag-1 ...]
if (reals[0] > nfrag):
break
return clusters
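
# Usage sketch (illustrative): for a two-fragment molecule, requesting real size 1 with
# ghosting yields each monomer in the full dimer basis, as used for counterpoise
# corrections. The Ne...Ne geometry below is only an example.
#
#   dimer = geometry("""
#       0 1
#       Ne 0.0 0.0 0.0
#       --
#       0 1
#       Ne 0.0 0.0 3.1
#       """)
#   monomers = extract_clusters(dimer, ghost=True, cluster_size=1)  # two molecules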
def extract_cluster_indexing(mol, cluster_size=0):
"""Function to returns a LIST of all subclusters of the molecule *mol* of
real size *cluster_size*. If *cluster_size* = 0, returns all possible
combinations of cluster size.
"""
import copy
# How many levels of clusters are possible?
nfrag = mol.nfragments()
# Initialize the cluster array
clusters = []
# scope the arrays
reals = []
# counter
counter = 0
# loop over all possible cluster sizes
for nreal in range(nfrag, 0, -1):
# if a specific cluster size size is requested, only do that
if (nreal != cluster_size and cluster_size > 0):
continue
# initialize the reals list
reals = []
# setup first combination [3,2,1] lexical ordering
# fragments indexing is 1's based, bloody hell
for index in range(nreal, 0, -1):
reals.append(index)
# start loop through lexical promotion
while True:
counter = counter + 1
# Generate cluster from last iteration
clusters.append(copy.deepcopy(reals))
# reset rank
rank = 0
# look for lexical promotion opportunity
# i.e.: [4 2 1] has a promotion opportunity at
# index 1 to produce [4 3 1]
for k in range(nreal - 2, -1, -1):
if (reals[k] != reals[k + 1] + 1):
rank = k + 1
break
# do the promotion
reals[rank] = reals[rank] + 1
# demote the right portion of the register
val = 1
for k in range(nreal - 1, rank, -1):
reals[k] = val
val = val + 1
# boundary condition is promotion into
# [nfrag+1 nfrag-1 ...]
if (reals[0] > nfrag):
break
return clusters
def molecule_set_attr(self, name, value):
"""Function to redefine __setattr__ method of molecule class."""
fxn = object.__getattribute__(self, "is_variable")
isvar = fxn(name)
if isvar:
fxn = object.__getattribute__(self, "set_variable")
fxn(name, value)
return
object.__setattr__(self, name, value)
def molecule_get_attr(self, name):
"""Function to redefine __getattr__ method of molecule class."""
fxn = object.__getattribute__(self, "is_variable")
isvar = fxn(name)
if isvar:
fxn = object.__getattribute__(self, "get_variable")
return fxn(name)
return object.__getattribute__(self, name)
def BFS(self):
"""Perform a breadth-first search (BFS) on the real atoms
in molecule, returning an array of atom indices of fragments.
Relies upon van der Waals radii and so faulty for close
(esp. hydrogen-bonded) fragments. Original code from
Michael S. Marshall.
"""
vdW_diameter = {
'H': 1.001 / 1.5,
'HE': 1.012 / 1.5,
'LI': 0.825 / 1.5,
'BE': 1.408 / 1.5,
'B': 1.485 / 1.5,
'C': 1.452 / 1.5,
'N': 1.397 / 1.5,
'O': 1.342 / 1.5,
'F': 1.287 / 1.5,
'NE': 1.243 / 1.5,
'NA': 1.144 / 1.5,
'MG': 1.364 / 1.5,
'AL': 1.639 / 1.5,
'SI': 1.716 / 1.5,
'P': 1.705 / 1.5,
'S': 1.683 / 1.5,
'CL': 1.639 / 1.5,
'AR': 1.595 / 1.5}
Queue = []
    White = list(range(self.natom()))  # untouched; list() keeps this working on Python 3
Black = [] # touched and all edges discovered
Fragment = [] # stores fragments
start = 0 # starts with the first atom in the list
Queue.append(start)
White.remove(start)
# Simply start with the first atom, do a BFS when done, go to any
# untouched atom and start again iterate until all atoms belong
# to a fragment group
while White or Queue: # Iterates to the next fragment
Fragment.append([])
while Queue: # BFS within a fragment
for u in Queue: # find all white neighbors to vertex u
for i in White:
dist = constants.bohr2angstroms * math.sqrt(
(self.x(i) - self.x(u)) ** 2 +
(self.y(i) - self.y(u)) ** 2 +
(self.z(i) - self.z(u)) ** 2)
if dist < vdW_diameter[self.symbol(u)] + \
vdW_diameter[self.symbol(i)]:
Queue.append(i) # if you find you, put in the queue
White.remove(i) # & remove it from the untouched list
Queue.remove(u) # remove focus from Queue
Black.append(u)
Fragment[-1].append(int(u)) # add to group (0-indexed)
Fragment[-1].sort() # preserve original atom ordering
if White: # can't move White -> Queue if empty
Queue.append(White[0])
White.remove(White[0])
return Fragment
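
# Illustrative result: for a molecule whose real atoms form two van-der-Waals-connected
# groups (say atoms 0-2 and 3-5), mol.BFS() returns [[0, 1, 2], [3, 4, 5]]: one list
# of 0-indexed atom indices per detected fragment.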
def dynamic_variable_bind(cls):
"""Function to dynamically add extra members to
the core.Molecule class.
"""
cls.__setattr__ = molecule_set_attr
cls.__getattr__ = molecule_get_attr
cls.BFS = BFS
dynamic_variable_bind(core.Molecule) # pass class type, not class instance
#
# Define geometry to be used by PSI4.
# The molecule created by this will be set in options.
#
# geometry("
# O 1.0 0.0 0.0
# H 0.0 1.0 0.0
# H 0.0 0.0 0.0
#   ")
#
def geometry(geom, name="default"):
"""Function to create a molecule object of name *name* from the
geometry in string *geom*. Permitted for user use but deprecated
in driver in favor of explicit molecule-passing. Comments within
the string are filtered.
"""
core.efp_init()
geom = pubchemre.sub(process_pubchem_command, geom)
geom = filter_comments(geom)
molecule = core.Molecule.create_molecule_from_string(geom)
molecule.set_name(name)
# Attempt to go ahead and construct the molecule
try:
molecule.update_geometry()
except:
core.print_out("Molecule: geometry: Molecule is not complete, please use 'update_geometry'\n"
" once all variables are set.\n")
activate(molecule)
return molecule
def activate(mol):
"""Function to set molecule object *mol* as the current active molecule.
Permitted for user use but deprecated in driver in favor of explicit
molecule-passing.
"""
core.set_active_molecule(mol)
| jH0ward/psi4 | psi4/driver/molutil.py | Python | lgpl-3.0 | 10,050 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from common_processing import *
import tarfile
import sys
import glob
def untar(ftp_link, out_folder):
tar = tarfile.open(out_folder + ftp_link.split("/")[-1])
tar.extractall(path=out_folder)
tar.close()
def process_nodes_dmp(out_folder):
"""
extract data from file:nodes.dmp
create 2 map_tables:
map_organism2organism
map_organism2rank
"""
map_organism2organism = ""
map_organism2rank = ""
parent_tax_dict = dict()
tax_tree_dict = dict()
with open(out_folder + 'nodes.dmp', 'rb') as f:
for line in f:
tax_id, parent_tax_id, rank, embl_code, division_id, inherited_div_flag, genetic_code_id, inherited_gc_flag, mitochondrial_genetic_code_id, inherited_mgc_flag, genbank_hidden_flag, hidden_subtree_root_flag, comments = line.split("\t|\t")
map_organism2rank += str(tax_id) + "\t" + rank + "\n"
parent_tax_dict.setdefault(tax_id, parent_tax_id)
for tax_id, parent_tax_id in parent_tax_dict.iteritems():
tax_tree_dict.setdefault(tax_id, []).append(parent_tax_id)
while parent_tax_dict[tax_tree_dict[tax_id][-1]] != tax_tree_dict[tax_id][-1]:
tax_tree_dict[tax_id].append(parent_tax_dict[tax_tree_dict[tax_id][-1]])
for tax_id, parent_tax_ids in tax_tree_dict.iteritems():
map_organism2organism += '{}\t{}\t{}\n'.format(tax_id, tax_id, 0)
for level, parent_tax_id in enumerate(parent_tax_ids):
map_organism2organism += '{}\t{}\t{}\n'.format(tax_id, parent_tax_id, level+1)
with open(out_folder + "map_organism2organism.tsv", "wb") as f:
f.write(map_organism2organism)
with open(out_folder + "map_organism2rank.tsv", "wb") as f:
f.write(map_organism2rank)
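
# Illustrative output (tab-separated), assuming nodes.dmp links tax_id 9606 (human) to
# parent 9605 (Homo) and so on up to the root:
#   map_organism2organism.tsv: "9606\t9606\t0", "9606\t9605\t1", ...  (tax_id, ancestor, level)
#   map_organism2rank.tsv: "9606\tspecies"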
def process_names_dmp(out_folder):
"""
extract data from file:names.dmp
map_symbol2organism
name type included: scientific name, synonym, acronym, anamorph, misspelling, misnomer, common name,
"""
map_symbol2organism = ''
non_unique_name = set()
with open(out_folder + "names.dmp", "rb") as f:
for line in f:
tax_id, name_txt, unique_name, name_class = line.split("\t|\t")
map_symbol2organism += "{}\t{}\t{}".format(tax_id, name_txt, name_class.split("|")[0].replace("\t", "\n"))
with open(out_folder + "map_symbol2organism.tsv", "wb") as f:
f.write(map_symbol2organism)
def argument_parser():
parser = argparse.ArgumentParser(description="download the Taxonomy PubMed from ftp")
parser.add_argument("-f", "--ftp_link", type=str, help="ftp url link to the file")
parser.add_argument("-o", "--out_folder", type=str, help="target folder of downloaded file")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = argument_parser()
print "processing Taxonomy data"
ftp_download(args.ftp_link, args.out_folder)
untar(args.ftp_link, args.out_folder)
process_nodes_dmp(args.out_folder)
process_names_dmp(args.out_folder)
| TurkuNLP/CAFA3 | sequence_features/process_NCBI_Taxonomy.py | Python | lgpl-3.0 | 3,092 |
# -*- coding: utf-8 -*-
# This file is part of Knitlib.
#
# Knitlib is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Knitlib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Knitlib. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015 Sebastian Oliva <http://github.com/fashiontec/knitlib>
import logging
import time
import knitting_plugin
class DummyKnittingPlugin(knitting_plugin.BaseKnittingPlugin):
"""Implements a sample knitting plugin that allows for simple operation emulation."""
__PLUGIN_NAME__ = u"dummy"
def __init__(self):
super(DummyKnittingPlugin, self).__init__()
base_log_string = u"{} has been called on dummy knitting plugin."
def onknit(self, e):
logging.debug(DummyKnittingPlugin.base_log_string.format("onknit"))
# In order to simulate blocking we make it sleep.
total = 5
for i in range(total):
time.sleep(1)
self.interactive_callbacks["progress"](i / float(total), i, total)
self.finish()
def onfinish(self, e):
logging.debug(DummyKnittingPlugin.base_log_string.format("onfinish"))
def onconfigure(self, e):
logging.debug(DummyKnittingPlugin.base_log_string.format("onconfigure"))
def set_port(self, *args, **kwargs):
pass
@staticmethod
def supported_config_features():
return {"$schema": "http://json-schema.org/schema#", "type": "object"}
| fashiontec/knitlib | src/knitlib/plugins/dummy_plugin.py | Python | lgpl-3.0 | 1,842 |
"""This module holds the common test code.
.. seealso:: `pytest good practices
<https://pytest.org/latest/goodpractices.html>`__ for why this module exists.
"""
import os
import sys
# sys.path makes knittingpattern importable
HERE = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(HERE, "../.."))
__builtins__["HERE"] = HERE
| AllYarnsAreBeautiful/knittingpattern | knittingpattern/test/conftest.py | Python | lgpl-3.0 | 339 |
#
# uchroma - Copyright (C) 2021 Stefanie Kondik
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# pylint: disable=invalid-name
import re
import pydbus
BASE_PATH = '/org/chemlab/UChroma'
SERVICE = 'org.chemlab.UChroma'
class UChromaClient(object):
def __init__(self):
self._bus = pydbus.SessionBus()
def get_device_paths(self) -> list:
dm = self._bus.get(SERVICE)
return dm.GetDevices()
def get_device(self, identifier):
if identifier is None:
return None
use_key = False
if isinstance(identifier, str):
if identifier.startswith(BASE_PATH):
return self._bus.get(SERVICE, identifier)
if re.match(r'\w{4}:\w{4}.\d{2}', identifier):
use_key = True
elif re.match(r'\d+', identifier):
identifier = int(identifier)
else:
return None
for dev_path in self.get_device_paths():
dev = self.get_device(dev_path)
if use_key and identifier == dev.Key:
return dev
elif identifier == dev.DeviceIndex:
return dev
return None
def get_layer(self, device, layer_idx):
layers = device.CurrentRenderers
if layer_idx >= len(layers):
raise ValueError("Layer index out of range")
return self._bus.get(SERVICE, layers[layer_idx][1])
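# Editor's usage sketch (assumes a running uchromad service on the session bus):
#
#   client = UChromaClient()
#   dev = client.get_device(0)           # by index; a "vvvv:pppp.nn" key or object path also works
#   if dev is not None and dev.CurrentRenderers:
#       first_layer = client.get_layer(dev, 0)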
if __name__ == '__main__':
uclient = UChromaClient()
for u_dev_path in uclient.get_device_paths():
u_dev = uclient.get_device(u_dev_path)
print('[%s]: %s (%s / %s)' % \
(u_dev.Key, u_dev.Name, u_dev.SerialNumber, u_dev.FirmwareVersion))
| cyanogen/uchroma | uchroma/client/dbus_client.py | Python | lgpl-3.0 | 2,107 |
"""
WSGI config for CongCards project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CongCards.settings")
application = Cling(get_wsgi_application())
| SoftButterfly/CongCards | CongCards/wsgi.py | Python | lgpl-3.0 | 430 |
#!/home/toppatch/Python-2.7.5/bin/python2.7
"""An RFC 2821 smtp proxy.
Usage: %(program)s [options] [localhost:localport [remotehost:remoteport]]
Options:
--nosetuid
-n
This program generally tries to setuid `nobody', unless this flag is
set. The setuid call will fail if this program is not run as root (in
which case, use this flag).
--version
-V
Print the version number and exit.
--class classname
-c classname
Use `classname' as the concrete SMTP proxy class. Uses `PureProxy' by
default.
--debug
-d
Turn on debugging prints.
--help
-h
Print this message and exit.
Version: %(__version__)s
If localhost is not given then `localhost' is used, and if localport is not
given then 8025 is used. If remotehost is not given then `localhost' is used,
and if remoteport is not given, then 25 is used.
"""
# Overview:
#
# This file implements the minimal SMTP protocol as defined in RFC 821. It
# has a hierarchy of classes which implement the backend functionality for the
# smtpd. A number of classes are provided:
#
# SMTPServer - the base class for the backend. Raises NotImplementedError
# if you try to use it.
#
# DebuggingServer - simply prints each message it receives on stdout.
#
# PureProxy - Proxies all messages to a real smtpd which does final
# delivery. One known problem with this class is that it doesn't handle
# SMTP errors from the backend server at all. This should be fixed
# (contributions are welcome!).
#
# MailmanProxy - An experimental hack to work with GNU Mailman
# <www.list.org>. Using this server as your real incoming smtpd, your
# mailhost will automatically recognize and accept mail destined to Mailman
# lists when those lists are created. Every message not destined for a list
# gets forwarded to a real backend smtpd, as with PureProxy. Again, errors
# are not handled correctly yet.
#
# Please note that this script requires Python 2.0
#
# Author: Barry Warsaw <[email protected]>
#
# TODO:
#
# - support mailbox delivery
# - alias files
# - ESMTP
# - handle error codes from the backend smtpd
import sys
import os
import errno
import getopt
import time
import socket
import asyncore
import asynchat
__all__ = ["SMTPServer","DebuggingServer","PureProxy","MailmanProxy"]
program = sys.argv[0]
__version__ = 'Python SMTP proxy version 0.2'
class Devnull:
def write(self, msg): pass
def flush(self): pass
DEBUGSTREAM = Devnull()
NEWLINE = '\n'
EMPTYSTRING = ''
COMMASPACE = ', '
def usage(code, msg=''):
print >> sys.stderr, __doc__ % globals()
if msg:
print >> sys.stderr, msg
sys.exit(code)
class SMTPChannel(asynchat.async_chat):
COMMAND = 0
DATA = 1
def __init__(self, server, conn, addr):
asynchat.async_chat.__init__(self, conn)
self.__server = server
self.__conn = conn
self.__addr = addr
self.__line = []
self.__state = self.COMMAND
self.__greeting = 0
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__fqdn = socket.getfqdn()
try:
self.__peer = conn.getpeername()
except socket.error, err:
# a race condition may occur if the other end is closing
# before we can get the peername
self.close()
if err[0] != errno.ENOTCONN:
raise
return
print >> DEBUGSTREAM, 'Peer:', repr(self.__peer)
self.push('220 %s %s' % (self.__fqdn, __version__))
self.set_terminator('\r\n')
# Overrides base class for convenience
def push(self, msg):
asynchat.async_chat.push(self, msg + '\r\n')
# Implementation of base class abstract method
def collect_incoming_data(self, data):
self.__line.append(data)
# Implementation of base class abstract method
def found_terminator(self):
line = EMPTYSTRING.join(self.__line)
print >> DEBUGSTREAM, 'Data:', repr(line)
self.__line = []
if self.__state == self.COMMAND:
if not line:
self.push('500 Error: bad syntax')
return
method = None
i = line.find(' ')
if i < 0:
command = line.upper()
arg = None
else:
command = line[:i].upper()
arg = line[i+1:].strip()
method = getattr(self, 'smtp_' + command, None)
if not method:
self.push('502 Error: command "%s" not implemented' % command)
return
method(arg)
return
else:
if self.__state != self.DATA:
self.push('451 Internal confusion')
return
# Remove extraneous carriage returns and de-transparency according
# to RFC 821, Section 4.5.2.
data = []
for text in line.split('\r\n'):
if text and text[0] == '.':
data.append(text[1:])
else:
data.append(text)
self.__data = NEWLINE.join(data)
status = self.__server.process_message(self.__peer,
self.__mailfrom,
self.__rcpttos,
self.__data)
self.__rcpttos = []
self.__mailfrom = None
self.__state = self.COMMAND
self.set_terminator('\r\n')
if not status:
self.push('250 Ok')
else:
self.push(status)
# SMTP and ESMTP commands
def smtp_HELO(self, arg):
if not arg:
self.push('501 Syntax: HELO hostname')
return
if self.__greeting:
self.push('503 Duplicate HELO/EHLO')
else:
self.__greeting = arg
self.push('250 %s' % self.__fqdn)
def smtp_NOOP(self, arg):
if arg:
self.push('501 Syntax: NOOP')
else:
self.push('250 Ok')
def smtp_QUIT(self, arg):
# args is ignored
self.push('221 Bye')
self.close_when_done()
# factored
def __getaddr(self, keyword, arg):
address = None
keylen = len(keyword)
if arg[:keylen].upper() == keyword:
address = arg[keylen:].strip()
if not address:
pass
elif address[0] == '<' and address[-1] == '>' and address != '<>':
# Addresses can be in the form <[email protected]> but watch out
# for null address, e.g. <>
address = address[1:-1]
return address
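    # Editor's note -- worked examples for __getaddr above:
    #   self.__getaddr('FROM:', 'FROM:<[email protected]>')  -> '[email protected]'
    #   self.__getaddr('FROM:', 'FROM:<>')                -> '<>' (null sender is kept as-is)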
def smtp_MAIL(self, arg):
print >> DEBUGSTREAM, '===> MAIL', arg
address = self.__getaddr('FROM:', arg) if arg else None
if not address:
self.push('501 Syntax: MAIL FROM:<address>')
return
if self.__mailfrom:
self.push('503 Error: nested MAIL command')
return
self.__mailfrom = address
print >> DEBUGSTREAM, 'sender:', self.__mailfrom
self.push('250 Ok')
def smtp_RCPT(self, arg):
print >> DEBUGSTREAM, '===> RCPT', arg
if not self.__mailfrom:
self.push('503 Error: need MAIL command')
return
address = self.__getaddr('TO:', arg) if arg else None
if not address:
self.push('501 Syntax: RCPT TO: <address>')
return
self.__rcpttos.append(address)
print >> DEBUGSTREAM, 'recips:', self.__rcpttos
self.push('250 Ok')
def smtp_RSET(self, arg):
if arg:
self.push('501 Syntax: RSET')
return
# Resets the sender, recipients, and data, but not the greeting
self.__mailfrom = None
self.__rcpttos = []
self.__data = ''
self.__state = self.COMMAND
self.push('250 Ok')
def smtp_DATA(self, arg):
if not self.__rcpttos:
self.push('503 Error: need RCPT command')
return
if arg:
self.push('501 Syntax: DATA')
return
self.__state = self.DATA
self.set_terminator('\r\n.\r\n')
self.push('354 End data with <CR><LF>.<CR><LF>')
class SMTPServer(asyncore.dispatcher):
def __init__(self, localaddr, remoteaddr):
self._localaddr = localaddr
self._remoteaddr = remoteaddr
asyncore.dispatcher.__init__(self)
try:
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# try to re-use a server port if possible
self.set_reuse_addr()
self.bind(localaddr)
self.listen(5)
except:
# cleanup asyncore.socket_map before raising
self.close()
raise
else:
print >> DEBUGSTREAM, \
'%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
self.__class__.__name__, time.ctime(time.time()),
localaddr, remoteaddr)
def handle_accept(self):
pair = self.accept()
if pair is not None:
conn, addr = pair
print >> DEBUGSTREAM, 'Incoming connection from %s' % repr(addr)
channel = SMTPChannel(self, conn, addr)
# API for "doing something useful with the message"
def process_message(self, peer, mailfrom, rcpttos, data):
"""Override this abstract method to handle messages from the client.
peer is a tuple containing (ipaddr, port) of the client that made the
socket connection to our smtp port.
mailfrom is the raw address the client claims the message is coming
from.
rcpttos is a list of raw addresses the client wishes to deliver the
message to.
data is a string containing the entire full text of the message,
headers (if supplied) and all. It has been `de-transparencied'
according to RFC 821, Section 4.5.2. In other words, a line
containing a `.' followed by other text has had the leading dot
removed.
This function should return None, for a normal `250 Ok' response;
otherwise it returns the desired response string in RFC 821 format.
"""
raise NotImplementedError
class DebuggingServer(SMTPServer):
# Do something with the gathered message
def process_message(self, peer, mailfrom, rcpttos, data):
inheaders = 1
lines = data.split('\n')
print '---------- MESSAGE FOLLOWS ----------'
for line in lines:
# headers first
if inheaders and not line:
print 'X-Peer:', peer[0]
inheaders = 0
print line
print '------------ END MESSAGE ------------'
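# Editor's sketch: the servers can also be driven programmatically instead of through the
# command-line proxy setup at the bottom of this module, e.g.
#
#   server = DebuggingServer(('localhost', 8025), None)
#   try:
#       asyncore.loop()
#   except KeyboardInterrupt:
#       pass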
class PureProxy(SMTPServer):
def process_message(self, peer, mailfrom, rcpttos, data):
lines = data.split('\n')
# Look for the last header
i = 0
for line in lines:
if not line:
break
i += 1
lines.insert(i, 'X-Peer: %s' % peer[0])
data = NEWLINE.join(lines)
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got some refusals:', refused
def _deliver(self, mailfrom, rcpttos, data):
import smtplib
refused = {}
try:
s = smtplib.SMTP()
s.connect(self._remoteaddr[0], self._remoteaddr[1])
try:
refused = s.sendmail(mailfrom, rcpttos, data)
finally:
s.quit()
except smtplib.SMTPRecipientsRefused, e:
print >> DEBUGSTREAM, 'got SMTPRecipientsRefused'
refused = e.recipients
except (socket.error, smtplib.SMTPException), e:
print >> DEBUGSTREAM, 'got', e.__class__
# All recipients were refused. If the exception had an associated
            # error code, use it. Otherwise, fake it with a non-triggering
# exception code.
errcode = getattr(e, 'smtp_code', -1)
errmsg = getattr(e, 'smtp_error', 'ignore')
for r in rcpttos:
refused[r] = (errcode, errmsg)
return refused
class MailmanProxy(PureProxy):
def process_message(self, peer, mailfrom, rcpttos, data):
from cStringIO import StringIO
from Mailman import Utils
from Mailman import Message
from Mailman import MailList
# If the message is to a Mailman mailing list, then we'll invoke the
# Mailman script directly, without going through the real smtpd.
# Otherwise we'll forward it to the local proxy for disposition.
listnames = []
for rcpt in rcpttos:
local = rcpt.lower().split('@')[0]
# We allow the following variations on the theme
# listname
# listname-admin
# listname-owner
# listname-request
# listname-join
# listname-leave
parts = local.split('-')
if len(parts) > 2:
continue
listname = parts[0]
if len(parts) == 2:
command = parts[1]
else:
command = ''
if not Utils.list_exists(listname) or command not in (
'', 'admin', 'owner', 'request', 'join', 'leave'):
continue
listnames.append((rcpt, listname, command))
# Remove all list recipients from rcpttos and forward what we're not
# going to take care of ourselves. Linear removal should be fine
# since we don't expect a large number of recipients.
for rcpt, listname, command in listnames:
rcpttos.remove(rcpt)
# If there's any non-list destined recipients left,
print >> DEBUGSTREAM, 'forwarding recips:', ' '.join(rcpttos)
if rcpttos:
refused = self._deliver(mailfrom, rcpttos, data)
# TBD: what to do with refused addresses?
print >> DEBUGSTREAM, 'we got refusals:', refused
# Now deliver directly to the list commands
mlists = {}
s = StringIO(data)
msg = Message.Message(s)
# These headers are required for the proper execution of Mailman. All
# MTAs in existence seem to add these if the original message doesn't
# have them.
if not msg.getheader('from'):
msg['From'] = mailfrom
if not msg.getheader('date'):
msg['Date'] = time.ctime(time.time())
for rcpt, listname, command in listnames:
print >> DEBUGSTREAM, 'sending message to', rcpt
mlist = mlists.get(listname)
if not mlist:
mlist = MailList.MailList(listname, lock=0)
mlists[listname] = mlist
# dispatch on the type of command
if command == '':
# post
msg.Enqueue(mlist, tolist=1)
elif command == 'admin':
msg.Enqueue(mlist, toadmin=1)
elif command == 'owner':
msg.Enqueue(mlist, toowner=1)
elif command == 'request':
msg.Enqueue(mlist, torequest=1)
elif command in ('join', 'leave'):
# TBD: this is a hack!
if command == 'join':
msg['Subject'] = 'subscribe'
else:
msg['Subject'] = 'unsubscribe'
msg.Enqueue(mlist, torequest=1)
class Options:
setuid = 1
classname = 'PureProxy'
def parseargs():
global DEBUGSTREAM
try:
opts, args = getopt.getopt(
sys.argv[1:], 'nVhc:d',
['class=', 'nosetuid', 'version', 'help', 'debug'])
except getopt.error, e:
usage(1, e)
options = Options()
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print >> sys.stderr, __version__
sys.exit(0)
elif opt in ('-n', '--nosetuid'):
options.setuid = 0
elif opt in ('-c', '--class'):
options.classname = arg
elif opt in ('-d', '--debug'):
DEBUGSTREAM = sys.stderr
# parse the rest of the arguments
if len(args) < 1:
localspec = 'localhost:8025'
remotespec = 'localhost:25'
elif len(args) < 2:
localspec = args[0]
remotespec = 'localhost:25'
elif len(args) < 3:
localspec = args[0]
remotespec = args[1]
else:
usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
# split into host/port pairs
i = localspec.find(':')
if i < 0:
usage(1, 'Bad local spec: %s' % localspec)
options.localhost = localspec[:i]
try:
options.localport = int(localspec[i+1:])
except ValueError:
usage(1, 'Bad local port: %s' % localspec)
i = remotespec.find(':')
if i < 0:
usage(1, 'Bad remote spec: %s' % remotespec)
options.remotehost = remotespec[:i]
try:
options.remoteport = int(remotespec[i+1:])
except ValueError:
usage(1, 'Bad remote port: %s' % remotespec)
return options
if __name__ == '__main__':
options = parseargs()
# Become nobody
classname = options.classname
if "." in classname:
lastdot = classname.rfind(".")
mod = __import__(classname[:lastdot], globals(), locals(), [""])
classname = classname[lastdot+1:]
else:
import __main__ as mod
class_ = getattr(mod, classname)
proxy = class_((options.localhost, options.localport),
(options.remotehost, options.remoteport))
if options.setuid:
try:
import pwd
except ImportError:
print >> sys.stderr, \
'Cannot import module "pwd"; try running with -n option.'
sys.exit(1)
nobody = pwd.getpwnam('nobody')[2]
try:
os.setuid(nobody)
except OSError, e:
if e.errno != errno.EPERM: raise
print >> sys.stderr, \
'Cannot setuid "nobody"; try running with -n option.'
sys.exit(1)
try:
asyncore.loop()
except KeyboardInterrupt:
pass
| vFense/vFenseAgent-nix | agent/deps/rpm6/Python-2.7.5/bin/smtpd.py | Python | lgpl-3.0 | 18,564 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hischool.settings")
# Add the "core" and "extensions" folders to the path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "extensions"))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "core"))
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| HiSchoolProject/BackSchool | manage.py | Python | lgpl-3.0 | 1,016 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of nmaptocsv.
#
# Copyright (C) 2012, 2019 Thomas Debize <tdebize at mail.com>
# All rights reserved.
#
# nmaptocsv is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nmaptocsv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with nmaptocsv. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Global imports
import sys
import re
import csv
import struct
import socket
import itertools
import argparse
import xml.etree.cElementTree as ET
# Python 2 and 3 compatibility
if (sys.version_info < (3, 0)):
izip = itertools.izip
fd_read_options = 'rb'
fd_write_options = 'wb'
else:
izip = zip
fd_read_options = 'r'
fd_write_options = 'w'
# Script version
VERSION = '1.6'
# Options definition
parser = argparse.ArgumentParser()
# Options definition
mandatory_grp = parser.add_argument_group('Mandatory parameters')
mandatory_grp.add_argument('-i', '--input', help = 'Nmap scan output file in normal (-oN) or Grepable (-oG) format (stdin if not specified)')
mandatory_grp.add_argument('-x', '--xml-input', help = 'Nmap scan output file in XML (-oX) format')
output_grp = parser.add_argument_group('Output parameters')
output_grp.add_argument('-o', '--output', help = 'CSV output filename (stdout if not specified)')
output_grp.add_argument('-f', '--format', help = 'CSV column format { fqdn, rdns, hop_number, ip, mac_address, mac_vendor, port, protocol, os, script, service, version } (default: ip-fqdn-port-protocol-service-version)', default = 'ip-fqdn-port-protocol-service-version')
output_grp.add_argument('-S', '--script', help = 'Adds the script column in output, alias for -f "ip-fqdn-port-protocol-service-version-script"', action = 'store_const', const = 'ip-fqdn-port-protocol-service-version-script')
output_grp.add_argument('-d', '--delimiter', help = 'CSV output delimiter (default ";"). Ex: -d ","', default = ';')
output_grp.add_argument('-n', '--no-newline', help = 'Do not insert a newline between each host. By default, a newline is added for better readability', action = 'store_true', default = False)
output_grp.add_argument('-s', '--skip-header', help = 'Do not print the CSV header', action = 'store_true', default = False)
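# Editor's note -- typical invocations (sketch; file names are placeholders):
#   ./nmaptocsv.py -i scan.gnmap -f ip-fqdn-port-protocol-service-version -d ',' -o scan.csv
#   nmap -sV 192.168.1.0/24 -oG - | ./nmaptocsv.py -S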
# Handful patterns
#-- IP regex
p_ip_elementary = r'(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})\.(?:[\d]{1,3})'
p_mac_elementary = r'[0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F]'
# Nmap Normal Output patterns
#-- Target
p_ip_nmap5 = r'Interesting.*on\s(?:(?P<fqdn_nmap5>.*) (?=\((?P<ip_nmap5>%s)\)))|Interesting.*on\s(?P<ip_only_nmap5>.*)\:' % p_ip_elementary
p_ip_nmap6 = r'Nmap.*for\s(?:(?P<fqdn_nmap6>.*) (?=\((?P<ip_nmap6>%s)\)))|Nmap.*for\s(?P<ip_only_nmap6>%s)$' % (p_ip_elementary, p_ip_elementary)
p_ip = re.compile('%s|%s' % (p_ip_nmap5, p_ip_nmap6))
#-- rDNS
p_rdns = re.compile(r'rDNS record for (?P<ip>%s):\s(?P<rdns>.*)$' % p_ip_elementary)
#-- Port header
p_port_header = re.compile(r'^(?P<port>PORT)\s+(?P<state>STATE)\s+(?P<service>SERVICE)\s+(?P<reason>REASON\s*)?(?P<version>VERSION$)?')
#-- Port finding
p_port_without_reason = re.compile(r'^(?P<number>[\d]+)\/(?P<protocol>tcp|udp)\s+(?:open|open\|filtered)\s+(?P<service>[\w\S]*)(?:\s*(?P<version>.*))?$')
p_port_with_reason = re.compile(r'^(?P<number>[\d]+)\/(?P<protocol>tcp|udp)\s+(?:open|open\|filtered)\s+(?P<service>[\w\S]*)\s+(?P<reason>.* ttl [\d]+)\s*(?:\s*(?P<version>.*))$')
#-- Script output finding
p_script = re.compile(r'^\|[\s|\_](?P<script>.*)$')
#-- MAC address
p_mac = re.compile(r'MAC Address:\s(?P<mac_addr>(%s))\s\((?P<mac_vendor>.*)\)' % p_mac_elementary)
#-- OS detection (pattern order is important, the latter position the more precise and reliable the information is)
p_os = re.compile(r'(?:^Service Info: OS|^OS CPE|\s+OS|^OS details|smb-os-discovery|\|):\s(?P<os>[^;]+)')
#-- Network distance
p_network_dist = re.compile(r'Network Distance:\s(?P<hop_number>\d+)\shops?')
# Nmap Grepable output
#-- Target, Ports
p_grepable = re.compile(r'(?P<whole_line>^Host:\s.*)')
# Handful functions
def dottedquad_to_num(ip):
"""
Convert decimal dotted quad string IP to long integer
"""
return struct.unpack('!L',socket.inet_aton(ip))[0]
def num_to_dottedquad(n):
"""
Convert long int IP to dotted quad string
"""
return socket.inet_ntoa(struct.pack('!L',n))
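# Editor's note -- worked example of the two helpers above:
#   dottedquad_to_num('10.0.0.1') -> 167772161
#   num_to_dottedquad(167772161)  -> '10.0.0.1'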
def unique_match_from_list(list):
"""
Check the list for a potential pattern match
@param list : a list of potential matching groups
@rtype : return the string representation of the unique value that matched, or nothing if nothing matched
"""
result = ''
for item in list:
if item != None:
result = str(item)
return result
def extract_matching_pattern(regex, group_name, unfiltered_list):
"""
Return the desired group_name from a list of matching patterns
@param regex : a regular expression with named groups
@param group_name : the desired matching group name value
@param unfiltered_list : a list of matches
@rtype : the string value
"""
result = ''
filtered_list = list(filter(regex.search, unfiltered_list))
if len(filtered_list) == 1:
filtered_string = ''.join(filtered_list)
result = regex.search(filtered_string).group(group_name)
return result
class Host:
def __init__(self, ip, fqdn=''):
self.ip_dottedquad = ip
self.ip_num = dottedquad_to_num(ip)
self.fqdn = fqdn
self.rdns = ''
self.ports = []
self.os = ''
self.mac_address = ''
self.mac_address_vendor = ''
self.network_distance = ''
def add_port(self, port):
self.ports.append(port)
# Getters
def get_ip_num_format(self):
return str(self.ip_num)
def get_ip_dotted_format(self):
return str(self.ip_dottedquad)
def get_fqdn(self):
return str(self.fqdn)
def get_rdns_record(self):
return str(self.rdns)
def get_port_list(self):
return self.ports
def get_port_number_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_number())
return result
def get_port_protocol_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_protocol())
return result
def get_port_service_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_service())
return result
def get_port_version_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_version())
return result
def get_port_script_list(self):
if not(self.get_port_list()):
return ['']
else:
result = []
for port in self.get_port_list():
result.append(port.get_script())
return result
def get_os(self):
return str(self.os)
def get_mac_address(self):
return str(self.mac_address)
def get_mac_address_vendor(self):
return str(self.mac_address_vendor)
def get_network_distance(self):
return str(self.network_distance)
# Setters
def set_fqdn(self, fqdn):
self.fqdn = fqdn
def set_rdns_record(self, rdns_record):
self.rdns = rdns_record
def set_os(self, os):
self.os = os
def set_mac(self, mac_address, mac_address_vendor = ''):
self.mac_address = mac_address
self.mac_address_vendor = mac_address_vendor
def set_network_distance(self, network_distance):
self.network_distance = network_distance
class Port:
def __init__(self, number, protocol, service='', version='', script=''):
self.number = number
self.protocol = protocol
self.service = service
self.version = version
self.script = script
def get_number(self):
return self.number
def get_protocol(self):
return self.protocol
def get_service(self):
return self.service
def get_version(self):
return self.version
def get_script(self):
return self.script.strip()
def set_service(self, service):
self.service = service
def set_version(self, version):
self.version = version
def set_script(self, script):
self.script = script
def split_grepable_match(raw_string):
"""
Split the raw line to a neat Host object
@param raw_string : the whole 'Host' line
@rtype : return an Host object
"""
global p_ip_elementary
splitted_fields = raw_string.split("\t")
# Patterns
p_host = re.compile(r'Host:\s(?P<ip>%s)\s+\((?P<fqdn>|.*)\)' % p_ip_elementary)
p_ports = re.compile(r'Ports:\s+(?P<ports>.*)/')
p_os = re.compile(r'OS:\s(?P<os>.*)')
# Extracted named-group matches
IP_str = extract_matching_pattern(p_host, 'ip', splitted_fields)
FQDN_str = extract_matching_pattern(p_host, 'fqdn', splitted_fields)
ports_str = extract_matching_pattern(p_ports, 'ports', splitted_fields)
OS_str = extract_matching_pattern(p_os, 'os', splitted_fields)
current_host = Host(IP_str, FQDN_str)
current_host.set_os(OS_str)
# Let's split the raw port list
all_ports = ports_str.split(', ')
# Keep only open ports
open_ports_list = filter(lambda p: '/open/' in p, all_ports)
for open_port in open_ports_list:
# Extract each field from the format [port number / state / protocol / owner / service / rpc info / version info]
# -- Thanks to http://www.unspecific.com/nmap-oG-output/
number, state, protocol, owner, service, version = open_port.split('/', 5)
# remove potential leading and trailing slashes on version
version = version.strip('/')
new_port = Port(number, protocol, service, version)
current_host.add_port(new_port)
return current_host
def parse(fd):
"""
Parse the data according to several regexes
@param fd : input file descriptor, could be a true file or stdin
@rtype : return a list of <Host> objects indexed from their numerical IP representation
"""
global p_ip_elementary, p_ip, p_port_without_reason, p_port_with_reason, p_grepable, p_script, p_mac, p_os, p_network_dist, p_rdns
IPs = {}
last_host = None
p_port = p_port_without_reason
in_script_line = False
script = ''
lines = [l.rstrip() for l in fd.readlines()]
for line in lines:
# 1st case: Nmap Normal Output
#-- 1st action: Grab the IP
IP = p_ip.search(line)
if IP:
# Check out what patterns matched
IP_potential_match = [IP.group('ip_nmap5'), IP.group('ip_only_nmap5'), IP.group('ip_nmap6'), IP.group('ip_only_nmap6')]
IP_str = unique_match_from_list(IP_potential_match)
FQDN_potential_match = [IP.group('fqdn_nmap5'), IP.group('fqdn_nmap6')]
FQDN_str = unique_match_from_list(FQDN_potential_match)
new_host = Host(IP_str, FQDN_str)
IPs[new_host.get_ip_num_format()] = new_host
last_host = new_host
# 1st case: Nmap Normal Output
#-- 2nd action: Check if there is a rDNS record
rDNS = p_rdns.search(line)
if rDNS:
if rDNS.group('ip') and rDNS.group('rdns'):
rdns_ip_num_format = str(dottedquad_to_num(rDNS.group('ip')))
if rdns_ip_num_format in IPs.keys():
IPs[rdns_ip_num_format].set_rdns_record(rDNS.group('rdns'))
# 1st case: Nmap Normal Output
#-- 3rd action: Check the port header, to know if there is a reason column
port_header = p_port_header.search(line)
if port_header:
if port_header.group('reason'):
p_port = p_port_with_reason
else:
p_port = p_port_without_reason
# 1st case: Nmap Normal Output
#-- 4th action: Grab the script output
script_line = p_script.search(line)
if script_line:
in_script_line = True
script = script + script_line.group('script') + '\n'
else:
# We were in a script output section, now it's finished
if in_script_line:
                last_port = last_host.get_port_list()[-1]
                last_port.set_script(script)
                # resetting trackers
in_script_line = False
script = ''
# 1st case: Nmap Normal Output
#-- 5th action: Grab the port
port = p_port.search(line)
if port and last_host != None:
number = str(port.group('number'))
protocol = str(port.group('protocol'))
service = str(port.group('service'))
version = str(port.group('version'))
new_port = Port(number, protocol, service, version)
last_host.add_port(new_port)
# 1st case: Nmap Normal Output
#-- 6th action: Grab the MAC address
mac = p_mac.search(line)
if mac:
last_host.set_mac(str(mac.group('mac_addr')), str(mac.group('mac_vendor')))
# 1st case: Nmap Normal Output
#-- 7th action: Grab the OS detection
os = p_os.search(line)
if os:
last_host.set_os(str(os.group('os')))
# 1st case: Nmap Normal Output
#-- 8th action: Grab the network distance
network_distance = p_network_dist.search(line)
if network_distance:
last_host.set_network_distance(str(network_distance.group('hop_number')))
# 2nd case: Nmap Grepable Output
#-- 1 sole action: Grab the whole line for further splitting
grepable = p_grepable.search(line)
if grepable:
if grepable.group('whole_line'):
new_host = split_grepable_match(grepable.group('whole_line'))
                # Update the occurrence found with 'Status: Up'
IPs[new_host.get_ip_num_format()] = new_host
last_host = new_host
return IPs
def parse_xml(xml_file):
"""
Parse the XML file
@param xml_file : the input file
@rtype : return a list of <Host> objects indexed from their numerical IP representation
"""
IPs = {}
try:
tree = ET.ElementTree(file=xml_file)
root = tree.getroot()
except ET.ParseError as e:
print("[!] An error has occurred while parsing the XML file: '%s'.\nExiting" % e)
return None
for host in root.findall('host'):
if 'up' in host.find('status').get('state'):
# IP, MAC
addresses = host.findall('address')
for address in addresses:
if 'ipv4' in address.get('addrtype') and address.get('addr'):
ip_dottedquad = address.get('addr')
new_host = Host(ip_dottedquad)
if 'mac' in address.get('addrtype'):
mac_addr = address.get('addr')
mac_vendor = address.get('vendor')
new_host.set_mac(mac_addr, mac_vendor)
# FQDN, RDNS
hostnames = host.findall('./hostnames/hostname')
for hostname in hostnames:
if hostname.get('name') and 'user' in hostname.get('type'):
new_host.set_fqdn(hostname.get('name'))
if hostname.get('name') and 'PTR' in hostname.get('type'):
new_host.set_rdns_record(hostname.get('name'))
# Ports (protocol, number, service, version) and script output
open_ports = host.findall("./ports/port/state[@state='open']/..")
for port in open_ports:
protocol = port.get('protocol')
number = port.get('portid')
new_port = Port(number, protocol)
service = port.find('service')
if service != None:
service_name = service.get('name') if service.get('name') else ''
service_product = service.get('product') if service.get('product') else ''
service_version = service.get('version') if service.get('version') else ''
service_extrainfo = service.get('extrainfo') if service.get('extrainfo') else ''
version = ("%s %s %s" % (service_product, service_version, service_extrainfo)).strip()
new_port.set_service(service_name)
new_port.set_version(version)
scripts = port.findall('script')
script_output = ''
for script in scripts:
script_output = script_output + "\n%s: %s" % (script.get('id'), script.get('output'))
new_port.set_script(script_output)
new_host.add_port(new_port)
# OS
osmatches = host.findall('./os/osmatch')
os = "|".join(osmatch.get('name') for osmatch in osmatches)
new_host.set_os(os)
# Hop
hop_number = len(host.findall('./trace/hop'))
new_host.set_network_distance(hop_number)
IPs[new_host.get_ip_num_format()] = new_host
return IPs
def is_format_valid(fmt):
"""
Check for the supplied custom output format
@param fmt : the supplied format
@rtype : True or False
"""
supported_format_objects = [ 'fqdn', 'rdns', 'hop_number', 'ip', 'mac_address', 'mac_vendor', 'port', 'protocol', 'os', 'script', 'service', 'version' ]
unknown_items = []
for fmt_object in fmt.split('-'):
if not(fmt_object in supported_format_objects):
unknown_items.append(fmt_object)
if unknown_items:
return False, unknown_items
else:
return True, None
def formatted_item(host, format_item):
"""
return the attribute value related to the host
@param host : host object
@param format_item : the attribute supplied in the custom format
@rtype : the <list> attribute value
"""
if isinstance(host, Host):
option_map = {
'fqdn': [host.get_fqdn()],
'rdns': [host.get_rdns_record()],
'hop_number': [host.get_network_distance()],
'ip': [host.get_ip_dotted_format()],
'mac_address': [host.get_mac_address()],
'mac_vendor': [host.get_mac_address_vendor()],
'os': [host.get_os()],
'port': host.get_port_number_list(),
'protocol': host.get_port_protocol_list(),
'service': host.get_port_service_list(),
'version': host.get_port_version_list(),
'script': host.get_port_script_list()
}
if format_item in option_map.keys():
return option_map[format_item]
else:
return ''
else:
return []
def repeat_attributes(attribute_list):
"""
        repeat each attribute list up to the length of the longest one
@param attribute_list : raw list with different attribute list length
@rtype : a list consisting of length equal attribute list
"""
max_number = len(max(attribute_list, key=len))
attribute_list = map(lambda x: x * max_number, attribute_list)
return attribute_list
def generate_csv(fd, results, options):
"""
Generate a plain ';' separated csv file with the desired or default attribute format
@param fd : output file descriptor, could be a true file or stdout
"""
if results:
spamwriter = csv.writer(fd, delimiter=options.delimiter, quoting=csv.QUOTE_ALL, lineterminator='\n')
splitted_options_format = options.format.split('-')
if not options.skip_header:
csv_header = [format_item.upper() for format_item in splitted_options_format]
spamwriter.writerow(csv_header)
# for IP in sorted(results.iterkeys())
for IP in sorted(results):
formatted_attribute_list = []
for index,format_item in enumerate(splitted_options_format):
item = formatted_item(results[IP], format_item)
formatted_attribute_list.insert(index, item)
formatted_attribute_list = repeat_attributes(formatted_attribute_list)
for line_to_write in izip(*formatted_attribute_list):
spamwriter.writerow(list(line_to_write))
# Print a newline if asked
if not options.no_newline:
spamwriter.writerow('')
return
def main():
global parser
options = parser.parse_args()
# Supplied format
if options.script:
options.format = options.script
valid_format, unknown_items = is_format_valid(options.format)
if not valid_format:
parser.error("Please specify a valid output format: '%s' is invalid \n\
Supported objects are { fqdn, rdns, hop_number, ip, mac_address, mac_vendor, port, protocol, os, script, service, version }" % ', '.join(unknown_items))
# Input selection
if (options.input != None) and (options.xml_input != None):
parser.error("Please specify either a normal/grepable or an XML input file")
elif (options.input == None) and (options.xml_input != None):
results = parse_xml(options.xml_input)
elif options.xml_input == None:
if options.input != None:
fd_input = open(options.input, fd_read_options)
else:
# No input file specified, reading from stdin
fd_input = sys.stdin
# Analysis
results = parse(fd_input)
fd_input.close()
# Output descriptor
if options.output != None:
fd_output = open(options.output, fd_write_options)
else:
# No output file specified, writing to stdout
fd_output = sys.stdout
# CSV output
generate_csv(fd_output, results, options)
fd_output.close()
return
if __name__ == "__main__":
main() | maaaaz/nmaptocsv | nmaptocsv.py | Python | lgpl-3.0 | 24,214 |
import os, sys
origin_dir = 'del_201304now/'
new_dir = 'freq_event_state/'
files = os.listdir(origin_dir)
state_dir = {}
country_dir = {}
for file in files:
with open(origin_dir + file) as f:
event_dir = {}
for line in f:
tmp_content = line.split('\t')
code = tmp_content[4]
location = tmp_content[14]
tmp_loc = location.split(',')
length = len(tmp_loc)
state = ''
if length == 3:
state = tmp_loc[1]
elif length == 2:
state = tmp_loc[0]
else:
continue
country = tmp_loc[length-1]
if country not in country_dir:
country_dir[country] = {}
if state in country_dir[country]:
tmp_dir = country_dir[country][state]
if code in tmp_dir:
tmp_dir[code] += 1
else:
tmp_dir[code] = 1
else:
country_dir[country][state] = {}
country_dir[country][state][code] = 1
for country_name,countries in country_dir.items():
for state_name, states in countries.items():
dir_path = '%s%s/%s/'%(new_dir, country_name, state_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(dir_path+file, 'a') as writer:
for event, freq in states.items():
writer.write(event+': '+str(freq)+'\n')
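# Editor's note on the resulting layout (sketch; the event codes shown are hypothetical):
# one output file per input file is appended under freq_event_state/<country>/<state>/,
# each line holding "<event code>: <frequency>", e.g.
#   043: 17
#   190: 4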
| moment-of-peace/EventForecast | association_rule/event_frequent.py | Python | lgpl-3.0 | 1,535 |
"""
Exim SES Transport Entry Points
"""
# Copyright 2013, Jayson Vantuyl <[email protected]>
#
# This file is part of exim_ses_transport.
#
# exim_ses_transport is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# exim_ses_transport is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with exim_ses_transport. If not, see <http://www.gnu.org/licenses/>.
from transport import SesSender
def main():
SesSender().run()
| jvantuyl/exim_ses_transport | exim_ses_transport/run.py | Python | lgpl-3.0 | 880 |
import django
from django.db import models
from pytigon_lib.schdjangoext.fields import *
from pytigon_lib.schdjangoext.models import *
import pytigon_lib.schdjangoext.fields as ext_models
from pytigon_lib.schtools import schjson
from django.utils.translation import gettext_lazy as _
from django.contrib import admin
import os, os.path
import sys
from pytigon_lib.schhtml.htmltools import superstrip
from schwiki.models import *
from schcommander.models import *
from schtasks.models import *
from schtools.models import *
| Splawik/pytigon | pytigon/prj/scheditor/sched/models.py | Python | lgpl-3.0 | 531 |
from gen_data_from_rbprm import *
from hpp.corbaserver.rbprm.tools.com_constraints import get_com_constraint
from hpp.gepetto import PathPlayer
from hpp.corbaserver.rbprm.state_alg import computeIntermediateState, isContactCreated
from numpy import matrix, asarray
from numpy.linalg import norm
from spline import bezier
def __curveToWps(curve):
return asarray(curve.waypoints().transpose()).tolist()
def __Bezier(wps, init_acc = [0.,0.,0.], end_acc = [0.,0.,0.], init_vel = [0.,0.,0.], end_vel = [0.,0.,0.]):
c = curve_constraints();
c.init_vel = matrix(init_vel);
c.end_vel = matrix(end_vel);
c.init_acc = matrix(init_acc);
c.end_acc = matrix(end_acc);
matrix_bezier = matrix(wps).transpose()
curve = bezier(matrix_bezier, c)
return __curveToWps(curve), curve
#~ return __curveToWps(bezier(matrix_bezier))
allpaths = []
def play_all_paths():
for _, pid in enumerate(allpaths):
ppl(pid)
def play_all_paths_smooth():
for i, pid in enumerate(allpaths):
if i % 2 == 1 :
ppl(pid)
def play_all_paths_qs():
for i, pid in enumerate(allpaths):
if i % 2 == 0 :
ppl(pid)
def test(s1,s2, path = False, use_rand = False, just_one_curve = False, num_optim = 0, effector = False, mu=0.5, use_Kin = True) :
q1 = s1.q()
q2 = s2.q()
stateid = s1.sId
stateid1 = s2.sId
sInt = computeIntermediateState(s1,s2)
com_1 = s1.getCenterOfMass()
com_2 = s2.getCenterOfMass()
createPtBox(viewer.client.gui, 0, com_1, 0.01, [0,1,1,1.])
createPtBox(viewer.client.gui, 0, com_2, 0.01, [0,1,1,1.])
#~ isContactCreated_= isContactCreated(s1,s2)
isContactCreated_ = True
data = gen_sequence_data_from_state_objects(s1,s2,sInt,mu = mu, isContactCreated = isContactCreated_)
c_bounds_1 = s1.getComConstraint(limbsCOMConstraints)
c_bounds_mid = sInt.getComConstraint(limbsCOMConstraints)
c_bounds_2 = s2.getComConstraint(limbsCOMConstraints)
success, c_mid_1, c_mid_2 = solve_quasi_static(data, c_bounds = [c_bounds_1, c_bounds_2, c_bounds_mid], use_rand = use_rand, mu = mu, use_Kin = use_Kin)
print "$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ calling effector", effector
paths_ids = []
if path and success:
#~ fullBody.straightPath([c_mid_1[0].tolist(),c_mid_2[0].tolist()])
#~ fullBody.straightPath([c_mid_2[0].tolist(),com_2])
if just_one_curve:
bezier_0, curve = __Bezier([com_1,c_mid_1[0].tolist(),c_mid_2[0].tolist(),com_2])
createPtBox(viewer.client.gui, 0, c_mid_1[0].tolist(), 0.01, [0,1,0,1.])
createPtBox(viewer.client.gui, 0, c_mid_2[0].tolist(), 0.01, [0,1,0,1.])
#testing intermediary configurations
partions = [0.,0.3,0.8,1.]
#~ if(not isContactCreated_):
#~ partions = [0.,0.6,0.8,1.]
            print 'partitions:', partions[1], " "
com_interm2 = curve(partions[2])
#~ print "com_1", com_1
#~ print "com_1", curve(partions[0])
#~ print "com_interm2", com_interm2
#~ print "com_2", com_2
#~ print "com_2", curve(partions[-1])
success_proj1 = False;
success_proj2 = False
for _ in range(7):
print "WRTFF", partions[1]
com_interm1 = curve(partions[1])
print "com_interm1", com_interm1
success_proj1 = project_com_colfree(fullBody, stateid , asarray((com_interm1).transpose()).tolist()[0])
if success_proj1:
break
else:
print "decreasing com"
partions[1] -= 0.04
for _ in range(7):
print "WRTFF", partions[-2]
com_interm2 = curve(partions[-2])
print "com_interm2", com_interm2
success_proj2 = project_com_colfree(fullBody, stateid1 , asarray((com_interm2).transpose()).tolist()[0])
if success_proj2:
break
else:
print "decreasing com"
partions[-2] += 0.039
#~ success_proj2 = project_com_colfree(fullBody, stateid1 , asarray((com_interm2).transpose()).tolist()[0])
#~ if success_proj1:
#~ q_1 = fullBody.projectToCom(stateid, asarray((com_interm1).transpose()).tolist()[0])
#~ viewer(q_1)
if not success_proj1:
print "proj 1 failed"
return False, c_mid_1, c_mid_2, paths_ids
if not success_proj2:
print "proj 2 failed"
return False, c_mid_1, c_mid_2, paths_ids
p0 = fullBody.generateCurveTrajParts(bezier_0,partions)
#~ pp.displayPath(p0+1)
#~ pp.displayPath(p0+2)
ppl.displayPath(p0)
#~ ppl.displayPath(p0+1)
#~ ppl.displayPath(p0+2)
#~ ppl.displayPath(p0+3)
if(effector):
#~ assert False, "Cant deal with effectors right now"
paths_ids = [int(el) for el in fullBody.effectorRRT(stateid,p0+1,p0+2,p0+3,num_optim)]
else:
paths_ids = [int(el) for el in fullBody.comRRTFromPosBetweenState(stateid,stateid1,p0+1,p0+2,p0+3,num_optim)]
else:
success_proj1 = project_com_colfree(fullBody, stateid , c_mid_1[0].tolist())
success_proj2 = project_com_colfree(fullBody, stateid1 , c_mid_2[0].tolist())
if not success_proj1:
print "proj 1 failed"
return False, c_mid_1, c_mid_2, paths_ids
if not success_proj2:
print "proj 2 failed"
return False, c_mid_1, c_mid_2, paths_ids
bezier_0, curve = __Bezier([com_1,c_mid_1[0].tolist()] , end_acc = c_mid_1[1].tolist() , end_vel = [0.,0.,0.])
bezier_1, curve = __Bezier([c_mid_1[0].tolist(),c_mid_2[0].tolist()], end_acc = c_mid_2[1].tolist(), init_acc = c_mid_1[1].tolist(), init_vel = [0.,0.,0.], end_vel = [0.,0.,0.])
bezier_2, curve = __Bezier([c_mid_2[0].tolist(),com_2] , init_acc = c_mid_2[1].tolist(), init_vel = [0.,0.,0.])
p0 = fullBody.generateCurveTraj(bezier_0)
fullBody.generateCurveTraj(bezier_1)
fullBody.generateCurveTraj(bezier_2)
ppl.displayPath(p0)
#~ ppl.displayPath(p0+1)
#~ ppl.displayPath(p0+2)
paths_ids = [int(el) for el in fullBody.comRRTFromPosBetweenState(stateid,stateid1, p0,p0+1,p0+2,num_optim)]
#~ paths_ids = []
global allpaths
allpaths += paths_ids[:-1]
#~ allpaths += [paths_ids[-1]]
#~ pp(paths_ids[-1])
#~ return success, paths_ids, c_mid_1, c_mid_2
return success, c_mid_1, c_mid_2, paths_ids
#~ data = gen_sequence_data_from_state(fullBody,3,configs)
#~ pp(29),pp(9),pp(17)
from hpp.corbaserver.rbprm.tools.path_to_trajectory import *
def createPtBox(gui, winId, config, res = 0.01, color = [1,1,1,0.3]):
print "plottiun ", config
#~ resolution = res
#~ global scene
#~ global b_id
#~ boxname = scene+"/"+str(b_id)
#~ b_id += 1
#~ gui.addBox(boxname,resolution,resolution,resolution, color)
#~ gui.applyConfiguration(boxname,[config[0],config[1],config[2],1,0,0,0])
#~ gui.addSceneToWindow(scene,winId)
#~ gui.refresh()
def test_ineq(stateid, constraints, n_samples = 10, color=[1,1,1,1.]):
Kin = get_com_constraint(fullBody, stateid, fullBody.getConfigAtState(stateid), constraints, interm = False)
#~ print "kin ", Kin
#create box around current com
fullBody.setCurrentConfig(fullBody.getConfigAtState(stateid))
com = fullBody.getCenterOfMass()
bounds_c = flatten([[com[i]-1., com[i]+1.] for i in range(3)]) # arbitrary
for i in range(n_samples):
c = array([uniform(bounds_c[2*i], bounds_c[2*i+1]) for i in range(3)])
print "c: ", c
if(Kin[0].dot(c)<=Kin[1]).all():
print "boundaries satisfied"
createPtBox(viewer.client.gui, 0, c, 0.01, color)
#~ test_ineq(0,{ rLegId : {'file': "hrp2/RL_com.ineq", 'effector' : 'RLEG_JOINT5'}}, 1000, [1,0,0,1])
#~ test_ineq(0,{ lLegId : {'file': "hrp2/LL_com.ineq", 'effector' : 'LLEG_JOINT5'}}, 1000, [0,1,0,1])
#~ test_ineq(0,{ rLegId : {'file': "hrp2/RA_com.ineq", 'effector' : rHand}}, 1000, [0,0,1,1])
#~ test_ineq(0,{ rLegId : {'file': "hrp2/RL_com.ineq", 'effector' : 'RLEG_JOINT5'}}, 1000, [0,1,1,1])
#~ test_ineq(0, limbsCOMConstraints, 1000, [0,1,1,1])
def gen(s1, s2, num_optim = 0, ine_curve =True, s = 1., effector = False, mu =0.5, gen_traj = True, use_Kin = True):
n_fail = 0;
#~ viewer(configs[i])
res = test(s1, s2, True, False, ine_curve,num_optim, effector, mu, use_Kin)
if(not res[0]):
print "lp failed"
createPtBox(viewer.client.gui, 0, res[1][0], 0.01, [1,0,0,1.])
createPtBox(viewer.client.gui, 0, res[2][0], 0.01, [1,0,0,1.])
found = False
for j in range(1):
res = test(s1, s2, True, True, ine_curve, num_optim, effector, mu, use_Kin)
createPtBox(viewer.client.gui, 0, res[1][0], 0.01, [0,1,0,1.])
createPtBox(viewer.client.gui, 0, res[2][0], 0.01, [0,1,0,1.])
if res[0]:
break
if not res[0]:
n_fail += 1
print "n_fail ", n_fail
if(gen_traj):
#~ a = gen_trajectory_to_play(fullBody, ppl, allpaths[:-3], flatten([[s*0.2, s* 0.6, s* 0.2] for _ in range(len(allpaths[:-3]) / 3)]))
a = gen_trajectory_to_play(fullBody, ppl, allpaths[-3:], flatten([[s*0.2, s* 0.6, s* 0.2] for _ in range(1)]))
#~ a = gen_trajectory_to_play(fullBody, ppl, allpaths, flatten([[s] for _ in range(len(allpaths) )]))
return a
def gen_several_states(states, num_optim = 0, ine_curve =True, s = 1., effector = False, mu =0.5, init_vel = [0.,0.,0.], init_acc = [0.,0.,0.], use_Kin = True):
com_1 = states[0].getCenterOfMass()
com_2 = states[-1].getCenterOfMass()
stateid = states[0].sId
stateid1 = states[-1].sId
com_vel = init_vel[:]
com_acc = init_acc[:]
start = states[0].sId
len_con = len(states)
print "AAAAAAAAAAAAAAAAAAAAAAAAAAAAA com_vel", com_vel
print "AAAAAAAAAAAAAAAAAAAAAAAAAAAA com_acc", com_acc
print "going from, to ", com_1, "->", com_2
print "going from, to ", start, "->", start + len_con
allpoints = [com_1]
all_partitions = []
n_fail = 0;
for i in range (len(states)-1):
#~ viewer(configs[i])
res = test(states[i], states[i+1], False, False, ine_curve,num_optim, effector, mu, use_Kin)
if(not res[0]):
print "lp failed"
createPtBox(viewer.client.gui, 0, res[1][0], 0.01, [1,0,0,1.])
createPtBox(viewer.client.gui, 0, res[2][0], 0.01, [1,0,0,1.])
found = False
for j in range(1):
                res = test(states[i], states[i+1], False, True, ine_curve, num_optim, effector, mu, use_Kin)
createPtBox(viewer.client.gui, 0, res[1][0], 0.01, [0,1,0,1.])
createPtBox(viewer.client.gui, 0, res[2][0], 0.01, [0,1,0,1.])
if res[0]:
allpoints+=[res[1][0],res[2][0]]
step = (1./ len_con)
idx = step * (i - start)
all_partitions += [idx +0.3*step,idx+0.7*step,idx+step]
break
if not res[0]:
n_fail += 1
else:
allpoints+=[res[1][0],res[2][0]]
step = (1./ len_con)
#~ idx = step * (i - start)
idx = step * i
all_partitions += [idx +0.2*step,idx+0.8*step,idx+step]
all_partitions = [0.] + all_partitions
print "n_fail ", n_fail
print "generating super curve"
print all_partitions
allpoints+=[com_2]
bezier_0, curve = __Bezier(allpoints, init_acc = init_acc, init_vel = init_vel)
com_vel = curve.derivate(0.5,1)
com_acc = curve.derivate(0.5,2)
com_vel = flatten(asarray(com_vel).transpose().tolist())
com_acc = flatten(asarray(com_acc).transpose().tolist())
print "at", 0.5
print "com_vel", com_vel
print "com_acc", com_acc
com_vel = curve.derivate(all_partitions[-1],1)
com_acc = curve.derivate(all_partitions[-1],2)
com_vel = flatten(asarray(com_vel).transpose().tolist())
com_acc = flatten(asarray(com_acc).transpose().tolist())
p0 = fullBody.generateCurveTrajParts(bezier_0,all_partitions) + 1
ppl.displayPath(p0-1)
# now we need to project all states to the new com positions
print "WTF ", len(all_partitions)
for k in range(3, len(all_partitions),3):
print "k ", k
print all_partitions[k]
new_com = flatten(asarray(curve(all_partitions[k]).transpose()).tolist())
print "curve end ", curve(1.)
ok = False
#~ try:
st = states[k/3]
sid = st.sId
print "for state", sid
print "before project to new com ", new_com
print "before previous com", st.getCenterOfMass()
for _ in range(7):
print "WRTFF", all_partitions[k]
new_com = flatten(asarray(curve(all_partitions[k]).transpose()).tolist())
#~ com_interm1 = flatten(asarray(curve(all_partitions[k]).transpose()).tolist())
print "com_interm1", new_com
ok = project_com_colfree(fullBody, sid , new_com)
if ok:
#~ new_com = asarray((com_interm1).transpose()).tolist()[0]
print "ok !!!!!!!!!!!!!!!!!"
break
else:
print "decreasing com"
all_partitions[k] -= 0.04
ok = fullBody.projectStateToCOM(sid, new_com,50)
print "projection", ok
if ok:
q1 = fullBody.getConfigAtState(sid)
ok = fullBody.isConfigValid(q1)[0]
print "is config valud", ok
#~ except:
#~ print "hpperr"
#~ break
if not ok:
print "faield to project"
return
j = 0;
print "WTF2"
print "len con", len_con
print "p0", p0
for i in range(p0,p0+(len_con-1)*3,3):
print "paths ids", i, " ", i+1, " ", i+3
print "state ", start + j
#~ paths_ids = [int(el) for el in fullBody.comRRTFromPos(start+j,i,i+1,i+2,num_optim)]
#~ ppl.displayPath(p0)
if(effector):
#~ assert False, "Cant deal with effectors right now"
paths_ids = [int(el) for el in fullBody.effectorRRT(start+j,i,i+1,i+2,num_optim)]
else:
paths_ids = [int(el) for el in fullBody.comRRTFromPos(start+j,i,i+1,i+2,num_optim)]
#~ paths_ids = [int(el) for el in fullBody.comRRTFromPosBetweenState(stateid,stateid1,p0+1,p0+2,p0+3,num_optim)]
j += 1
global allpaths
allpaths += paths_ids[:-1]
#~ p0 = fullBody.generateCurveTrajParts(bezier_0,partions)
a = gen_trajectory_to_play(fullBody, ppl, allpaths, flatten([[s*0.2, s* 0.6, s* 0.2] for _ in range(len(allpaths) / 3)]))
return a, com_vel, com_acc
def gen_several_states_partial(start = 0, len_con = 1, num_optim = 0, ine_curve =True, s = 1., effector = False, mu =0.5, init_vel = [0.,0.,0.], init_acc = [0.,0.,0.], path = False):
com_1 = __get_com(fullBody, fullBody.getConfigAtState(start))
com_2 = __get_com(fullBody, fullBody.getConfigAtState(start+len_con))
com_vel = init_vel[:]
com_acc = init_acc[:]
print "going from, to ", com_1, "->", com_2
#~ print "going from, to ", start, "->", start + len_con
allpoints = [com_1]
all_partitions = []
n_fail = 0;
for i in range (start, start+len_con):
#~ viewer(configs[i])
res = test(i, False, False, ine_curve,num_optim, effector, mu)
if(not res[0]):
print "lp failed"
createPtBox(viewer.client.gui, 0, res[1][0], 0.01, [1,0,0,1.])
createPtBox(viewer.client.gui, 0, res[2][0], 0.01, [1,0,0,1.])
found = False
for j in range(10):
res = test(i, False, True, ine_curve, num_optim, effector, mu)
createPtBox(viewer.client.gui, 0, res[1][0], 0.01, [0,1,0,1.])
createPtBox(viewer.client.gui, 0, res[2][0], 0.01, [0,1,0,1.])
if res[0]:
allpoints+=[res[1][0],res[2][0]]
step = (1./ len_con)
idx = step * (i - start)
all_partitions += [idx +0.2*step,idx+0.8*step,idx+step]
break
if not res[0]:
n_fail += 1
else:
allpoints+=[res[1][0],res[2][0]]
step = (1./ len_con)
idx = step * (i - start)
all_partitions += [idx +0.2*step,idx+0.8*step,idx+step]
print "[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[", all_partitions
allpoints+=[com_2]
bezier_0, curve = __Bezier(allpoints, init_acc = com_acc, init_vel = com_vel)
all_partitions = [0.] + all_partitions[:-3]
com_vel = curve.derivate(all_partitions[-1],1)
com_acc = curve.derivate(all_partitions[-1],2)
com_vel = flatten(asarray(com_vel).transpose().tolist())
com_acc = flatten(asarray(com_acc).transpose().tolist())
p0 = fullBody.generateCurveTrajParts(bezier_0,all_partitions) + 1
print all_partitions
#~ ppl.displayPath(p0-1)
ppl.displayPath(p0)
ppl.displayPath(p0+1)
ppl.displayPath(p0+2)
#~ ppl.displayPath(p0)
# now we need to project all states to the new com positions
for k in range(3, len(all_partitions),3):
print "k ", k
print all_partitions[k]
new_com = flatten(asarray(curve(all_partitions[k]).transpose()).tolist())
ok = False
#~ try:
sid = start+k/3
print "for state", sid
print "before project to new com ", new_com
print "before previous com", __get_com(fullBody, fullBody.getConfigAtState(sid))
#~ new_com[0]+=0.02
ok = fullBody.projectStateToCOM(sid, new_com)
#~ print "projection", ok
if ok:
q1 = fullBody.getConfigAtState(sid)
ok = fullBody.isConfigValid(q1)[0]
#~ print "is config valud", ok
#~ except:
#~ print "hpperr"
#~ break
if not ok:
print "faield to project"
return [], com_vel, com_acc
j = 0;
#~ print "WTF2"
if path:
for i in range(p0,p0+len_con*3-3,3):
try:
#~ print "FOR STATE ", start+j
#~ print "USING PATHS", i
paths_ids = [int(el) for el in fullBody.comRRTFromPos(start+j,i,i+1,i+2,num_optim)]
#~ paths_ids = [int(el) for el in fullBody.effectorRRT(start+j,i,i+1,i+2,num_optim)]
except:
print "COULD NOT SOLVE COMRRT"
return [], com_vel, com_acc
j += 1
global allpaths
allpaths += paths_ids[:-1]
#~ p0 = fullBody.generateCurveTrajParts(bezier_0,partions)
#~ a = gen_trajectory_to_play(fullBody, ppl, allpaths, flatten([[s*0.2, s* 0.6, s* 0.2] for _ in range(len(allpaths) / 3)]))
a = [] #TODO
return a, com_vel, com_acc
viewer = None
tp = None
ppl = None
fullBody = None
b_id = 0
scene = "bos"
first_init = True
def clean_path():
global allpaths
allpaths = []
def init_bezier_traj(robot, r, pplayer, qs, comConstraints):
global viewer
global tp
global ppl
global fullBody
global viewer
global configs
global first_init
configs = qs
viewer = r
ppl = pplayer
fullBody = robot
if first_init:
viewer.client.gui.createScene(scene)
first_init = False
global limbsCOMConstraints
limbsCOMConstraints = comConstraints
com_vel = [0.,0.,0.]
com_acc = [0.,0.,0.]
vels = []
accs = []
path = []
a_s = []
def go0(states, one_curve = True, num_optim = 0, mu = 0.6, s =None, use_kin = True, effector = False):
global com_vel
global com_acc
global vels
global accs
global path
sc = s
for i, el in enumerate(states[:-1]):
if s == None:
sc = max(norm(array(states[i+1].q()) - array(el.q())), 1.) * 0.5
path += gen(el,states[i+1],mu=mu,num_optim=num_optim, s=sc, ine_curve = one_curve, use_Kin = use_kin, effector = effector)
print "path", len(path)
return path
def go2(states, one_curve = True, num_optim = 0, mu = 0.6, s =None, use_kin = True, effector = False, init_vel =com_vel, init_acc = com_acc):
global com_vel
global com_acc
global vels
global accs
if init_vel == None:
init_vel =com_vel
if init_acc == None:
init_acc =com_acc
path = []
sc = s
try:
for i, el in enumerate(states[:-2]):
print "************ one call to ", i
if s == None:
sc = max(norm(array(states[i+1].q()) - array(el.q())), 1.) * 0.6
print "states idds ", i, " ", i+2, " ", len (states[i:i+2])
a, ve, ac = gen_several_states(states[i:i+2],mu=mu,num_optim=num_optim, s=sc, ine_curve = one_curve, use_Kin = use_kin, effector = effector, init_vel =com_vel, init_acc = com_acc)
com_vel = ve
com_acc = ac
clean_path();
path += a
a, ve, ac = gen_several_states(states[-2:],mu=mu,num_optim=num_optim, s=sc, ine_curve = one_curve, use_Kin = use_kin, effector = effector, init_vel =com_vel, init_acc = com_acc)
com_vel = ve
com_acc = ac
path += a
except:
print "FAILT"
return path
print "path", len(path)
return path
def reset():
global com_vel
global com_acc
global vels
global accs
global a_s
global path
com_vel = [0.,0.,0.]
com_acc = [0.,0.,0.]
clean_path();
vels = []
accs = []
path = []
a_s = []
for i, config in enumerate(configs):
fullBody.setConfigAtState(i,config)
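# Typical call sequence (illustrative sketch only; `viewer`, `ppl`, `configs`,
# `limbsCOMConstraints` and `states` are assumed to come from the surrounding
# scenario scripts):
#     init_bezier_traj(fullBody, viewer, ppl, configs, limbsCOMConstraints)
#     trajectory = go0(states, num_optim=1)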
| pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/siggraph_asia/chair/bezier_traj.py | Python | lgpl-3.0 | 22,357 |
import os
import sys
import time
import inspect
from pyo import *
import builtins
import tempfile
from subprocess import call
# To automate audio output validation, every random generator has to be
# properly seeded for the examples to be deterministic.
def play_example(cls, dur=5, toprint=True, double=False):
"""
Execute the documentation example of the object given as an argument.
:Args:
cls: PyoObject class or string
Class reference of the desired object example. If this argument
is the string of the full path of an example (as returned by the
getPyoExamples() function), it will be executed.
dur: float, optional
Duration of the example.
toprint: boolean, optional
If True, the example script will be printed to the console.
Defaults to True.
double: boolean, optional
If True, force the example to run in double precision (64-bit)
Defaults to False.
"""
root_dir = os.path.join(os.path.split(__file__)[0], "manual_example_references")
if not os.path.isdir(root_dir):
os.mkdir(root_dir)
executable = sys.executable
if not executable or executable is None:
executable = "python3"
doc = cls.__doc__.splitlines()
filename = cls.__name__ + ".wav"
filepath = os.path.join(root_dir, filename)
lines = []
store = False
for line in doc:
if not store:
if ">>> s = Server" in line:
line = line.replace("Server()", 'Server(audio="offline")')
line = line + "\ns.recordOptions(filename=r'{}', dur={})".format(filepath, dur)
store = True
if store:
if line.strip() == "":
store = False
elif 's.start()' in line:
pass
else:
lines.append(line)
if lines == []:
print("There is no manual example for %s object." % cls.__name__)
return
ex_lines = [l.lstrip(" ") for l in lines if ">>>" in l or "..." in l]
if hasattr(builtins, "pyo_use_double") or double:
ex = "import time\nfrom pyo64 import *\n"
else:
ex = "import time\nfrom pyo import *\n"
for line in ex_lines:
if ">>>" in line:
line = line.lstrip(">>> ")
if "..." in line:
line = " " + line.lstrip("... ")
ex += line + "\n"
ex += "s.start()\ns.shutdown()\n"
f = tempfile.NamedTemporaryFile(delete=False)
if toprint:
f.write(tobytes('print(r"""\n%s\n""")\n' % ex))
f.write(tobytes(ex))
f.close()
call([executable, f.name])
tree = OBJECTS_TREE["PyoObjectBase"]
_list = []
for k in tree["PyoObject"].keys():
_list.extend(tree["PyoObject"][k])
_list.extend(tree["PyoMatrixObject"])
_list.extend(tree["PyoTableObject"])
_list.extend(tree["PyoPVObject"])
if sys.platform == "win32":
_list.remove("SharedTable")
print(_list)
t = time.time()
for i, obj in enumerate(_list):
play_example(eval(obj), toprint=False, double=True)
print("Elapsed time: {}".format(time.time() - t))
| belangeo/pyo | tests/play_all_manual_examples.py | Python | lgpl-3.0 | 3,067 |
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re
import string
from youtube_dl.extractor import YoutubeIE
from youtube_dl.utils import compat_str, compat_urlretrieve
_TESTS = [
(
'https://s.ytimg.com/yts/jsbin/html5player-vflHOr_nV.js',
'js',
86,
'>=<;:/.-[+*)(\'&%$#"!ZYX0VUTSRQPONMLKJIHGFEDCBA\\yxwvutsrqponmlkjihgfedcba987654321',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfldJ8xgI.js',
'js',
85,
'3456789a0cdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS[UVWXYZ!"#$%&\'()*+,-./:;<=>?@',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-vfle-mVwz.js',
'js',
90,
']\\[@?>=<;:/.-,+*)(\'&%$#"hZYXWVUTSRQPONMLKJIHGFEDCBAzyxwvutsrqponmlkjiagfedcb39876',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl0Cbn9e.js',
'js',
84,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVW@YZ!"#$%&\'()*+,-./:;<=',
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js',
'js',
'2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
),
(
'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
'swf',
86,
'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
),
(
'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
'swf',
'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
'9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
'js',
84,
'123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ0STUVWXYZ!"#$%&\'()*+,@./:;<=>'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vfl9FYC6l.js',
'js',
83,
'123456789abcdefghijklmnopqr0tuvwxyzABCDETGHIJKLMNOPQRS>UVWXYZ!"#$%&\'()*+,-./:;<=F'
),
(
'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflCGk6yw/html5player.js',
'js',
'4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
'82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
)
]
class TestSignature(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
self.TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
if not os.path.exists(self.TESTDATA_DIR):
os.mkdir(self.TESTDATA_DIR)
def make_tfunc(url, stype, sig_input, expected_sig):
m = re.match(r'.*-([a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.[a-z]+$', url)
assert m, '%r should follow URL format' % url
test_id = m.group(1)
def test_func(self):
basename = 'player-%s.%s' % (test_id, stype)
fn = os.path.join(self.TESTDATA_DIR, basename)
if not os.path.exists(fn):
compat_urlretrieve(url, fn)
ie = YoutubeIE()
if stype == 'js':
with io.open(fn, encoding='utf-8') as testf:
jscode = testf.read()
func = ie._parse_sig_js(jscode)
else:
assert stype == 'swf'
with open(fn, 'rb') as testf:
swfcode = testf.read()
func = ie._parse_sig_swf(swfcode)
src_sig = (
compat_str(string.printable[:sig_input])
if isinstance(sig_input, int) else sig_input)
got_sig = func(src_sig)
self.assertEqual(got_sig, expected_sig)
test_func.__name__ = str('test_signature_' + stype + '_' + test_id)
setattr(TestSignature, test_func.__name__, test_func)
for test_spec in _TESTS:
make_tfunc(*test_spec)
if __name__ == '__main__':
unittest.main()
| riking/youtube-dl | test/test_youtube_signature.py | Python | unlicense | 4,167 |
'''
Find the median element in an unsorted array
'''
import heapq
def find_median(arr):
    # heapq.heapify only establishes the heap property, so indexing into the
    # heapified list does not yield the median; instead pull out the smallest
    # half of the elements and read the median off its tail (O(n log n)).
    num_elements = len(arr)
    k = num_elements // 2 + 1
    smallest = heapq.nsmallest(k, arr)
    if num_elements % 2 != 0:
        return smallest[-1]
    else:
        return (smallest[-2] + smallest[-1]) / 2.0
assert find_median([1, -1, 2, 3, 4]) == 2
assert find_median([1, -1, 2, 3, 4, 5]) == 2.5
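# Illustrative alternative (an assumption about intent, not part of the original
# exercise): when the numbers arrive as a stream, the median can be maintained
# online with two heaps -- a max-heap for the lower half (stored negated) and a
# min-heap for the upper half.
class RunningMedian(object):
    def __init__(self):
        self.lo = []  # max-heap of the lower half, values stored negated
        self.hi = []  # min-heap of the upper half
    def add(self, x):
        heapq.heappush(self.lo, -x)
        heapq.heappush(self.hi, -heapq.heappop(self.lo))
        if len(self.hi) > len(self.lo):
            heapq.heappush(self.lo, -heapq.heappop(self.hi))
    def median(self):
        if len(self.lo) > len(self.hi):
            return -self.lo[0]
        return (-self.lo[0] + self.hi[0]) / 2.0
rm = RunningMedian()
for x in [1, -1, 2, 3, 4]:
    rm.add(x)
assert rm.median() == 2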
| amitsaha/learning | python/search/find_median.py | Python | unlicense | 401 |
"""Suit + values are ints"""
from random import shuffle
class Card:
suits = ("spades", "hearts", "diamonds", "clubs")
values = (None, None, '2', '3',
'4', '5', '6', '7',
'8', '9', '10',
'Jack','Queen',
'King', 'Ace')
def __init__(self, v, s):
self.suite = s
self.value = v
def __lt__(self,c2):
if self.value < c2.value:
return True
if self.value == c2.value:
if self.suite < c2.suite:
return True
else:
return False
return False
def __gt__(self,c2):
if self.value > c2.value:
return True
if self.value == c2.value:
if self.suite > c2.suite:
return True
else:
return False
return False
def __repr__(self):
v = self.values[self.value] +\
" of " + \
self.suits[self.suite]
return v
class Deck():
def __init__(self):
self.cards = []
for i in range(2,15):
for j in range(0,4):
self.cards\
.append(Card(i,
j))
shuffle(self.cards)
def rm_card(self):
if len(self.cards) == 0:
return
return self.cards.pop()
| Frikeer/LearnPython | exc15/lib.py | Python | unlicense | 1,360 |
__author__ = 'ralmn'
| ralmn/CMAsk | cmask/__init__.py | Python | unlicense | 21 |
# Advent of Code Solutions: Day 6, part 2
# https://github.com/emddudley/advent-of-code-solutions
import re
def twinkle_lights(instruction, lights):
tokens = re.split(r'(\d+)', instruction)
operation = tokens[0].strip()
if operation == 'turn on':
twinkle = lambda x: x + 1
elif operation == 'turn off':
twinkle = lambda x: max(x - 1, 0)
elif operation == 'toggle':
twinkle = lambda x: x + 2
else:
twinkle = lambda x: x
coord_1 = [ int(tokens[1]), int(tokens[3]) ]
coord_2 = [ int(tokens[5]), int(tokens[7]) ]
for x in range(coord_1[0], coord_2[0] + 1):
for y in range(coord_1[1], coord_2[1] + 1):
lights[x][y] = twinkle(lights[x][y])
lights = [ [ 0 ] * 1000 for n in range(1000) ]
with open('input', 'r') as input:
for instruction in input:
twinkle_lights(instruction, lights)
print(sum(map(sum, lights)))
| emddudley/advent-of-code-solutions | 2015/day-6/advent-day-6-2.py | Python | unlicense | 914 |
import os
import sys
# Import the Flask Framework
from flask import Flask, request, render_template
isDev = os.environ["SERVER_SOFTWARE"].find('Development') == 0
app = Flask( __name__ )
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def home():
return render_template( 'index.html', isDev=isDev )
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def server_error(e):
    """Return a custom 500 error."""
    return 'Sorry, unexpected error: {}'.format(e), 500
| nick-jonas/static-jspm-boiler | main.py | Python | unlicense | 662 |
sum = 1
curr = 3
for width in xrange(3,1002,2):
inc = width - 1
sum = sum + curr #bottom right
curr = curr + inc
sum = sum + curr #bottom left
curr = curr + inc
sum = sum + curr #top left
curr = curr + inc
sum = sum + curr #top right
curr = curr + inc + 2
print sum
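# Cross-check (illustrative, not part of the original solution): the corners of
# an odd ring of width w are w**2, w**2-(w-1), w**2-2*(w-1) and w**2-3*(w-1), so
# each ring contributes 4*w**2 - 6*(w-1) on top of the centre's 1.
closed_form = 1
for w in xrange(3, 1002, 2):
    closed_form += 4 * w * w - 6 * (w - 1)
assert closed_form == sum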
| kbrose/project_euler | p20-29/p28.py | Python | unlicense | 304 |
# -*- coding: utf-8 -*-
import sys
import time
def make(session):
session.execute("make (%(rnd_name.0)s)")
a_guid = session.message()[1][-1]
assert(session.message() is None)
return(a_guid)
def link(session, a, l, b):
session.execute("make %s -[%s]> %s" % (a, l, b))
assert(session.message() is None)
def kill(session, a, l, b=None):
if (b is None):
session.execute("kill %s -[%s]> ()" % (a, l))
else:
session.execute("kill %s -[%s]> %s" % (a, l, b))
assert(session.message() is None)
def kattr_put(session, a, name, value, ttl=None):
if (ttl is None):
ttl = ""
else:
ttl = " with ttl:%d" % (ttl,)
session.execute("attr put %s \"%s\" %s%s" % (a, name, value, ttl))
assert(session.message() is None)
def tattr_put(session, a, name, time, value, ttl=None):
if (ttl is None):
ttl = ""
else:
ttl = " with ttl:%d" % (ttl,)
session.execute("attr put %s \"%s\" [%s] %s%s" % (a, name, time, value, ttl))
assert(session.message() is None)
def kattr_del(session, a, name):
session.execute("attr del %s \"%s\"" % (a, name))
assert(session.message() is None)
def string_value(a):
return("\"%s\"" % a)
def int32_value(a):
return("(int32 %d)" % a)
def uint32_value(a):
return("(uint32 %d)" % a)
def int64_value(a):
return("(int64 %d)" % a)
def uint64_value(a):
return("(uint64 %d)" % a)
def double_value(a):
return("(double %s)" % repr(a))
def sleep(t):
sys.stdout.write("(time.sleep %d)" % t)
sys.stdout.flush()
time.sleep(t * 2)
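# Typical usage sketch (illustrative only; assumes a connected `session` object
# provided by the surrounding test harness):
#     a = make(session)
#     b = make(session)
#     link(session, a, "knows", b)
#     kattr_put(session, a, "name", string_value("alice"))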
| locaweb/leela | try/src/try_leela/helpers.py | Python | apache-2.0 | 1,592 |
"""Helpers that help with state related things."""
import asyncio
import datetime as dt
import json
import logging
from collections import defaultdict
from types import TracebackType
from typing import ( # noqa: F401 pylint: disable=unused-import
Awaitable, Dict, Iterable, List, Optional, Tuple, Type, Union)
from homeassistant.loader import bind_hass
import homeassistant.util.dt as dt_util
from homeassistant.components.notify import (
ATTR_MESSAGE, SERVICE_NOTIFY)
from homeassistant.components.sun import (
STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON)
from homeassistant.components.mysensors.switch import (
ATTR_IR_CODE, SERVICE_SEND_IR_CODE)
from homeassistant.components.cover import (
ATTR_POSITION, ATTR_TILT_POSITION)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_OPTION, SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER,
SERVICE_LOCK, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_UNLOCK,
SERVICE_OPEN_COVER,
SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION,
SERVICE_SET_COVER_TILT_POSITION, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED,
STATE_CLOSED, STATE_HOME, STATE_LOCKED, STATE_NOT_HOME, STATE_OFF,
STATE_ON, STATE_OPEN, STATE_UNKNOWN,
STATE_UNLOCKED, SERVICE_SELECT_OPTION)
from homeassistant.core import (
Context, State, DOMAIN as HASS_DOMAIN)
from homeassistant.util.async_ import run_coroutine_threadsafe
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
GROUP_DOMAIN = 'group'
# Update this dict of lists when new services are added to HA.
# Each item is a service with a list of required attributes.
SERVICE_ATTRIBUTES = {
SERVICE_NOTIFY: [ATTR_MESSAGE],
SERVICE_SEND_IR_CODE: [ATTR_IR_CODE],
SERVICE_SELECT_OPTION: [ATTR_OPTION],
SERVICE_SET_COVER_POSITION: [ATTR_POSITION],
SERVICE_SET_COVER_TILT_POSITION: [ATTR_TILT_POSITION]
}
# Update this dict when new services are added to HA.
# Each item is a service with a corresponding state.
SERVICE_TO_STATE = {
SERVICE_TURN_ON: STATE_ON,
SERVICE_TURN_OFF: STATE_OFF,
SERVICE_ALARM_ARM_AWAY: STATE_ALARM_ARMED_AWAY,
SERVICE_ALARM_ARM_HOME: STATE_ALARM_ARMED_HOME,
SERVICE_ALARM_DISARM: STATE_ALARM_DISARMED,
SERVICE_ALARM_TRIGGER: STATE_ALARM_TRIGGERED,
SERVICE_LOCK: STATE_LOCKED,
SERVICE_UNLOCK: STATE_UNLOCKED,
SERVICE_OPEN_COVER: STATE_OPEN,
SERVICE_CLOSE_COVER: STATE_CLOSED
}
class AsyncTrackStates:
"""
Record the time when the with-block is entered.
Add all states that have changed since the start time to the return list
when with-block is exited.
Must be run within the event loop.
"""
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize a TrackStates block."""
self.hass = hass
self.states = [] # type: List[State]
# pylint: disable=attribute-defined-outside-init
def __enter__(self) -> List[State]:
"""Record time from which to track changes."""
self.now = dt_util.utcnow()
return self.states
def __exit__(self, exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType]) -> None:
"""Add changes states to changes list."""
self.states.extend(get_changed_since(self.hass.states.async_all(),
self.now))
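# Example usage (illustrative sketch only -- must run inside the event loop):
#     with AsyncTrackStates(hass) as changed_states:
#         await hass.services.async_call('light', 'turn_on', blocking=True)
#     # changed_states now holds every state changed while the block ran.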
def get_changed_since(states: Iterable[State],
utc_point_in_time: dt.datetime) -> List[State]:
"""Return list of states that have been changed since utc_point_in_time."""
return [state for state in states
if state.last_updated >= utc_point_in_time]
@bind_hass
def reproduce_state(hass: HomeAssistantType,
states: Union[State, Iterable[State]],
blocking: bool = False) -> None:
"""Reproduce given state."""
return run_coroutine_threadsafe( # type: ignore
async_reproduce_state(hass, states, blocking), hass.loop).result()
@bind_hass
async def async_reproduce_state(
hass: HomeAssistantType,
states: Union[State, Iterable[State]],
blocking: bool = False,
context: Optional[Context] = None) -> None:
"""Reproduce a list of states on multiple domains."""
if isinstance(states, State):
states = [states]
to_call = defaultdict(list) # type: Dict[str, List[State]]
for state in states:
to_call[state.domain].append(state)
async def worker(domain: str, data: List[State]) -> None:
component = getattr(hass.components, domain)
if hasattr(component, 'async_reproduce_states'):
await component.async_reproduce_states(
data,
context=context)
else:
await async_reproduce_state_legacy(
hass,
domain,
data,
blocking=blocking,
context=context)
if to_call:
# run all domains in parallel
await asyncio.gather(*[
worker(domain, data)
for domain, data in to_call.items()
])
@bind_hass
async def async_reproduce_state_legacy(
hass: HomeAssistantType,
domain: str,
states: Iterable[State],
blocking: bool = False,
context: Optional[Context] = None) -> None:
"""Reproduce given state."""
to_call = defaultdict(list) # type: Dict[Tuple[str, str], List[str]]
if domain == GROUP_DOMAIN:
service_domain = HASS_DOMAIN
else:
service_domain = domain
for state in states:
if hass.states.get(state.entity_id) is None:
_LOGGER.warning("reproduce_state: Unable to find entity %s",
state.entity_id)
continue
domain_services = hass.services.async_services().get(service_domain)
if not domain_services:
_LOGGER.warning(
"reproduce_state: Unable to reproduce state %s (1)", state)
continue
service = None
for _service in domain_services.keys():
if (_service in SERVICE_ATTRIBUTES and
all(attr in state.attributes
for attr in SERVICE_ATTRIBUTES[_service]) or
_service in SERVICE_TO_STATE and
SERVICE_TO_STATE[_service] == state.state):
service = _service
if (_service in SERVICE_TO_STATE and
SERVICE_TO_STATE[_service] == state.state):
break
if not service:
_LOGGER.warning(
"reproduce_state: Unable to reproduce state %s (2)", state)
continue
# We group service calls for entities by service call
# json used to create a hashable version of dict with maybe lists in it
key = (service,
json.dumps(dict(state.attributes), sort_keys=True))
to_call[key].append(state.entity_id)
domain_tasks = [] # type: List[Awaitable[Optional[bool]]]
for (service, service_data), entity_ids in to_call.items():
data = json.loads(service_data)
data[ATTR_ENTITY_ID] = entity_ids
domain_tasks.append(
hass.services.async_call(service_domain, service, data, blocking,
context)
)
if domain_tasks:
await asyncio.wait(domain_tasks, loop=hass.loop)
def state_as_number(state: State) -> float:
"""
Try to coerce our state to a number.
Raises ValueError if this is not possible.
"""
from homeassistant.components.climate import (
STATE_HEAT, STATE_COOL, STATE_IDLE)
if state.state in (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON,
STATE_OPEN, STATE_HOME, STATE_HEAT, STATE_COOL):
return 1
if state.state in (STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN,
STATE_BELOW_HORIZON, STATE_CLOSED, STATE_NOT_HOME,
STATE_IDLE):
return 0
return float(state.state)
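# For example (illustrative): state_as_number(State("lock.front_door", "locked"))
# returns 1, while a purely numeric state string such as "21.5" falls through to
# float("21.5").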
| PetePriority/home-assistant | homeassistant/helpers/state.py | Python | apache-2.0 | 8,174 |
'''
Created on Oct 3, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import numpy as np
D = {'solverType':'splitField', 'flavor':'both', 'numRuns':500, 'expt':'intParameters', 'numProcs':16}
def getMyVars(parseNumber, D):
'''routine to return the parameters to test at the current iteration.'''
rhos, xis = np.meshgrid(np.logspace(2,4,10), np.logspace(-4,-2,10))
rhos = rhos.flatten()
xis = xis.flatten()
noFreqs = np.array(8)
bkg = 0
D['freqs'] = np.round(np.logspace(np.log10(1000), np.log10(50000), noFreqs))
D['inc'] = np.array([45*np.pi/180.0])
D['rho'] = rhos[parseNumber%100]
D['xi'] = xis[parseNumber%100]
D['bkgNo'] = int(parseNumber/100) + 100
return D
| daStrauss/subsurface | src/expts/paramSplitFieldBoth.py | Python | apache-2.0 | 1,344 |
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import inspect
import json
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, Generic, List, Optional, Tuple, Type, cast, get_type_hints
from pants.base import deprecated
from pants.engine.goal import GoalSubsystem
from pants.engine.target import (
AsyncFieldMixin,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
Field,
FloatField,
IntField,
RegisteredTargetTypes,
ScalarField,
SequenceField,
StringField,
StringSequenceField,
Target,
)
from pants.engine.unions import UnionMembership
from pants.option.option_util import is_dict_option, is_list_option
from pants.option.options import Options
from pants.option.parser import OptionValueHistory, Parser
from pants.util.objects import get_docstring, get_docstring_summary, pretty_print_type_hint
from pants.util.strutil import first_paragraph
class HelpJSONEncoder(json.JSONEncoder):
"""Class for JSON-encoding help data (including option values).
Note that JSON-encoded data is not intended to be decoded back. It exists purely for terminal
and browser help display.
"""
def default(self, o):
if callable(o):
return o.__name__
if isinstance(o, type):
            return o.__name__
if isinstance(o, Enum):
return o.value
return super().default(o)
def to_help_str(val) -> str:
if isinstance(val, (list, dict)):
return json.dumps(val, sort_keys=True, indent=2, cls=HelpJSONEncoder)
if isinstance(val, Enum):
return str(val.value)
else:
return str(val)
@dataclass(frozen=True)
class OptionHelpInfo:
"""A container for help information for a single option.
display_args: Arg strings suitable for display in help text, including value examples
(e.g., [-f, --[no]-foo-bar, --baz=<metavar>].)
comma_separated_display_args: Display args as a comma-delimited string, used in
reference documentation.
scoped_cmd_line_args: The explicitly scoped raw flag names allowed anywhere on the cmd line,
(e.g., [--scope-baz, --no-scope-baz, --scope-qux])
unscoped_cmd_line_args: The unscoped raw flag names allowed on the cmd line in this option's
scope context (e.g., [--baz, --no-baz, --qux])
env_var: The environment variable that set's the option.
config_key: The config key for this option (in the section named for its scope).
typ: The type of the option.
default: The value of this option if no flags are specified (derived from config and env vars).
help: The help message registered for this option.
deprecated_message: If deprecated: A message explaining that this option is deprecated at
removal_version.
removal_version: If deprecated: The version at which this option is to be removed.
removal_hint: If deprecated: The removal hint message registered for this option.
choices: If this option has a constrained set of choices, a tuple of the stringified choices.
"""
display_args: Tuple[str, ...]
comma_separated_display_args: str
scoped_cmd_line_args: Tuple[str, ...]
unscoped_cmd_line_args: Tuple[str, ...]
env_var: str
config_key: str
typ: Type
default: Any
help: str
deprecation_active: bool
deprecated_message: Optional[str]
removal_version: Optional[str]
removal_hint: Optional[str]
choices: Optional[Tuple[str, ...]]
comma_separated_choices: Optional[str]
value_history: Optional[OptionValueHistory]
@dataclass(frozen=True)
class OptionScopeHelpInfo:
"""A container for help information for a scope of options.
scope: The scope of the described options.
basic|advanced|deprecated: A list of OptionHelpInfo for the options in that group.
"""
scope: str
description: str
is_goal: bool # True iff the scope belongs to a GoalSubsystem.
basic: Tuple[OptionHelpInfo, ...]
advanced: Tuple[OptionHelpInfo, ...]
deprecated: Tuple[OptionHelpInfo, ...]
def collect_unscoped_flags(self) -> List[str]:
flags: List[str] = []
for options in (self.basic, self.advanced, self.deprecated):
for ohi in options:
flags.extend(ohi.unscoped_cmd_line_args)
return flags
def collect_scoped_flags(self) -> List[str]:
flags: List[str] = []
for options in (self.basic, self.advanced, self.deprecated):
for ohi in options:
flags.extend(ohi.scoped_cmd_line_args)
return flags
@dataclass(frozen=True)
class GoalHelpInfo:
"""A container for help information for a goal."""
name: str
description: str
is_implemented: bool # True iff all unions required by the goal are implemented.
consumed_scopes: Tuple[str, ...] # The scopes of subsystems consumed by this goal.
@dataclass(frozen=True)
class TargetFieldHelpInfo:
"""A container for help information for a field in a target type."""
alias: str
description: Optional[str]
type_hint: str
required: bool
default: Optional[str]
@classmethod
def create(cls, field: Type[Field]) -> TargetFieldHelpInfo:
description: Optional[str]
if hasattr(field, "help"):
description = field.help
else:
# NB: It is very common (and encouraged) to subclass Fields to give custom behavior, e.g.
# `PythonSources` subclassing `Sources`. Here, we set `fallback_to_ancestors=True` so that
# we can still generate meaningful documentation for all these custom fields without
# requiring the Field author to rewrite the docstring.
#
# However, if the original plugin author did not define docstring, then this means we
# would typically fall back to the docstring for `Field` or a template like `StringField`.
            # This is an awkward edge of our heuristic and it's not intentional since these core
# `Field` types have documentation oriented to the plugin author and not the end user
# filling in fields in a BUILD file.
description = get_docstring(
field,
flatten=True,
fallback_to_ancestors=True,
ignored_ancestors={
*Field.mro(),
AsyncFieldMixin,
BoolField,
DictStringToStringField,
DictStringToStringSequenceField,
FloatField,
Generic, # type: ignore[arg-type]
IntField,
ScalarField,
SequenceField,
StringField,
StringSequenceField,
},
)
raw_value_type = get_type_hints(field.compute_value)["raw_value"]
type_hint = pretty_print_type_hint(raw_value_type)
# Check if the field only allows for certain choices.
if issubclass(field, StringField) and field.valid_choices is not None:
valid_choices = sorted(
field.valid_choices
if isinstance(field.valid_choices, tuple)
else (choice.value for choice in field.valid_choices)
)
type_hint = " | ".join([*(repr(c) for c in valid_choices), "None"])
if field.required:
# We hackily remove `None` as a valid option for the field when it's required. This
# greatly simplifies Field definitions because it means that they don't need to
# override the type hints for `PrimitiveField.compute_value()` and
# `AsyncField.sanitize_raw_value()` to indicate that `None` is an invalid type.
type_hint = type_hint.replace(" | None", "")
return cls(
alias=field.alias,
description=description,
type_hint=type_hint,
required=field.required,
default=(
repr(field.default) if (not field.required and field.default is not None) else None
),
)
@dataclass(frozen=True)
class TargetTypeHelpInfo:
"""A container for help information for a target type."""
alias: str
summary: Optional[str]
description: Optional[str]
fields: Tuple[TargetFieldHelpInfo, ...]
@classmethod
def create(
cls, target_type: Type[Target], *, union_membership: UnionMembership
) -> TargetTypeHelpInfo:
description: Optional[str]
summary: Optional[str]
if hasattr(target_type, "help"):
description = target_type.help
summary = first_paragraph(description)
else:
description = get_docstring(target_type)
summary = get_docstring_summary(target_type)
return cls(
alias=target_type.alias,
summary=summary,
description=description,
fields=tuple(
TargetFieldHelpInfo.create(field)
for field in target_type.class_field_types(union_membership=union_membership)
if not field.alias.startswith("_") and field.deprecated_removal_version is None
),
)
@dataclass(frozen=True)
class AllHelpInfo:
"""All available help info."""
scope_to_help_info: Dict[str, OptionScopeHelpInfo]
name_to_goal_info: Dict[str, GoalHelpInfo]
name_to_target_type_info: Dict[str, TargetTypeHelpInfo]
ConsumedScopesMapper = Callable[[str], Tuple[str, ...]]
class HelpInfoExtracter:
"""Extracts information useful for displaying help from option registration args."""
@classmethod
def get_all_help_info(
cls,
options: Options,
union_membership: UnionMembership,
consumed_scopes_mapper: ConsumedScopesMapper,
registered_target_types: RegisteredTargetTypes,
) -> AllHelpInfo:
scope_to_help_info = {}
name_to_goal_info = {}
for scope_info in sorted(options.known_scope_to_info.values(), key=lambda x: x.scope):
options.for_scope(scope_info.scope) # Force parsing.
optionable_cls = scope_info.optionable_cls
if not scope_info.description:
cls_name = (
f"{optionable_cls.__module__}.{optionable_cls.__qualname__}"
if optionable_cls
else ""
)
raise ValueError(
f"Subsystem {cls_name} with scope `{scope_info.scope}` has no description. "
f"Add a class property `help`."
)
is_goal = optionable_cls is not None and issubclass(optionable_cls, GoalSubsystem)
oshi = HelpInfoExtracter(scope_info.scope).get_option_scope_help_info(
scope_info.description, options.get_parser(scope_info.scope), is_goal
)
scope_to_help_info[oshi.scope] = oshi
if is_goal:
goal_subsystem_cls = cast(Type[GoalSubsystem], optionable_cls)
is_implemented = union_membership.has_members_for_all(
goal_subsystem_cls.required_union_implementations
)
name_to_goal_info[scope_info.scope] = GoalHelpInfo(
goal_subsystem_cls.name,
scope_info.description,
is_implemented,
consumed_scopes_mapper(scope_info.scope),
)
name_to_target_type_info = {
alias: TargetTypeHelpInfo.create(target_type, union_membership=union_membership)
for alias, target_type in registered_target_types.aliases_to_types.items()
if not alias.startswith("_") and target_type.deprecated_removal_version is None
}
return AllHelpInfo(
scope_to_help_info=scope_to_help_info,
name_to_goal_info=name_to_goal_info,
name_to_target_type_info=name_to_target_type_info,
)
@staticmethod
def compute_default(**kwargs) -> Any:
"""Compute the default val for help display for an option registered with these kwargs.
Returns a pair (default, stringified default suitable for display).
"""
ranked_default = kwargs.get("default")
fallback: Any = None
if is_list_option(kwargs):
fallback = []
elif is_dict_option(kwargs):
fallback = {}
default = (
ranked_default.value
if ranked_default and ranked_default.value is not None
else fallback
)
return default
@staticmethod
def stringify_type(t: Type) -> str:
if t == dict:
return "{'key1': val1, 'key2': val2, ...}"
return f"<{t.__name__}>"
@staticmethod
def compute_metavar(kwargs):
"""Compute the metavar to display in help for an option registered with these kwargs."""
stringify = lambda t: HelpInfoExtracter.stringify_type(t)
metavar = kwargs.get("metavar")
if not metavar:
if is_list_option(kwargs):
member_typ = kwargs.get("member_type", str)
metavar = stringify(member_typ)
# In a cmd-line list literal, string members must be quoted.
if member_typ == str:
metavar = f"'{metavar}'"
elif is_dict_option(kwargs):
metavar = f'"{stringify(dict)}"'
else:
metavar = stringify(kwargs.get("type", str))
if is_list_option(kwargs):
# For lists, the metavar (either explicit or deduced) is the representation
# of a single list member, so we turn the help string into a list of those here.
return f'"[{metavar}, {metavar}, ...]"'
return metavar
@staticmethod
def compute_choices(kwargs) -> Optional[Tuple[str, ...]]:
"""Compute the option choices to display."""
typ = kwargs.get("type", [])
member_type = kwargs.get("member_type", str)
if typ == list and inspect.isclass(member_type) and issubclass(member_type, Enum):
return tuple(choice.value for choice in member_type)
elif inspect.isclass(typ) and issubclass(typ, Enum):
return tuple(choice.value for choice in typ)
elif "choices" in kwargs:
return tuple(str(choice) for choice in kwargs["choices"])
else:
return None
def __init__(self, scope: str):
self._scope = scope
self._scope_prefix = scope.replace(".", "-")
def get_option_scope_help_info(self, description: str, parser: Parser, is_goal: bool):
"""Returns an OptionScopeHelpInfo for the options parsed by the given parser."""
basic_options = []
advanced_options = []
deprecated_options = []
for args, kwargs in parser.option_registrations_iter():
history = parser.history(kwargs["dest"])
ohi = self.get_option_help_info(args, kwargs)
ohi = dataclasses.replace(ohi, value_history=history)
if ohi.deprecation_active:
deprecated_options.append(ohi)
elif kwargs.get("advanced") or (
kwargs.get("recursive") and not kwargs.get("recursive_root")
):
# In order to keep the regular help output uncluttered, we treat recursive
# options as advanced. The concept of recursive options is not widely used
# and not clear to the end user, so it's best not to expose it as a concept.
advanced_options.append(ohi)
else:
basic_options.append(ohi)
return OptionScopeHelpInfo(
scope=self._scope,
description=description,
is_goal=is_goal,
basic=tuple(basic_options),
advanced=tuple(advanced_options),
deprecated=tuple(deprecated_options),
)
def get_option_help_info(self, args, kwargs):
"""Returns an OptionHelpInfo for the option registered with the given (args, kwargs)."""
display_args = []
scoped_cmd_line_args = []
unscoped_cmd_line_args = []
for arg in args:
is_short_arg = len(arg) == 2
unscoped_cmd_line_args.append(arg)
if self._scope_prefix:
scoped_arg = f"--{self._scope_prefix}-{arg.lstrip('-')}"
else:
scoped_arg = arg
scoped_cmd_line_args.append(scoped_arg)
if kwargs.get("type") == bool:
if is_short_arg:
display_args.append(scoped_arg)
else:
unscoped_cmd_line_args.append(f"--no-{arg[2:]}")
sa_2 = scoped_arg[2:]
scoped_cmd_line_args.append(f"--no-{sa_2}")
display_args.append(f"--[no-]{sa_2}")
else:
metavar = self.compute_metavar(kwargs)
display_args.append(f"{scoped_arg}={metavar}")
if kwargs.get("passthrough"):
type_str = self.stringify_type(kwargs.get("member_type", str))
display_args.append(f"... -- [{type_str} [{type_str} [...]]]")
typ = kwargs.get("type", str)
default = self.compute_default(**kwargs)
help_msg = kwargs.get("help", "No help available.")
deprecation_start_version = kwargs.get("deprecation_start_version")
removal_version = kwargs.get("removal_version")
deprecation_active = removal_version is not None and deprecated.is_deprecation_active(
deprecation_start_version
)
deprecated_message = None
if removal_version:
deprecated_tense = deprecated.get_deprecated_tense(removal_version)
message_start = (
"Deprecated"
if deprecation_active
else f"Upcoming deprecation in version: {deprecation_start_version}"
)
deprecated_message = (
f"{message_start}, {deprecated_tense} removed in version: {removal_version}."
)
removal_hint = kwargs.get("removal_hint")
choices = self.compute_choices(kwargs)
dest = Parser.parse_dest(*args, **kwargs)
# Global options have three env var variants. The last one is the most human-friendly.
env_var = Parser.get_env_var_names(self._scope, dest)[-1]
ret = OptionHelpInfo(
display_args=tuple(display_args),
comma_separated_display_args=", ".join(display_args),
scoped_cmd_line_args=tuple(scoped_cmd_line_args),
unscoped_cmd_line_args=tuple(unscoped_cmd_line_args),
env_var=env_var,
config_key=dest,
typ=typ,
default=default,
help=help_msg,
deprecation_active=deprecation_active,
deprecated_message=deprecated_message,
removal_version=removal_version,
removal_hint=removal_hint,
choices=choices,
comma_separated_choices=None if choices is None else ", ".join(choices),
value_history=None,
)
return ret
| jsirois/pants | src/python/pants/help/help_info_extracter.py | Python | apache-2.0 | 19,574 |
"""
WSGI config for meetingroom project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meetingroom.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| jasonperk/meetingroom | meetingroom/wsgi.py | Python | apache-2.0 | 1,144 |
"""
Implementation of hooks and APIs for outputting log messages.
"""
import sys
import traceback
import inspect
import json as pyjson
from threading import Lock
from functools import wraps
from io import IOBase
from pyrsistent import PClass, field
from . import _bytesjson as bytesjson
from zope.interface import Interface, implementer
from ._traceback import write_traceback, TRACEBACK_MESSAGE
from ._message import EXCEPTION_FIELD, MESSAGE_TYPE_FIELD, REASON_FIELD
from ._util import saferepr, safeunicode
from .json import EliotJSONEncoder
from ._validation import ValidationError
class _DestinationsSendError(Exception):
"""
    An error occurred sending to one or more destinations.
@ivar errors: A list of tuples output from C{sys.exc_info()}.
"""
def __init__(self, errors):
self.errors = errors
Exception.__init__(self, errors)
class BufferingDestination(object):
"""
Buffer messages in memory.
"""
def __init__(self):
self.messages = []
def __call__(self, message):
self.messages.append(message)
while len(self.messages) > 1000:
self.messages.pop(0)
class Destinations(object):
"""
Manage a list of destinations for message dictionaries.
The global instance of this class is where L{Logger} instances will
send written messages.
"""
def __init__(self):
self._destinations = [BufferingDestination()]
self._any_added = False
self._globalFields = {}
def addGlobalFields(self, **fields):
"""
Add fields that will be included in all messages sent through this
destination.
@param fields: Keyword arguments mapping field names to values.
"""
self._globalFields.update(fields)
def send(self, message):
"""
Deliver a message to all destinations.
The passed in message might be mutated.
@param message: A message dictionary that can be serialized to JSON.
@type message: L{dict}
"""
message.update(self._globalFields)
errors = []
for dest in self._destinations:
try:
dest(message)
except:
errors.append(sys.exc_info())
if errors:
raise _DestinationsSendError(errors)
def add(self, *destinations):
"""
Adds new destinations.
A destination should never ever throw an exception. Seriously.
A destination should not mutate the dictionary it is given.
@param destinations: A list of callables that takes message
dictionaries.
"""
buffered_messages = None
if not self._any_added:
# These are first set of messages added, so we need to clear
# BufferingDestination:
self._any_added = True
buffered_messages = self._destinations[0].messages
self._destinations = []
self._destinations.extend(destinations)
if buffered_messages:
# Re-deliver buffered messages:
for message in buffered_messages:
self.send(message)
def remove(self, destination):
"""
Remove an existing destination.
@param destination: A destination previously added with C{self.add}.
@raises ValueError: If the destination is unknown.
"""
self._destinations.remove(destination)
class ILogger(Interface):
"""
Write out message dictionaries to some destination.
"""
def write(dictionary, serializer=None):
"""
Write a dictionary to the appropriate destination.
@note: This method is thread-safe.
@param serializer: Either C{None}, or a
L{eliot._validation._MessageSerializer} which can be used to
validate this message.
@param dictionary: The message to write out. The given dictionary
will not be mutated.
@type dictionary: C{dict}
"""
@implementer(ILogger)
class Logger(object):
"""
Write out messages to the globally configured destination(s).
You will typically want to create one of these for every chunk of code
whose messages you want to unit test in isolation, e.g. a class. The tests
can then replace a specific L{Logger} with a L{MemoryLogger}.
"""
_destinations = Destinations()
_log_tracebacks = True
def _safeUnicodeDictionary(self, dictionary):
"""
Serialize a dictionary to a unicode string no matter what it contains.
The resulting dictionary will loosely follow Python syntax but it is
not expected to actually be a lossless encoding in all cases.
@param dictionary: A L{dict} to serialize.
@return: A L{unicode} string representing the input dictionary as
faithfully as can be done without putting in too much effort.
"""
try:
return str(
dict(
(saferepr(key), saferepr(value))
for (key, value) in dictionary.items()
)
)
except:
return saferepr(dictionary)
def write(self, dictionary, serializer=None):
"""
Serialize the dictionary, and write it to C{self._destinations}.
"""
dictionary = dictionary.copy()
try:
if serializer is not None:
serializer.serialize(dictionary)
except:
write_traceback(self)
from ._action import log_message
log_message(
"eliot:serialization_failure",
message=self._safeUnicodeDictionary(dictionary),
__eliot_logger__=self,
)
return
try:
self._destinations.send(dictionary)
except _DestinationsSendError as e:
from ._action import log_message
if self._log_tracebacks:
for (exc_type, exception, exc_traceback) in e.errors:
# Can't use same Logger as serialization errors because
# if destination continues to error out we will get
# infinite recursion. So instead we have to manually
# construct a Logger that won't retry.
logger = Logger()
logger._log_tracebacks = False
logger._destinations = self._destinations
msg = {
MESSAGE_TYPE_FIELD: "eliot:destination_failure",
REASON_FIELD: safeunicode(exception),
EXCEPTION_FIELD: exc_type.__module__ + "." + exc_type.__name__,
"message": self._safeUnicodeDictionary(dictionary),
"__eliot_logger__": logger,
}
log_message(**msg)
else:
# Nothing we can do here, raising exception to caller will
# break business logic, better to have that continue to
# work even if logging isn't.
pass
def exclusively(f):
"""
Decorate a function to make it thread-safe by serializing invocations
using a per-instance lock.
"""
@wraps(f)
def exclusively_f(self, *a, **kw):
with self._lock:
return f(self, *a, **kw)
return exclusively_f
@implementer(ILogger)
class MemoryLogger(object):
"""
Store written messages in memory.
When unit testing you don't want to create this directly but rather use
the L{eliot.testing.validateLogging} decorator on a test method, which
will provide additional testing integration.
@ivar messages: A C{list} of the dictionaries passed to
L{MemoryLogger.write}. Do not mutate this list.
@ivar serializers: A C{list} of the serializers passed to
L{MemoryLogger.write}, each corresponding to a message
L{MemoryLogger.messages}. Do not mutate this list.
@ivar tracebackMessages: A C{list} of messages written to this logger for
tracebacks using L{eliot.write_traceback} or L{eliot.writeFailure}. Do
not mutate this list.
"""
def __init__(self, encoder=EliotJSONEncoder):
"""
@param encoder: A JSONEncoder subclass to use when encoding JSON.
"""
self._lock = Lock()
self._encoder = encoder
self.reset()
@exclusively
def flushTracebacks(self, exceptionType):
"""
Flush all logged tracebacks whose exception is of the given type.
This means they are expected tracebacks and should not cause the test
to fail.
@param exceptionType: A subclass of L{Exception}.
@return: C{list} of flushed messages.
"""
result = []
remaining = []
for message in self.tracebackMessages:
if isinstance(message[REASON_FIELD], exceptionType):
result.append(message)
else:
remaining.append(message)
self.tracebackMessages = remaining
return result
# PEP 8 variant:
flush_tracebacks = flushTracebacks
@exclusively
def write(self, dictionary, serializer=None):
"""
Add the dictionary to list of messages.
"""
# Validate copy of the dictionary, to ensure what we store isn't
# mutated.
try:
self._validate_message(dictionary.copy(), serializer)
except Exception as e:
# Skip irrelevant frames that don't help pinpoint the problem:
from . import _output, _message, _action
skip_filenames = [_output.__file__, _message.__file__, _action.__file__]
for frame in inspect.stack():
if frame[1] not in skip_filenames:
break
self._failed_validations.append(
"{}: {}".format(e, "".join(traceback.format_stack(frame[0])))
)
self.messages.append(dictionary)
self.serializers.append(serializer)
if serializer is TRACEBACK_MESSAGE._serializer:
self.tracebackMessages.append(dictionary)
def _validate_message(self, dictionary, serializer):
"""Validate an individual message.
As a side-effect, the message is replaced with its serialized contents.
@param dictionary: A message C{dict} to be validated. Might be mutated
by the serializer!
@param serializer: C{None} or a serializer.
@raises TypeError: If a field name is not unicode, or the dictionary
fails to serialize to JSON.
@raises eliot.ValidationError: If serializer was given and validation
failed.
"""
if serializer is not None:
serializer.validate(dictionary)
for key in dictionary:
if not isinstance(key, str):
if isinstance(key, bytes):
key.decode("utf-8")
else:
raise TypeError(dictionary, "%r is not unicode" % (key,))
if serializer is not None:
serializer.serialize(dictionary)
try:
pyjson.dumps(dictionary, cls=self._encoder)
except Exception as e:
raise TypeError("Message %s doesn't encode to JSON: %s" % (dictionary, e))
@exclusively
def validate(self):
"""
Validate all written messages.
Does minimal validation of types, and for messages with corresponding
serializers use those to do additional validation.
As a side-effect, the messages are replaced with their serialized
contents.
@raises TypeError: If a field name is not unicode, or the dictionary
fails to serialize to JSON.
@raises eliot.ValidationError: If serializer was given and validation
failed.
"""
for dictionary, serializer in zip(self.messages, self.serializers):
try:
self._validate_message(dictionary, serializer)
except (TypeError, ValidationError) as e:
# We already figured out which messages failed validation
# earlier. This just lets us figure out which exception type to
# raise.
raise e.__class__("\n\n".join(self._failed_validations))
@exclusively
def serialize(self):
"""
Serialize all written messages.
This is the Field-based serialization, not JSON.
@return: A C{list} of C{dict}, the serialized messages.
"""
result = []
for dictionary, serializer in zip(self.messages, self.serializers):
dictionary = dictionary.copy()
serializer.serialize(dictionary)
result.append(dictionary)
return result
@exclusively
def reset(self):
"""
Clear all logged messages.
Any logged tracebacks will also be cleared, and will therefore not
cause a test failure.
This is useful to ensure a logger is in a known state before testing
logging of a specific code path.
"""
self.messages = []
self.serializers = []
self.tracebackMessages = []
self._failed_validations = []
class FileDestination(PClass):
"""
Callable that writes JSON messages to a file.
On Python 3 the file may support either C{bytes} or C{unicode}. On
Python 2 only C{bytes} are supported since that is what all files expect
in practice.
@ivar file: The file to which messages will be written.
@ivar _dumps: Function that serializes an object to JSON.
@ivar _linebreak: C{"\n"} as either bytes or unicode.
"""
file = field(mandatory=True)
encoder = field(mandatory=True)
_dumps = field(mandatory=True)
_linebreak = field(mandatory=True)
def __new__(cls, file, encoder=EliotJSONEncoder):
if isinstance(file, IOBase) and not file.writable():
raise RuntimeError("Given file {} is not writeable.")
unicodeFile = False
try:
file.write(b"")
except TypeError:
unicodeFile = True
if unicodeFile:
# On Python 3 native json module outputs unicode:
_dumps = pyjson.dumps
_linebreak = "\n"
else:
_dumps = bytesjson.dumps
_linebreak = b"\n"
return PClass.__new__(
cls, file=file, _dumps=_dumps, _linebreak=_linebreak, encoder=encoder
)
def __call__(self, message):
"""
@param message: A message dictionary.
"""
self.file.write(self._dumps(message, cls=self.encoder) + self._linebreak)
self.file.flush()
def to_file(output_file, encoder=EliotJSONEncoder):
"""
Add a destination that writes a JSON message per line to the given file.
@param output_file: A file-like object.
@param encoder: A JSONEncoder subclass to use when encoding JSON.
"""
Logger._destinations.add(FileDestination(file=output_file, encoder=encoder))
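# Example usage (illustrative sketch): route all Eliot messages to a local file:
#     to_file(open("eliot.log", "ab"))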
# The default Logger, used when none is specified:
_DEFAULT_LOGGER = Logger()
| ScatterHQ/eliot | eliot/_output.py | Python | apache-2.0 | 15,228 |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Zheng Zhang
# --------------------------------------------------------
import argparse
import pprint
import logging
import time
import os
import mxnet as mx
from config.config import config, generate_config, update_config
from config.dataset_conf import dataset
from config.network_conf import network
from symbols import *
from dataset import *
from core.loader import TestDataLoader
from core.tester import Predictor, pred_eval
from utils.load_model import load_param
def test_deeplab(network, dataset, image_set, root_path, dataset_path,
ctx, prefix, epoch,
vis, logger=None, output_path=None):
if not logger:
assert False, 'require a logger'
# print config
pprint.pprint(config)
logger.info('testing config:{}\n'.format(pprint.pformat(config)))
# load symbol and testing data
sym = eval('get_' + network + '_test')(num_classes=config.dataset.NUM_CLASSES)
imdb = eval(dataset)(image_set, root_path, dataset_path, result_path=output_path)
segdb = imdb.gt_segdb()
# get test data iter
test_data = TestDataLoader(segdb, batch_size=len(ctx))
# load model
# arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)
arg_params, aux_params = load_param(prefix, epoch, process=True)
# infer shape
data_shape_dict = dict(test_data.provide_data_single)
arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
# check parameters
for k in sym.list_arguments():
if k in data_shape_dict or k in ['softmax_label']:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
for k in sym.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
# decide maximum shape
data_names = [k[0] for k in test_data.provide_data_single]
label_names = ['softmax_label']
max_data_shape = [[('data', (1, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]]
# create predictor
predictor = Predictor(sym, data_names, label_names,
context=ctx, max_data_shapes=max_data_shape,
provide_data=test_data.provide_data, provide_label=test_data.provide_label,
arg_params=arg_params, aux_params=aux_params)
# start detection
pred_eval(predictor, test_data, imdb, vis=vis, logger=logger)
| deepinsight/Deformable-ConvNets | deeplab/function/test_deeplab.py | Python | apache-2.0 | 3,137 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import logging
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import DefaultDict
from pants.backend.shell.lint.shellcheck.subsystem import Shellcheck
from pants.backend.shell.shell_setup import ShellSetup
from pants.backend.shell.target_types import ShellSources
from pants.base.specs import AddressSpecs, DescendantAddresses
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.engine.addresses import Address
from pants.engine.collection import DeduplicatedCollection
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process, ProcessCacheScope
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
ExplicitlyProvidedDependencies,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
SourcesPaths,
SourcesPathsRequest,
Targets,
WrappedTarget,
)
from pants.engine.unions import UnionRule
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.ordered_set import OrderedSet
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ShellMapping:
"""A mapping of Shell file names to their owning file address."""
mapping: FrozenDict[str, Address]
ambiguous_modules: FrozenDict[str, tuple[Address, ...]]
@rule(desc="Creating map of Shell file names to Shell targets", level=LogLevel.DEBUG)
async def map_shell_files() -> ShellMapping:
all_expanded_targets = await Get(Targets, AddressSpecs([DescendantAddresses("")]))
shell_tgts = tuple(tgt for tgt in all_expanded_targets if tgt.has_field(ShellSources))
sources_per_target = await MultiGet(
Get(SourcesPaths, SourcesPathsRequest(tgt[ShellSources])) for tgt in shell_tgts
)
files_to_addresses: dict[str, Address] = {}
files_with_multiple_owners: DefaultDict[str, set[Address]] = defaultdict(set)
for tgt, sources in zip(shell_tgts, sources_per_target):
for f in sources.files:
if f in files_to_addresses:
files_with_multiple_owners[f].update({files_to_addresses[f], tgt.address})
else:
files_to_addresses[f] = tgt.address
# Remove files with ambiguous owners.
for ambiguous_f in files_with_multiple_owners:
files_to_addresses.pop(ambiguous_f)
return ShellMapping(
mapping=FrozenDict(sorted(files_to_addresses.items())),
ambiguous_modules=FrozenDict(
(k, tuple(sorted(v))) for k, v in sorted(files_with_multiple_owners.items())
),
)
class ParsedShellImports(DeduplicatedCollection):
sort_input = True
@dataclass(frozen=True)
class ParseShellImportsRequest:
# NB: We parse per-file, rather than per-target. This is necessary so that we can have each
# file in complete isolation without its sibling files present so that Shellcheck errors when
# trying to source a sibling file, which then allows us to extract that path.
digest: Digest
fp: str
PATH_FROM_SHELLCHECK_ERROR = re.compile(r"Not following: (.+) was not specified as input")
@rule
async def parse_shell_imports(
request: ParseShellImportsRequest, shellcheck: Shellcheck
) -> ParsedShellImports:
# We use Shellcheck to parse for us by running it against each file in isolation, which means
# that all `source` statements will error. Then, we can extract the problematic paths from the
# JSON output.
downloaded_shellcheck = await Get(
DownloadedExternalTool, ExternalToolRequest, shellcheck.get_request(Platform.current)
)
input_digest = await Get(Digest, MergeDigests([request.digest, downloaded_shellcheck.digest]))
process_result = await Get(
FallibleProcessResult,
Process(
# NB: We do not load up `[shellcheck].{args,config}` because it would risk breaking
# determinism of dependency inference in an unexpected way.
[downloaded_shellcheck.exe, "--format=json", request.fp],
input_digest=input_digest,
description=f"Detect Shell imports for {request.fp}",
level=LogLevel.DEBUG,
# We expect this to always fail, but it should still be cached because the process is
# deterministic.
cache_scope=ProcessCacheScope.ALWAYS,
),
)
try:
output = json.loads(process_result.stdout)
except json.JSONDecodeError:
logger.error(
f"Parsing {request.fp} for dependency inference failed because Shellcheck's output "
f"could not be loaded as JSON. Please open a GitHub issue at "
f"https://github.com/pantsbuild/pants/issues/new with this error message attached.\n\n"
f"\nshellcheck version: {shellcheck.version}\n"
f"process_result.stdout: {process_result.stdout.decode()}"
)
return ParsedShellImports()
paths = set()
for error in output:
if not error.get("code", "") == 1091:
continue
msg = error.get("message", "")
matches = PATH_FROM_SHELLCHECK_ERROR.match(msg)
if matches:
paths.add(matches.group(1))
else:
logger.error(
f"Parsing {request.fp} for dependency inference failed because Shellcheck's error "
f"message was not in the expected format. Please open a GitHub issue at "
f"https://github.com/pantsbuild/pants/issues/new with this error message "
f"attached.\n\n\nshellcheck version: {shellcheck.version}\n"
f"error JSON entry: {error}"
)
return ParsedShellImports(paths)
class InferShellDependencies(InferDependenciesRequest):
infer_from = ShellSources
@rule(desc="Inferring Shell dependencies by analyzing imports")
async def infer_shell_dependencies(
request: InferShellDependencies, shell_mapping: ShellMapping, shell_setup: ShellSetup
) -> InferredDependencies:
if not shell_setup.dependency_inference:
return InferredDependencies([], sibling_dependencies_inferrable=False)
address = request.sources_field.address
wrapped_tgt = await Get(WrappedTarget, Address, address)
explicitly_provided_deps, hydrated_sources = await MultiGet(
Get(ExplicitlyProvidedDependencies, DependenciesRequest(wrapped_tgt.target[Dependencies])),
Get(HydratedSources, HydrateSourcesRequest(request.sources_field)),
)
per_file_digests = await MultiGet(
Get(Digest, DigestSubset(hydrated_sources.snapshot.digest, PathGlobs([f])))
for f in hydrated_sources.snapshot.files
)
all_detected_imports = await MultiGet(
Get(ParsedShellImports, ParseShellImportsRequest(digest, f))
for digest, f in zip(per_file_digests, hydrated_sources.snapshot.files)
)
result: OrderedSet[Address] = OrderedSet()
for detected_imports in all_detected_imports:
for import_path in detected_imports:
unambiguous = shell_mapping.mapping.get(import_path)
ambiguous = shell_mapping.ambiguous_modules.get(import_path)
if unambiguous:
result.add(unambiguous)
elif ambiguous:
explicitly_provided_deps.maybe_warn_of_ambiguous_dependency_inference(
ambiguous,
address,
import_reference="file",
context=f"The target {address} sources `{import_path}`",
)
maybe_disambiguated = explicitly_provided_deps.disambiguated_via_ignores(ambiguous)
if maybe_disambiguated:
result.add(maybe_disambiguated)
return InferredDependencies(sorted(result), sibling_dependencies_inferrable=True)
def rules():
return (*collect_rules(), UnionRule(InferDependenciesRequest, InferShellDependencies))
| benjyw/pants | src/python/pants/backend/shell/dependency_inference.py | Python | apache-2.0 | 8,271 |
import logging
import tarfile
import tempfile
import os
import fabric.api
import fabric.operations
import cloudenvy.envy
class Dotfiles(cloudenvy.envy.Command):
def _build_subparser(self, subparsers):
help_str = 'Upload dotfiles from your local machine to an Envy.'
subparser = subparsers.add_parser('dotfiles', help=help_str,
description=help_str)
subparser.set_defaults(func=self.run)
subparser.add_argument('-n', '--name', action='store', default='',
help='Specify custom name for an Envy.')
subparser.add_argument('-f', '--files', action='store',
help='Limit operation to a specific list of '
'comma-separated files.')
return subparser
def run(self, config, args):
envy = cloudenvy.envy.Envy(config)
if envy.ip():
host_string = '%s@%s' % (envy.remote_user, envy.ip())
temp_tar = tempfile.NamedTemporaryFile(delete=True)
with fabric.api.settings(host_string=host_string):
if args.files:
dotfiles = args.files.split(',')
else:
dotfiles = config['defaults']['dotfiles'].split(',')
dotfiles = [dotfile.strip() for dotfile in dotfiles]
with tarfile.open(temp_tar.name, 'w') as archive:
for dotfile in dotfiles:
path = os.path.expanduser('~/%s' % dotfile)
if os.path.exists(path):
if not os.path.islink(path):
archive.add(path, arcname=dotfile)
fabric.operations.put(temp_tar, '~/dotfiles.tar')
fabric.operations.run('tar -xvf ~/dotfiles.tar')
else:
logging.error('Could not determine IP.')
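# Illustrative CLI usage (hedged; assumes cloudenvy's "envy" entry point wires up this
# subcommand as defined in _build_subparser above): upload a couple of specific dotfiles
# to the Envy named "dev":
#
#   envy dotfiles -n dev -f .vimrc,.gitconfig
#
# Without -f/--files, the comma-separated list from config['defaults']['dotfiles'] is used.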
| sysbot/cloudenvy | cloudenvy/commands/dotfiles.py | Python | apache-2.0 | 1,933 |
import pycparser
def main_eg():
parser = pycparser.CParser()
buf = '''
int main( int argc, char** argv ) {
j = p && r || q;
return j;
}
'''
t = parser.parse( buf, 'x.c' )
return t
if __name__ == "__main__":
t = main_eg()
t.show()
| quenette/COMPASS-I | t/scripts/test_pycparser.py | Python | apache-2.0 | 289 |
# -*- coding: utf-8 -*-
#
# Nablarch解説書 documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 08 09:45:59 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from datetime import date
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# project = u'∇Nablarch for commercial '
copyright = u'2010-' + str(date.today().year) + u', koyi Inc'
author = u'koyi Inc'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '5U6'
# The full version, including alpha/beta/rc tags.
# release = '5U6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ja'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'custom.css'
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'ja'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Nablarchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Nablarch.tex', u'Nablarch解説書 Documentation',
u'TIS Inc', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nablarch', u'Nablarch解説書 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Nablarch', u'Nablarch解説書 Documentation',
author, 'Nablarch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
extensions = ['sphinx.ext.todo', 'javasphinx']
todo_include_todos = False
javadoc_url_map = {
'nablarch' : ('http://192.168.160.123/javadoc/', 'javadoc'),
'javax.persistence' : ('http://docs.oracle.com/javaee/7/api/', 'javadoc'),
'javax.validation' : ('http://docs.oracle.com/javaee/7/api/', 'javadoc'),
'javax.servlet' : ('http://docs.oracle.com/javaee/7/api/', 'javadoc')
}
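# Note (hedged): javadoc_url_map is consumed by the javasphinx extension; each entry maps a
# Java package prefix to a (base URL, doc tool) pair, so that references to classes under,
# e.g., "javax.persistence" presumably resolve to the external Javadoc at docs.oracle.com.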
| koyi2016/sample | conf.py | Python | apache-2.0 | 9,767 |
# Copyright 2017 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gapit_test_framework import gapit_test, require, require_equal
from gapit_test_framework import GapitTest, require_not_equal
from vulkan_constants import VK_SUCCESS
@gapit_test("vkDeviceWaitIdle_test")
class WaitForSingleQueue(GapitTest):
def expect(self):
device_wait_idle = require(self.nth_call_of("vkDeviceWaitIdle", 1))
require_not_equal(0, device_wait_idle.int_device)
require_equal(VK_SUCCESS, int(device_wait_idle.return_val))
| google/vulkan_test_applications | gapid_tests/synchronization_tests/vkDeviceWaitIdle_test/vkDeviceWaitIdle_test.py | Python | apache-2.0 | 1,043 |
__author__ = 'juan'
import json
from termcolor import colored
import mysql.connector
import time
config = {
'user': 'elec',
'password': 'elec',
'host': 'thor.deusto.es',
'database': 'eu_test2',
}
class database:
def __init__(self):
self.con = mysql.connector.connect(**config)
def insert(self,tweet):
try:
self.con = mysql.connector.connect(**config)
tweet = json.loads(tweet)
self.insert_users(tweet)
#self.insert_tweets(tweet)
self.insert_mention(tweet)
#self.insert_language_group(tweet)
#self.insert_language_candidate(tweet)
#self.insert_hash_country(tweet)
#self.insert_hash_group(tweet)
#self.insert_hash_candidate(tweet)
self.con.commit()
self.con.close()
except Exception, e:
print colored("Insertion error "+ e.message, "red")
def insert_users(self,tweet):
#id TEXT, screen_name TEXT, total_tweets INT
keys = [tweet['user']['id'], tweet['user']['screen_name'],1]
try:
cursor = self.con.cursor()
select = "SELECT id, total_tweets from twitter_users where id="+str(keys[0])
cursor.execute(select)
node = cursor.fetchone()
if node:
total = node[1]+1
update = "UPDATE twitter_users set total_tweets = "+str(total)+" where id = "+str(keys[0])
cursor.execute(update)
else:
insert = "INSERT INTO twitter_users(id, screen_name, total_tweets) VALUES (" + str(keys[0]) + ",'" + keys[1] + "', 1)"
cursor.execute(insert)
except Exception, e:
print "DB Error - insert_user: ", e
def insert_tweets(self,tweet):
# id TEXT, user_id TEXT, text TEXT, created_at DATE, lang TEXT, retweeted BOOL
date= time.strftime('%Y-%m-%d', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))
keys = [tweet['id'], tweet['user']['id'], tweet['text'], date, tweet['lang'], tweet['retweeted']]
try:
cursor = self.con.cursor()
insert = "INSERT INTO tweets(id, user_id, id_str, text, created_at, lang, retweeted) VALUES ('"+str(keys[0])+"','"+str(keys[1])+"','"+str(keys[0])+"','"+keys[2]+"','"+keys[3]+"','"+keys[4]+"','"+str(bool(keys[5]))+"')"
cursor.execute(insert)
except Exception, e:
print "DB Error - insert_tweet: ", e
def insert_mention(self,tweet):
#user_id INT, target_id INT, day DATE, weight INT
replies = tweet['in_reply_to_user_id']
replies_screen_name = tweet['in_reply_to_screen_name']
date= time.strftime('%Y-%m-%d', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))
if replies:
keys = [tweet['user']['id'], replies, date, 1]
try:
cursor = self.con.cursor()
cursor.execute("SELECT * from interactions where (user_id = '"+str(tweet['user']['id'])+"' AND target_id = '"+str(replies)+"' AND day = '"+str(date)+"')")
node = cursor.fetchone()
if node:
total = node[3]+1
cursor.execute("UPDATE interactions set weight = '"+str(total)+"' WHERE (user_id = '"+str(tweet['user']['id'])+"' AND target_id = '"+str(replies)+"' AND day = '"+str(date)+"')")
else:
insert = "INSERT INTO interactions(user_id, target_id, day, weight) VALUES ('"+str(keys[0])+"','"+str(keys[1])+"','"+str(keys[2])+"','"+str(keys[3])+"') "
cursor.execute(insert)
except Exception, e:
print "DB Error - insert_mention: ", e
try:
cursor = self.con.cursor()
select = "SELECT id from twitter_users WHERE id="+str(replies)+";"
print select
cursor.execute(select)
node = cursor.fetchone()
if node:
print node
else:
insert = "INSERT INTO twitter_users(id, screen_name, total_tweets) VALUES (" + str(replies) + ",'" + replies_screen_name + "', 1)"
cursor.execute(insert)
print "added"
################
except Exception, e:
print "DB Error - insert_mentionAA: ", e
def insert_language_group(self,tweet):
#lang TEXT, group_id TEXT, total INT
keys = [tweet['lang'], "ALDE", 1]
try:
cursor = self.con.cursor()
cursor.execute("SELECT total from language_group WHERE ( lang='"+tweet['lang']+"' AND group_id ='"+"ALDE"+"')")
node = cursor.fetchone()
if node:
total = node[0]+1
cursor.execute("UPDATE language_group set total = "+str(total)+" WHERE ( lang='"+tweet['lang']+"' AND group_id ='"+"ALDE"+"')")
else:
cursor.execute("INSERT INTO language_group(lang,group_id,total) VALUES ('"+keys[0]+"','"+keys[1]+"','"+str(keys[2])+"')")
except Exception, e:
print "DB Error - language_group: ", e
def insert_language_candidate(self,tweet):
#lang TEXT, candidate_id INT, total INT
keys = [tweet['lang'], 44101578, 1]
try:
cursor = self.con.cursor()
cursor.execute("SELECT total from language_candidate WHERE ( lang='"+tweet['lang']+"' AND candidate_id ='"+str(44101578)+"')")
node = cursor.fetchone()
if node:
total = node[0]+1
cursor.execute("UPDATE language_candidate set total = "+str(total)+" WHERE ( lang='"+tweet['lang']+"' AND candidate_id ='"+str(44101578)+"')")
else:
cursor.execute("INSERT INTO language_candidate(lang,candidate_id,total) VALUES ('"+keys[0]+"','"+str(keys[1])+"','"+str(keys[2])+"')")
except Exception, e:
print "DB Error - language_candidate: ", e
def insert_hash_country(self,tweet):
#text TEXT, country_id TEXT, day DATE, total INT
hashtags = tweet['entities']['hashtags']
date= time.strftime('%Y-%m-%d', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))
for h in hashtags:
hashtag = h['text']
keys = [hashtag, tweet['lang'], date, 1]
try:
cursor = self.con.cursor()
cursor.execute("SELECT text, total from hash_country WHERE ( text='"+hashtag+"' AND country_id = '"+tweet['lang']+"' AND day = '"+str(date)+"')")
node = cursor.fetchone()
if node:
total = node[1]+1
cursor.execute("UPDATE hash_country set total = "+str(total)+" WHERE ( text='"+hashtag+"' AND country_id = '"+tweet['lang']+"' AND day = '"+str(date)+"')")
else:
insert = "INSERT INTO hash_country(text, country_id, day, total) VALUES ('"+hashtag+"','"+tweet['lang']+"','"+str(date)+"','"+str(1)+"' )"
cursor.execute(insert)
except Exception, e:
print "DB Error - insert_hash_country: ", e
def insert_hash_group(self,tweet):
#text TEXT, group_id TEXT, day DATE, total INT
hashtags = tweet['entities']['hashtags']
date= time.strftime('%Y-%m-%d', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))
for h in hashtags:
hashtag = h['text']
try:
cursor = self.con.cursor()
cursor.execute("SELECT text, total from hash_group WHERE ( text='"+hashtag+"' AND group_id = 'ALDE' AND day = '"+str(date)+"')")
node = cursor.fetchone()
if node:
total = node[1]+1
cursor.execute("UPDATE hash_group set total = "+str(total)+" WHERE ( text='"+hashtag+"' AND group_id = 'ALDE' AND day = '"+str(date)+"')")
else:
insert = "INSERT INTO hash_group(text, group_id, day, total) VALUES ('"+hashtag+"','"+"ALDE"+"','"+str(date)+"','"+str(1)+"' )"
cursor.execute(insert)
except Exception, e:
print "DB Error - insert_hash_group: ", e
def insert_hash_candidate(self,tweet):
#text TEXT, candidate_id INT, day DATE, total INT
hashtags = tweet['entities']['hashtags']
date= time.strftime('%Y-%m-%d', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))
for h in hashtags:
hashtag = h['text']
try:
cursor = self.con.cursor()
cursor.execute("SELECT text, total from hash_candidate WHERE ( text='"+hashtag+"' AND candidate_id = "+str(44101578)+" AND day = '"+str(date)+"')")
node = cursor.fetchone()
if node:
total = node[1]+1
cursor.execute("UPDATE hash_candidate set total = "+str(total)+" WHERE ( text='"+hashtag+"' AND candidate_id = "+str(44101578)+" AND day = '"+str(date)+"')")
else:
insert = "INSERT INTO hash_candidate(text, candidate_id, day, total) VALUES ('"+hashtag+"','"+str(44101578)+"','"+str(date)+"','"+str(1)+"' )"
cursor.execute(insert)
except Exception, e:
print "DB Error - insert_hash_group: ", e
| aitoralmeida/eu-elections | analyzer/remote_database.py | Python | apache-2.0 | 9,695 |
from .column_pair_values_equal import ColumnPairValuesEqual
from .column_pair_values_greater import ColumnPairValuesAGreaterThanB
from .column_pair_values_in_set import ColumnPairValuesInSet
| great-expectations/great_expectations | great_expectations/expectations/metrics/column_pair_map_metrics/__init__.py | Python | apache-2.0 | 191 |
import copy
import datetime
import logging
import math
import operator
import traceback
from collections import namedtuple
from typing import Any, Dict, Optional, Tuple
from pyparsing import (
CaselessKeyword,
Combine,
Forward,
Group,
Literal,
ParseException,
Regex,
Suppress,
Word,
alphanums,
alphas,
delimitedList,
dictOf,
)
from great_expectations.core.urn import ge_urn
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.exceptions import EvaluationParameterError
logger = logging.getLogger(__name__)
_epsilon = 1e-12
class EvaluationParameterParser:
"""
This Evaluation Parameter Parser uses pyparsing to provide a basic expression language capable of evaluating
parameters using values available only at run time.
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
The parser is modified from: https://github.com/pyparsing/pyparsing/blob/master/examples/fourFn.py
"""
# map operator symbols to corresponding arithmetic operations
opn = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"^": operator.pow,
}
fn = {
"sin": math.sin,
"cos": math.cos,
"tan": math.tan,
"exp": math.exp,
"abs": abs,
"trunc": lambda a: int(a),
"round": round,
"sgn": lambda a: -1 if a < -_epsilon else 1 if a > _epsilon else 0,
"now": datetime.datetime.now,
"datetime": datetime.datetime,
"timedelta": datetime.timedelta,
}
def __init__(self):
self.exprStack = []
self._parser = None
def push_first(self, toks):
self.exprStack.append(toks[0])
def push_unary_minus(self, toks):
for t in toks:
if t == "-":
self.exprStack.append("unary -")
else:
break
def clear_stack(self):
del self.exprStack[:]
def get_parser(self):
self.clear_stack()
if not self._parser:
# use CaselessKeyword for e and pi, to avoid accidentally matching
# functions that start with 'e' or 'pi' (such as 'exp'); Keyword
# and CaselessKeyword only match whole words
e = CaselessKeyword("E")
pi = CaselessKeyword("PI")
# fnumber = Combine(Word("+-"+nums, nums) +
# Optional("." + Optional(Word(nums))) +
# Optional(e + Word("+-"+nums, nums)))
# or use provided pyparsing_common.number, but convert back to str:
# fnumber = ppc.number().addParseAction(lambda t: str(t[0]))
fnumber = Regex(r"[+-]?(?:\d+|\.\d+)(?:\.\d+)?(?:[eE][+-]?\d+)?")
ge_urn = Combine(
Literal("urn:great_expectations:")
+ Word(alphas, f"{alphanums}_$:?=%.&")
)
variable = Word(alphas, f"{alphanums}_$")
ident = ge_urn | variable
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal("^")
expr = Forward()
expr_list = delimitedList(Group(expr))
# We will allow functions either to accept *only* keyword
# expressions or *only* non-keyword expressions
# define function keyword arguments
key = Word(f"{alphas}_") + Suppress("=")
# value = (fnumber | Word(alphanums))
value = expr
keyval = dictOf(key.setParseAction(self.push_first), value)
kwarglist = delimitedList(keyval)
# add parse action that replaces the function identifier with a (name, number of args, has_fn_kwargs) tuple
# 20211009 - JPC - Note that it's important that we consider kwarglist
# first as part of disabling backtracking for the function's arguments
fn_call = (ident + lpar + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), 0, False))
) | (
(ident + lpar - Group(expr_list) + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), len(t[0]), False))
)
^ (ident + lpar - Group(kwarglist) + rpar).setParseAction(
lambda t: t.insert(0, (t.pop(0), len(t[0]), True))
)
)
atom = (
addop[...]
+ (
(fn_call | pi | e | fnumber | ident).setParseAction(self.push_first)
| Group(lpar + expr + rpar)
)
).setParseAction(self.push_unary_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left
# exponents, instead of left-to-right that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor <<= atom + (expop + factor).setParseAction(self.push_first)[...]
term = factor + (multop + factor).setParseAction(self.push_first)[...]
expr <<= term + (addop + term).setParseAction(self.push_first)[...]
self._parser = expr
return self._parser
def evaluate_stack(self, s):
op, num_args, has_fn_kwargs = s.pop(), 0, False
if isinstance(op, tuple):
op, num_args, has_fn_kwargs = op
if op == "unary -":
return -self.evaluate_stack(s)
if op in "+-*/^":
# note: operands are pushed onto the stack in reverse order
op2 = self.evaluate_stack(s)
op1 = self.evaluate_stack(s)
return self.opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in self.fn:
# note: args are pushed onto the stack in reverse order
if has_fn_kwargs:
kwargs = dict()
for _ in range(num_args):
v = self.evaluate_stack(s)
k = s.pop()
kwargs.update({k: v})
return self.fn[op](**kwargs)
else:
args = reversed([self.evaluate_stack(s) for _ in range(num_args)])
return self.fn[op](*args)
else:
# try to evaluate as int first, then as float if int fails
# NOTE: JPC - 20200403 - Originally I considered returning the raw op here if parsing as float also
# fails, but I decided against it to instead require that the *entire* expression evaluates
# numerically UNLESS there is *exactly one* expression to substitute (see cases where len(L) == 1 in the
# parse_evaluation_parameter method.
try:
return int(op)
except ValueError:
return float(op)
def build_evaluation_parameters(
expectation_args: dict,
evaluation_parameters: Optional[dict] = None,
interactive_evaluation: bool = True,
data_context=None,
) -> Tuple[dict, dict]:
"""Build a dictionary of parameters to evaluate, using the provided evaluation_parameters,
AND mutate expectation_args by removing any parameter values passed in as temporary values during
exploratory work.
"""
evaluation_args = copy.deepcopy(expectation_args)
substituted_parameters = {}
# Iterate over arguments, and replace $PARAMETER-defined args with their
# specified parameters.
for key, value in evaluation_args.items():
if isinstance(value, dict) and "$PARAMETER" in value:
# We do not even need to search for a value if we are not going to do interactive evaluation
if not interactive_evaluation:
continue
# First, check to see whether an argument was supplied at runtime
# If it was, use that one, but remove it from the stored config
param_key = f"$PARAMETER.{value['$PARAMETER']}"
if param_key in value:
evaluation_args[key] = evaluation_args[key][param_key]
del expectation_args[key][param_key]
# If not, try to parse the evaluation parameter and substitute, which will raise
# an exception if we do not have a value
else:
raw_value = value["$PARAMETER"]
parameter_value = parse_evaluation_parameter(
raw_value,
evaluation_parameters=evaluation_parameters,
data_context=data_context,
)
evaluation_args[key] = parameter_value
# Once we've substituted, we also track that we did so
substituted_parameters[key] = parameter_value
return evaluation_args, substituted_parameters
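# Illustrative example (names are hypothetical): given
#   expectation_args = {"min_value": {"$PARAMETER": "my_min"}, "column": "passenger_count"}
#   evaluation_parameters = {"my_min": 5}
# build_evaluation_parameters returns
#   ({"min_value": 5, "column": "passenger_count"}, {"min_value": 5})
# i.e. the $PARAMETER reference is resolved in the returned args and recorded in
# substituted_parameters, while plain values pass through untouched.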
expr = EvaluationParameterParser()
def find_evaluation_parameter_dependencies(parameter_expression):
"""Parse a parameter expression to identify dependencies including GE URNs.
Args:
parameter_expression: the parameter to parse
Returns:
a dictionary including:
- "urns": set of strings that are valid GE URN objects
- "other": set of non-GE URN strings that are required to evaluate the parameter expression
"""
expr = EvaluationParameterParser()
dependencies = {"urns": set(), "other": set()}
# Calling get_parser clears the stack
parser = expr.get_parser()
try:
_ = parser.parseString(parameter_expression, parseAll=True)
except ParseException as err:
raise EvaluationParameterError(
f"Unable to parse evaluation parameter: {str(err)} at line {err.line}, column {err.column}"
)
except AttributeError as err:
raise EvaluationParameterError(
f"Unable to parse evaluation parameter: {str(err)}"
)
for word in expr.exprStack:
if isinstance(word, (int, float)):
continue
if not isinstance(word, str):
# If we have a function that itself is a tuple (e.g. (trunc, 1))
continue
if word in expr.opn or word in expr.fn or word == "unary -":
# operations and functions
continue
# if this is parseable as a number, then we do not include it
try:
_ = float(word)
continue
except ValueError:
pass
try:
_ = ge_urn.parseString(word)
dependencies["urns"].add(word)
continue
except ParseException:
# This particular evaluation_parameter or operator is not a valid URN
pass
# If we got this far, it's a legitimate "other" evaluation parameter
dependencies["other"].add(word)
return dependencies
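# Illustrative example (parameter name is hypothetical): a plain arithmetic expression such
# as "upstream_row_count * 0.9" yields
#   {"urns": set(), "other": {"upstream_row_count"}}
# because "upstream_row_count" is neither a number, an operator, nor a parseable GE URN.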
def parse_evaluation_parameter(
parameter_expression: str,
evaluation_parameters: Optional[Dict[str, Any]] = None,
data_context: Optional[Any] = None, # Cannot type 'DataContext' due to import cycle
) -> Any:
"""Use the provided evaluation_parameters dict to parse a given parameter expression.
Args:
parameter_expression (str): A string, potentially containing basic arithmetic operations and functions,
and variables to be substituted
evaluation_parameters (dict): A dictionary of name-value pairs consisting of values to substitute
data_context (DataContext): A data context to use to obtain metrics, if necessary
The parser will allow arithmetic operations +, -, /, *, as well as basic functions, including trunc() and round() to
obtain integer values when needed for certain expectations (e.g. expect_column_value_length_to_be_between).
Valid variables must begin with an alphabetic character and may contain alphanumeric characters plus '_' and '$',
EXCEPT if they begin with the string "urn:great_expectations" in which case they may also include additional
    characters to support inclusion of GE URNs (see :ref:`evaluation_parameters` for more information).
"""
if evaluation_parameters is None:
evaluation_parameters = {}
# Calling get_parser clears the stack
parser = expr.get_parser()
try:
L = parser.parseString(parameter_expression, parseAll=True)
except ParseException as err:
L = ["Parse Failure", parameter_expression, (str(err), err.line, err.column)]
# Represents a valid parser result of a single function that has no arguments
if len(L) == 1 and isinstance(L[0], tuple) and L[0][2] is False:
# Necessary to catch `now()` (which only needs to be evaluated with `expr.exprStack`)
# NOTE: 20211122 - Chetan - Any future built-ins that are zero arity functions will match this behavior
pass
elif len(L) == 1 and L[0] not in evaluation_parameters:
# In this special case there were no operations to find, so only one value, but we don't have something to
# substitute for that value
try:
res = ge_urn.parseString(L[0])
if res["urn_type"] == "stores":
store = data_context.stores.get(res["store_name"])
return store.get_query_result(
res["metric_name"], res.get("metric_kwargs", {})
)
else:
logger.error(
"Unrecognized urn_type in ge_urn: must be 'stores' to use a metric store."
)
raise EvaluationParameterError(
f"No value found for $PARAMETER {str(L[0])}"
)
except ParseException as e:
logger.debug(
f"Parse exception while parsing evaluation parameter: {str(e)}"
)
raise EvaluationParameterError(f"No value found for $PARAMETER {str(L[0])}")
except AttributeError:
logger.warning("Unable to get store for store-type valuation parameter.")
raise EvaluationParameterError(f"No value found for $PARAMETER {str(L[0])}")
elif len(L) == 1:
# In this case, we *do* have a substitution for a single type. We treat this specially because in this
# case, we allow complex type substitutions (i.e. do not coerce to string as part of parsing)
# NOTE: 20201023 - JPC - to support MetricDefinition as an evaluation parameter type, we need to handle that
# case here; is the evaluation parameter provided here in fact a metric definition?
return evaluation_parameters[L[0]]
elif len(L) == 0 or L[0] != "Parse Failure":
for i, ob in enumerate(expr.exprStack):
if isinstance(ob, str) and ob in evaluation_parameters:
expr.exprStack[i] = str(evaluation_parameters[ob])
else:
err_str, err_line, err_col = L[-1]
raise EvaluationParameterError(
f"Parse Failure: {err_str}\nStatement: {err_line}\nColumn: {err_col}"
)
try:
result = expr.evaluate_stack(expr.exprStack)
result = convert_to_json_serializable(result)
except Exception as e:
exception_traceback = traceback.format_exc()
exception_message = (
f'{type(e).__name__}: "{str(e)}". Traceback: "{exception_traceback}".'
)
logger.debug(exception_message, e, exc_info=True)
raise EvaluationParameterError(
f"Error while evaluating evaluation parameter expression: {str(e)}"
)
return result
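# Illustrative example (parameter name is hypothetical): with
#   evaluation_parameters = {"upstream_row_count": 10}
# the call
#   parse_evaluation_parameter("trunc(upstream_row_count / 3)", evaluation_parameters)
# substitutes 10 for the variable, evaluates 10 / 3 on the operator stack, and applies
# trunc(), returning 3.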
def _deduplicate_evaluation_parameter_dependencies(dependencies: dict) -> dict:
deduplicated = {}
for suite_name, required_metrics in dependencies.items():
deduplicated[suite_name] = []
metrics = set()
metric_kwargs = {}
for metric in required_metrics:
if isinstance(metric, str):
metrics.add(metric)
elif isinstance(metric, dict):
# There is a single metric_kwargs_id object in this construction
for kwargs_id, metric_list in metric["metric_kwargs_id"].items():
if kwargs_id not in metric_kwargs:
metric_kwargs[kwargs_id] = set()
for metric_name in metric_list:
metric_kwargs[kwargs_id].add(metric_name)
deduplicated[suite_name] = list(metrics)
if len(metric_kwargs) > 0:
deduplicated[suite_name] = deduplicated[suite_name] + [
{
"metric_kwargs_id": {
metric_kwargs: list(metrics_set)
for (metric_kwargs, metrics_set) in metric_kwargs.items()
}
}
]
return deduplicated
EvaluationParameterIdentifier = namedtuple(
"EvaluationParameterIdentifier",
["expectation_suite_name", "metric_name", "metric_kwargs_id"],
)
| great-expectations/great_expectations | great_expectations/core/evaluation_parameters.py | Python | apache-2.0 | 17,237 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains tests related to MLPerf.
Note this test only passes if the MLPerf compliance library is installed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
import logging
import re
import six
import tensorflow.compat.v1 as tf
import benchmark_cnn
import datasets
import mlperf
import test_util
from models import model
from mlperf_compliance import mlperf_log
class _MlPerfTestModel(model.CNNModel):
"""A model to test the MLPerf compliance logging on."""
def __init__(self):
super(_MlPerfTestModel, self).__init__(
'mlperf_test_model', image_size=224, batch_size=2, learning_rate=1)
def add_inference(self, cnn):
assert cnn.top_layer.shape[1:] == (3, 224, 224)
cnn.conv(1, 1, 1, 1, 1, use_batch_norm=True)
cnn.mpool(1, 1, 1, 1, num_channels_in=1)
cnn.reshape([-1, 224 * 224])
cnn.affine(1, activation=None)
# Assert that the batch norm variables are filtered out for L2 loss.
variables = tf.global_variables() + tf.local_variables()
assert len(variables) > len(self.filter_l2_loss_vars(variables))
class MlPerfComplianceTest(tf.test.TestCase):
"""Tests the MLPerf compliance logs.
This serves as a quick check that we probably didn't break the compliance
  logging. It is not meant to be as comprehensive as the official MLPerf
compliance checker will be.
"""
def setUp(self):
super(MlPerfComplianceTest, self).setUp()
benchmark_cnn.setup(benchmark_cnn.make_params())
# Map between each regex and the number of times we expect to see that regex in the
# logs. Entries commented out with the comment FIXME indicate that
# tf_cnn_benchmarks currently fails compliance in that regard, and needs to be
# fixed to be MLPerf compliant.
EXPECTED_LOG_REGEXES = {
# Preprocessing tags
mlperf.tags.INPUT_ORDER: 2, # 1 for training, 1 for eval
# We pass --tf_random_seed=9876 in the test.
r'%s: 9876' % mlperf.tags.RUN_SET_RANDOM_SEED: 2,
# The Numpy random seed is hardcoded to 4321.
r'%s: 4321' % mlperf.tags.RUN_SET_RANDOM_SEED: 2,
r'%s: %d' % (mlperf.tags.PREPROC_NUM_TRAIN_EXAMPLES,
datasets.IMAGENET_NUM_TRAIN_IMAGES): 1,
r'%s: %d' % (mlperf.tags.PREPROC_NUM_EVAL_EXAMPLES,
datasets.IMAGENET_NUM_VAL_IMAGES): 1,
mlperf.tags.PREPROC_NUM_EVAL_EXAMPLES + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_MIN_OBJ_COV + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_RATIO_RANGE + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_AREA_RANGE + '.*': 1,
mlperf.tags.INPUT_DISTORTED_CROP_MAX_ATTEMPTS + '.*': 1,
mlperf.tags.INPUT_RANDOM_FLIP + '.*': 1,
r'%s: \[224, 224\].*' % mlperf.tags.INPUT_CENTRAL_CROP: 1,
r'%s: \[123.68, 116.78, 103.94\].*' % mlperf.tags.INPUT_MEAN_SUBTRACTION:
2,
r'%s: {"min": 256}.*' % mlperf.tags.INPUT_RESIZE_ASPECT_PRESERVING: 1,
# 1 for training, 1 for eval
r'%s: \[224, 224\].*' % mlperf.tags.INPUT_RESIZE: 2,
# Resnet model tags
mlperf.tags.MODEL_HP_BATCH_NORM + '.*': 2,
# 2 for training, 2 for eval. Although there's only 1 conv2d, each conv2d
# produces 2 logs.
mlperf.tags.MODEL_HP_CONV2D_FIXED_PADDING + '.*': 4,
mlperf.tags.MODEL_HP_RELU + '.*': 2,
mlperf.tags.MODEL_HP_INITIAL_MAX_POOL + '.*': 2,
    mlperf.tags.MODEL_HP_DENSE + '.*': 4,
# Note that tags our test model does not emit, like MODEL_HP_SHORTCUT_ADD,
# are omitted here.
r'%s: "categorical_cross_entropy".*' % mlperf.tags.MODEL_HP_LOSS_FN: 1,
# 1 for training, 2 because the _MlPerfTestModel calls this when building
# the model for both training and eval
r'%s: true' % mlperf.tags.MODEL_EXCLUDE_BN_FROM_L2: 3,
r'%s: 0.5.*' % mlperf.tags.MODEL_L2_REGULARIZATION: 1,
# Note we do not handle OPT_LR, since that is printed to stderr using
# tf.Print, which we cannot easily intercept.
# Other tags
'%s: "%s"' % (mlperf.tags.OPT_NAME, mlperf.tags.SGD_WITH_MOMENTUM): 1,
'%s: 0.5' % mlperf.tags.OPT_MOMENTUM: 1,
mlperf.tags.RUN_START: 1,
'%s: 2' % mlperf.tags.INPUT_BATCH_SIZE: 1,
mlperf.tags.TRAIN_LOOP: 1,
mlperf.tags.TRAIN_EPOCH + '.*': 1,
'%s: 2' % mlperf.tags.INPUT_SIZE: 2,
mlperf.tags.EVAL_START: 2,
mlperf.tags.EVAL_STOP: 2,
'%s: 6' % mlperf.tags.EVAL_SIZE: 2,
mlperf.tags.EVAL_ACCURACY + '.*': 2,
'%s: 2.0' % mlperf.tags.EVAL_TARGET: 2,
mlperf.tags.RUN_STOP + '.*': 1,
mlperf.tags.RUN_FINAL: 1
}
EXPECTED_LOG_REGEXES = Counter({re.compile(k): v for
k, v in EXPECTED_LOG_REGEXES.items()})
def testMlPerfCompliance(self):
string_io = six.StringIO()
handler = logging.StreamHandler(string_io)
data_dir = test_util.create_black_and_white_images()
try:
mlperf_log.LOGGER.addHandler(handler)
params = benchmark_cnn.make_params(data_dir=data_dir,
data_name='imagenet',
batch_size=2,
num_warmup_batches=0,
num_batches=2,
num_eval_batches=3,
eval_during_training_every_n_steps=1,
distortions=False,
weight_decay=0.5,
optimizer='momentum',
momentum=0.5,
stop_at_top_1_accuracy=2.0,
tf_random_seed=9876,
ml_perf=True)
with mlperf.mlperf_logger(use_mlperf_logger=True, model='resnet50_v1.5'):
bench_cnn = benchmark_cnn.BenchmarkCNN(params, model=_MlPerfTestModel())
bench_cnn.run()
logs = string_io.getvalue().splitlines()
log_regexes = Counter()
for log in logs:
for regex in self.EXPECTED_LOG_REGEXES:
if regex.search(log):
log_regexes[regex] += 1
if log_regexes != self.EXPECTED_LOG_REGEXES:
diff_counter = Counter(log_regexes)
diff_counter.subtract(self.EXPECTED_LOG_REGEXES)
differences = []
for regex in (k for k in diff_counter.keys() if diff_counter[k]):
found_count = log_regexes[regex]
expected_count = self.EXPECTED_LOG_REGEXES[regex]
differences.append(' For regex %s: Found %d lines matching but '
'expected to find %d' %
(regex.pattern, found_count, expected_count))
raise AssertionError('Logs did not match expected logs. Differences:\n'
'%s' % '\n'.join(differences))
finally:
mlperf_log.LOGGER.removeHandler(handler)
if __name__ == '__main__':
tf.disable_v2_behavior()
tf.test.main()
| tensorflow/benchmarks | scripts/tf_cnn_benchmarks/mlperf_test.py | Python | apache-2.0 | 7,794 |
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import d1_test.d1_test_case
import d1_test.instance_generator.person
# ===============================================================================
@d1_test.d1_test_case.reproducible_random_decorator("TestPerson")
class TestPerson(d1_test.d1_test_case.D1TestCase):
def test_1000(self):
"""generate()"""
person_list = [
d1_test.instance_generator.person.generate().toxml("utf-8")
for _ in range(3)
]
self.sample.assert_equals(person_list, "inst_gen_person")
| DataONEorg/d1_python | test_utilities/src/d1_test/instance_generator/tests/test_person.py | Python | apache-2.0 | 1,335 |
"""A collection of ORM sqlalchemy models for Caravel"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import functools
import json
import logging
import textwrap
from collections import namedtuple
from copy import deepcopy, copy
from datetime import timedelta, datetime, date
import humanize
import pandas as pd
import requests
import sqlalchemy as sqla
from sqlalchemy.engine.url import make_url
import sqlparse
from dateutil.parser import parse
from flask import Markup, url_for
from flask import escape, g, Markup, request
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.filemanager import get_file_original_name
from flask_babel import lazy_gettext as _
from pydruid.client import PyDruid
from pydruid.utils.filters import Dimension, Filter
from pydruid.utils.postaggregator import Postaggregator
from pydruid.utils.having import Aggregation
from six import string_types
from sqlalchemy import (
Column, Integer, String, ForeignKey, Text, Boolean,
DateTime, Date, Table, Numeric,
create_engine, MetaData, desc, asc, select, and_, func
)
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from sqlalchemy.sql import table, literal_column, text, column
from sqlalchemy.sql.expression import ColumnClause, TextAsFrom
from sqlalchemy_utils import EncryptedType
from werkzeug.datastructures import ImmutableMultiDict
import caravel
from caravel import app, db, get_session, utils, sm
from caravel.source_registry import SourceRegistry
from caravel.viz import viz_types
from caravel.utils import flasher, MetricPermException, DimSelector
config = app.config
QueryResult = namedtuple('QueryResult', ['df', 'query', 'duration'])
FillterPattern = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
class JavascriptPostAggregator(Postaggregator):
def __init__(self, name, field_names, function):
self.post_aggregator = {
'type': 'javascript',
'fieldNames': field_names,
'name': name,
'function': function,
}
self.name = name
class AuditMixinNullable(AuditMixin):
"""Altering the AuditMixin to use nullable fields
Allows creating objects programmatically outside of CRUD
"""
created_on = Column(DateTime, default=datetime.now, nullable=True)
changed_on = Column(
DateTime, default=datetime.now,
onupdate=datetime.now, nullable=True)
@declared_attr
def created_by_fk(cls): # noqa
return Column(Integer, ForeignKey('ab_user.id'),
default=cls.get_user_id, nullable=True)
@declared_attr
def changed_by_fk(cls): # noqa
return Column(
Integer, ForeignKey('ab_user.id'),
default=cls.get_user_id, onupdate=cls.get_user_id, nullable=True)
@renders('created_on')
def creator(self): # noqa
return '{}'.format(self.created_by or '')
@property
def changed_by_(self):
return '{}'.format(self.changed_by or '')
@renders('changed_on')
def changed_on_(self):
return Markup(
'<span class="no-wrap">{}</span>'.format(self.changed_on))
@renders('changed_on')
def modified(self):
s = humanize.naturaltime(datetime.now() - self.changed_on)
return Markup('<span class="no-wrap">{}</span>'.format(s))
@property
def icons(self):
return """
<a
href="{self.datasource_edit_url}"
data-toggle="tooltip"
title="{self.datasource}">
<i class="fa fa-database"></i>
</a>
""".format(**locals())
class Url(Model, AuditMixinNullable):
"""Used for the short url feature"""
__tablename__ = 'url'
id = Column(Integer, primary_key=True)
url = Column(Text)
class CssTemplate(Model, AuditMixinNullable):
"""CSS templates for dashboards"""
__tablename__ = 'css_templates'
id = Column(Integer, primary_key=True)
template_name = Column(String(250))
css = Column(Text, default='')
slice_user = Table('slice_user', Model.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('ab_user.id')),
Column('slice_id', Integer, ForeignKey('slices.id'))
)
class Slice(Model, AuditMixinNullable):
"""A slice is essentially a report or a view on data"""
__tablename__ = 'slices'
id = Column(Integer, primary_key=True)
slice_name = Column(String(250))
datasource_id = Column(Integer)
datasource_type = Column(String(200))
datasource_name = Column(String(2000))
viz_type = Column(String(250))
params = Column(Text)
description = Column(Text)
cache_timeout = Column(Integer)
perm = Column(String(2000))
owners = relationship("User", secondary=slice_user)
def __repr__(self):
return self.slice_name
@property
def cls_model(self):
return SourceRegistry.sources[self.datasource_type]
@property
def datasource(self):
return self.get_datasource
@datasource.getter
@utils.memoized
def get_datasource(self):
ds = db.session.query(
self.cls_model).filter_by(
id=self.datasource_id).first()
return ds
@renders('datasource_name')
def datasource_link(self):
return self.datasource.link
@property
def datasource_edit_url(self):
        return self.datasource.url
@property
@utils.memoized
def viz(self):
d = json.loads(self.params)
viz_class = viz_types[self.viz_type]
return viz_class(self.datasource, form_data=d)
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def data(self):
"""Data used to render slice in templates"""
d = {}
self.token = ''
try:
d = self.viz.data
self.token = d.get('token')
except Exception as e:
d['error'] = str(e)
d['slice_id'] = self.id
d['slice_name'] = self.slice_name
d['description'] = self.description
d['slice_url'] = self.slice_url
d['edit_url'] = self.edit_url
d['description_markeddown'] = self.description_markeddown
return d
@property
def json_data(self):
return json.dumps(self.data)
@property
def slice_url(self):
"""Defines the url to access the slice"""
try:
slice_params = json.loads(self.params)
except Exception as e:
logging.exception(e)
slice_params = {}
slice_params['slice_id'] = self.id
slice_params['json'] = "false"
slice_params['slice_name'] = self.slice_name
from werkzeug.urls import Href
href = Href(
"/caravel/explore/{obj.datasource_type}/"
"{obj.datasource_id}/".format(obj=self))
return href(slice_params)
@property
def edit_url(self):
return "/slicemodelview/edit/{}".format(self.id)
@property
def slice_link(self):
url = self.slice_url
name = escape(self.slice_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_viz(self, url_params_multidict=None):
"""Creates :py:class:viz.BaseViz object from the url_params_multidict.
:param werkzeug.datastructures.MultiDict url_params_multidict:
Contains the visualization params, they override the self.params
stored in the database
:return: object of the 'viz_type' type that is taken from the
url_params_multidict or self.params.
:rtype: :py:class:viz.BaseViz
"""
slice_params = json.loads(self.params) # {}
slice_params['slice_id'] = self.id
slice_params['json'] = "false"
slice_params['slice_name'] = self.slice_name
slice_params['viz_type'] = self.viz_type if self.viz_type else "table"
if url_params_multidict:
slice_params.update(url_params_multidict)
to_del = [k for k in slice_params if k not in url_params_multidict]
for k in to_del:
del slice_params[k]
immutable_slice_params = ImmutableMultiDict(slice_params)
return viz_types[immutable_slice_params.get('viz_type')](
self.datasource,
form_data=immutable_slice_params,
slice_=self
)
def set_perm(mapper, connection, target): # noqa
src_class = target.cls_model
id_ = target.datasource_id
ds = db.session.query(src_class).filter_by(id=int(id_)).first()
target.perm = ds.perm
sqla.event.listen(Slice, 'before_insert', set_perm)
sqla.event.listen(Slice, 'before_update', set_perm)
dashboard_slices = Table(
'dashboard_slices', Model.metadata,
Column('id', Integer, primary_key=True),
Column('dashboard_id', Integer, ForeignKey('dashboards.id')),
Column('slice_id', Integer, ForeignKey('slices.id')),
)
dashboard_user = Table(
'dashboard_user', Model.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', Integer, ForeignKey('ab_user.id')),
Column('dashboard_id', Integer, ForeignKey('dashboards.id'))
)
class Dashboard(Model, AuditMixinNullable):
"""The dashboard object!"""
__tablename__ = 'dashboards'
id = Column(Integer, primary_key=True)
dashboard_title = Column(String(500))
position_json = Column(Text)
description = Column(Text)
css = Column(Text)
json_metadata = Column(Text)
slug = Column(String(255), unique=True)
slices = relationship(
'Slice', secondary=dashboard_slices, backref='dashboards')
owners = relationship("User", secondary=dashboard_user)
def __repr__(self):
return self.dashboard_title
@property
def table_names(self):
return ", ".join({"{}".format(s.datasource) for s in self.slices})
@property
def url(self):
return "/caravel/dashboard/{}/".format(self.slug or self.id)
@property
def metadata_dejson(self):
if self.json_metadata:
return json.loads(self.json_metadata)
else:
return {}
@property
def sqla_metadata(self):
metadata = MetaData(bind=self.get_sqla_engine())
return metadata.reflect()
def dashboard_link(self):
title = escape(self.dashboard_title)
return Markup(
'<a href="{self.url}">{title}</a>'.format(**locals()))
@property
def json_data(self):
d = {
'id': self.id,
'metadata': self.metadata_dejson,
'dashboard_title': self.dashboard_title,
'slug': self.slug,
'slices': [slc.data for slc in self.slices],
'position_json': json.loads(self.position_json) if self.position_json else [],
}
return json.dumps(d)
class Queryable(object):
"""A common interface to objects that are queryable (tables and datasources)"""
@property
def column_names(self):
return sorted([c.column_name for c in self.columns])
@property
def main_dttm_col(self):
return "timestamp"
@property
def groupby_column_names(self):
return sorted([c.column_name for c in self.columns if c.groupby])
@property
def filterable_column_names(self):
return sorted([c.column_name for c in self.columns if c.filterable])
@property
def dttm_cols(self):
return []
@property
def url(self):
return '/{}/edit/{}'.format(self.baselink, self.id)
@property
def explore_url(self):
if self.default_endpoint:
return self.default_endpoint
else:
return "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
class Database(Model, AuditMixinNullable):
"""An ORM object that stores Database related information"""
__tablename__ = 'dbs'
id = Column(Integer, primary_key=True)
database_name = Column(String(250), unique=True)
sqlalchemy_uri = Column(String(1024))
password = Column(EncryptedType(String(1024), config.get('SECRET_KEY')))
cache_timeout = Column(Integer)
select_as_create_table_as = Column(Boolean, default=False)
expose_in_sqllab = Column(Boolean, default=False)
allow_run_sync = Column(Boolean, default=True)
allow_run_async = Column(Boolean, default=False)
allow_ctas = Column(Boolean, default=False)
allow_dml = Column(Boolean, default=False)
force_ctas_schema = Column(String(250))
extra = Column(Text, default=textwrap.dedent("""\
{
"metadata_params": {},
"engine_params": {}
}
"""))
def __repr__(self):
return self.database_name
@property
def backend(self):
url = make_url(self.sqlalchemy_uri_decrypted)
return url.get_backend_name()
def set_sqlalchemy_uri(self, uri):
conn = sqla.engine.url.make_url(uri)
self.password = conn.password
conn.password = "X" * 10 if conn.password else None
self.sqlalchemy_uri = str(conn) # hides the password
def get_sqla_engine(self, schema=None):
extra = self.get_extra()
url = make_url(self.sqlalchemy_uri_decrypted)
params = extra.get('engine_params', {})
if self.backend == 'presto' and schema:
if '/' in url.database:
url.database = url.database.split('/')[0] + '/' + schema
else:
url.database += '/' + schema
elif schema:
url.database = schema
return create_engine(url, **params)
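    # Illustrative example (URI and schema are hypothetical): for a presto URI whose database
    # part is "hive/default", calling get_sqla_engine(schema="sales") rewrites the database
    # to "hive/sales" before the engine is created; for the other backends handled here, the
    # schema simply replaces the database component.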
def get_df(self, sql, schema):
eng = self.get_sqla_engine(schema=schema)
cur = eng.execute(sql, schema=schema)
cols = [col[0] for col in cur.cursor.description]
df = pd.DataFrame(cur.fetchall(), columns=cols)
return df
def compile_sqla_query(self, qry, schema=None):
eng = self.get_sqla_engine(schema=schema)
compiled = qry.compile(eng, compile_kwargs={"literal_binds": True})
return '{}'.format(compiled)
def select_star(self, table_name, schema=None, limit=1000):
"""Generates a ``select *`` statement in the proper dialect"""
qry = select('*').select_from(text(table_name))
if limit:
qry = qry.limit(limit)
return self.compile_sqla_query(qry)
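    # Illustrative example (table name is hypothetical): select_star("energy_usage", limit=100)
    # compiles to roughly "SELECT * FROM energy_usage LIMIT 100", with exact quoting and
    # LIMIT syntax depending on the database dialect.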
def wrap_sql_limit(self, sql, limit=1000):
qry = (
select('*')
.select_from(TextAsFrom(text(sql), ['*'])
.alias('inner_qry')).limit(limit)
)
return self.compile_sqla_query(qry)
def safe_sqlalchemy_uri(self):
return self.sqlalchemy_uri
@property
def inspector(self):
engine = self.get_sqla_engine()
return sqla.inspect(engine)
def all_table_names(self, schema=None):
return sorted(self.inspector.get_table_names(schema))
def all_view_names(self, schema=None):
views = []
try:
views = self.inspector.get_view_names(schema)
except Exception as e:
pass
return views
def all_schema_names(self):
return sorted(self.inspector.get_schema_names())
def grains(self):
"""Defines time granularity database-specific expressions.
The idea here is to make it easy for users to change the time grain
form a datetime (maybe the source grain is arbitrary timestamps, daily
or 5 minutes increments) to another, "truncated" datetime. Since
each database has slightly different but similar datetime functions,
this allows a mapping between database engines and actual functions.
"""
Grain = namedtuple('Grain', 'name label function')
db_time_grains = {
'presto': (
Grain('Time Column', _('Time Column'), '{col}'),
Grain('second', _('second'),
"date_trunc('second', CAST({col} AS TIMESTAMP))"),
Grain('minute', _('minute'),
"date_trunc('minute', CAST({col} AS TIMESTAMP))"),
Grain('hour', _('hour'),
"date_trunc('hour', CAST({col} AS TIMESTAMP))"),
Grain('day', _('day'),
"date_trunc('day', CAST({col} AS TIMESTAMP))"),
Grain('week', _('week'),
"date_trunc('week', CAST({col} AS TIMESTAMP))"),
Grain('month', _('month'),
"date_trunc('month', CAST({col} AS TIMESTAMP))"),
Grain('quarter', _('quarter'),
"date_trunc('quarter', CAST({col} AS TIMESTAMP))"),
Grain("week_ending_saturday", _('week_ending_saturday'),
"date_add('day', 5, date_trunc('week', date_add('day', 1, "
"CAST({col} AS TIMESTAMP))))"),
Grain("week_start_sunday", _('week_start_sunday'),
"date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))"),
),
'mysql': (
Grain('Time Column', _('Time Column'), '{col}'),
Grain("second", _('second'), "DATE_ADD(DATE({col}), "
"INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60"
" + SECOND({col})) SECOND)"),
Grain("minute", _('minute'), "DATE_ADD(DATE({col}), "
"INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)"),
Grain("hour", _('hour'), "DATE_ADD(DATE({col}), "
"INTERVAL HOUR({col}) HOUR)"),
Grain('day', _('day'), 'DATE({col})'),
Grain("week", _('week'), "DATE(DATE_SUB({col}, "
"INTERVAL DAYOFWEEK({col}) - 1 DAY))"),
Grain("month", _('month'), "DATE(DATE_SUB({col}, "
"INTERVAL DAYOFMONTH({col}) - 1 DAY))"),
),
'sqlite': (
Grain('Time Column', _('Time Column'), '{col}'),
Grain('day', _('day'), 'DATE({col})'),
Grain("week", _('week'),
"DATE({col}, -strftime('%w', {col}) || ' days')"),
Grain("month", _('month'),
"DATE({col}, -strftime('%d', {col}) || ' days')"),
),
'postgresql': (
Grain("Time Column", _('Time Column'), "{col}"),
Grain("second", _('second'), "DATE_TRUNC('second', {col})"),
Grain("minute", _('minute'), "DATE_TRUNC('minute', {col})"),
Grain("hour", _('hour'), "DATE_TRUNC('hour', {col})"),
Grain("day", _('day'), "DATE_TRUNC('day', {col})"),
Grain("week", _('week'), "DATE_TRUNC('week', {col})"),
Grain("month", _('month'), "DATE_TRUNC('month', {col})"),
Grain("year", _('year'), "DATE_TRUNC('year', {col})"),
),
'mssql': (
Grain("Time Column", _('Time Column'), "{col}"),
Grain("second", _('second'), "DATEADD(second, "
"DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')"),
Grain("minute", _('minute'), "DATEADD(minute, "
"DATEDIFF(minute, 0, {col}), 0)"),
Grain("5 minute", _('5 minute'), "DATEADD(minute, "
"DATEDIFF(minute, 0, {col}) / 5 * 5, 0)"),
Grain("half hour", _('half hour'), "DATEADD(minute, "
"DATEDIFF(minute, 0, {col}) / 30 * 30, 0)"),
Grain("hour", _('hour'), "DATEADD(hour, "
"DATEDIFF(hour, 0, {col}), 0)"),
Grain("day", _('day'), "DATEADD(day, "
"DATEDIFF(day, 0, {col}), 0)"),
Grain("week", _('week'), "DATEADD(week, "
"DATEDIFF(week, 0, {col}), 0)"),
Grain("month", _('month'), "DATEADD(month, "
"DATEDIFF(month, 0, {col}), 0)"),
Grain("quarter", _('quarter'), "DATEADD(quarter, "
"DATEDIFF(quarter, 0, {col}), 0)"),
Grain("year", _('year'), "DATEADD(year, "
"DATEDIFF(year, 0, {col}), 0)"),
),
}
db_time_grains['redshift'] = db_time_grains['postgresql']
db_time_grains['vertica'] = db_time_grains['postgresql']
for db_type, grains in db_time_grains.items():
if self.sqlalchemy_uri.startswith(db_type):
return grains
def grains_dict(self):
return {grain.name: grain for grain in self.grains()}
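# Illustrative usage (assuming a postgresql:// URI): grains_dict()['hour'].function
# is "DATE_TRUNC('hour', {col})", which callers format with the actual time column.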
def epoch_to_dttm(self, ms=False):
"""Database-specific SQL to convert unix timestamp to datetime
"""
ts2date_exprs = {
'sqlite': "datetime({col}, 'unixepoch')",
'postgresql': "(timestamp 'epoch' + {col} * interval '1 second')",
'mysql': "from_unixtime({col})",
'mssql': "dateadd(S, {col}, '1970-01-01')"
}
ts2date_exprs['redshift'] = ts2date_exprs['postgresql']
ts2date_exprs['vertica'] = ts2date_exprs['postgresql']
for db_type, expr in ts2date_exprs.items():
if self.sqlalchemy_uri.startswith(db_type):
return expr.replace('{col}', '({col}/1000.0)') if ms else expr
raise Exception(_("Unable to convert unix epoch to datetime"))
def get_extra(self):
extra = {}
if self.extra:
try:
extra = json.loads(self.extra)
except Exception as e:
logging.error(e)
return extra
def get_table(self, table_name, schema=None):
extra = self.get_extra()
meta = MetaData(**extra.get('metadata_params', {}))
return Table(
table_name, meta,
schema=schema or None,
autoload=True,
autoload_with=self.get_sqla_engine())
def get_columns(self, table_name, schema=None):
return self.inspector.get_columns(table_name, schema)
def get_indexes(self, table_name, schema=None):
return self.inspector.get_indexes(table_name, schema)
@property
def sqlalchemy_uri_decrypted(self):
conn = sqla.engine.url.make_url(self.sqlalchemy_uri)
conn.password = self.password
return str(conn)
@property
def sql_url(self):
return '/caravel/sql/{}/'.format(self.id)
@property
def perm(self):
return (
"[{obj.database_name}].(id:{obj.id})").format(obj=self)
class SqlaTable(Model, Queryable, AuditMixinNullable):
"""An ORM object for SqlAlchemy table references"""
type = "table"
__tablename__ = 'tables'
id = Column(Integer, primary_key=True)
table_name = Column(String(250))
main_dttm_col = Column(String(250))
description = Column(Text)
default_endpoint = Column(Text)
database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
is_featured = Column(Boolean, default=False)
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship('User', backref='tables', foreign_keys=[user_id])
database = relationship(
'Database', backref='tables', foreign_keys=[database_id])
offset = Column(Integer, default=0)
cache_timeout = Column(Integer)
schema = Column(String(255))
sql = Column(Text)
table_columns = relationship("TableColumn", back_populates="table")
baselink = "tablemodelview"
__table_args__ = (
sqla.UniqueConstraint(
'database_id', 'schema', 'table_name',
name='_customer_location_uc'),)
def __repr__(self):
return self.table_name
@property
def description_markeddown(self):
return utils.markdown(self.description)
@property
def link(self):
table_name = escape(self.table_name)
return Markup(
'<a href="{self.explore_url}">{table_name}</a>'.format(**locals()))
@property
def perm(self):
return (
"[{obj.database}].[{obj.table_name}]"
"(id:{obj.id})").format(obj=self)
@property
def name(self):
return self.table_name
@property
def full_name(self):
return "[{obj.database}].[{obj.table_name}]".format(obj=self)
@property
def dttm_cols(self):
l = [c.column_name for c in self.columns if c.is_dttm]
if self.main_dttm_col not in l:
l.append(self.main_dttm_col)
return l
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.isnum]
@property
def any_dttm_col(self):
cols = self.dttm_cols
if cols:
return cols[0]
@property
def html(self):
t = ((c.column_name, c.type) for c in self.columns)
df = pd.DataFrame(t)
df.columns = ['field', 'type']
return df.to_html(
index=False,
classes=(
"dataframe table table-striped table-bordered "
"table-condensed"))
@property
def metrics_combo(self):
return sorted(
[
(m.metric_name, m.verbose_name or m.metric_name)
for m in self.metrics],
key=lambda x: x[1])
@property
def sql_url(self):
return self.database.sql_url + "?table_name=" + str(self.table_name)
def get_col(self, col_name):
columns = self.table_columns
for col in columns:
if col_name == col.column_name:
return col
def query( # sqla
self, groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=15, row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None,
columns=None):
"""Querying any sqla table from this common interface"""
# For backward compatibility
if granularity not in self.dttm_cols:
granularity = self.main_dttm_col
cols = {col.column_name: col for col in self.columns}
metrics_dict = {m.metric_name: m for m in self.metrics}
qry_start_dttm = datetime.now()
if not granularity and is_timeseries:
raise Exception(_(
"Datetime column not provided as part table configuration "
"and is required by this type of chart"))
metrics_exprs = [metrics_dict.get(m).sqla_col for m in metrics]
if metrics:
main_metric_expr = metrics_exprs[0]
else:
main_metric_expr = literal_column("COUNT(*)").label("ccount")
select_exprs = []
groupby_exprs = []
if groupby:
select_exprs = []
inner_select_exprs = []
inner_groupby_exprs = []
for s in groupby:
col = cols[s]
outer = col.sqla_col
inner = col.sqla_col.label(col.column_name + '__')
groupby_exprs.append(outer)
select_exprs.append(outer)
inner_groupby_exprs.append(inner)
inner_select_exprs.append(inner)
elif columns:
for s in columns:
select_exprs.append(cols[s].sqla_col)
metrics_exprs = []
if granularity:
# TODO: sqlalchemy 1.2 release should be doing this on its own.
# Patch the column clause only when its type is DateTime and a
# granularity has been selected.
@compiles(ColumnClause)
def _(element, compiler, **kw):
text = compiler.visit_column(element, **kw)
try:
if element.is_literal and hasattr(element.type, 'python_type') and \
type(element.type) is DateTime:
text = text.replace('%%', '%')
except NotImplementedError:
pass # Some elements raise NotImplementedError for python_type
return text
dttm_col = cols[granularity]
dttm_expr = dttm_col.sqla_col.label('timestamp')
timestamp = dttm_expr
# Transforming time grain into an expression based on configuration
time_grain_sqla = extras.get('time_grain_sqla')
if time_grain_sqla:
if dttm_col.python_date_format == 'epoch_s':
dttm_expr = self.database.epoch_to_dttm().format(
col=dttm_expr)
elif dttm_col.python_date_format == 'epoch_ms':
dttm_expr = self.database.epoch_to_dttm(ms=True).format(
col=dttm_expr)
udf = self.database.grains_dict().get(time_grain_sqla, '{col}')
timestamp_grain = literal_column(
udf.function.format(col=dttm_expr), type_=DateTime).label('timestamp')
else:
timestamp_grain = timestamp
if is_timeseries:
select_exprs += [timestamp_grain]
groupby_exprs += [timestamp_grain]
outer_from = text(dttm_col.dttm_sql_literal(from_dttm))
outer_to = text(dttm_col.dttm_sql_literal(to_dttm))
time_filter = [
timestamp >= outer_from,
timestamp <= outer_to,
]
inner_time_filter = copy(time_filter)
if inner_from_dttm:
inner_time_filter[0] = timestamp >= text(
dttm_col.dttm_sql_literal(inner_from_dttm))
if inner_to_dttm:
inner_time_filter[1] = timestamp <= text(
dttm_col.dttm_sql_literal(inner_to_dttm))
else:
inner_time_filter = []
select_exprs += metrics_exprs
qry = select(select_exprs)
tbl = table(self.table_name)
if self.schema:
tbl.schema = self.schema
# Supporting arbitrary SQL statements in place of tables
if self.sql:
tbl = text('(' + self.sql + ') as expr_qry ')
if not columns:
qry = qry.group_by(*groupby_exprs)
where_clause_and = []
having_clause_and = []
for col, op, eq in filter:
col_obj = cols[col]
if op in ('in', 'not in'):
splitted = FillterPattern.split(eq)[1::2]
values = [types.replace("'", '').strip() for types in splitted]
cond = col_obj.sqla_col.in_(values)
if op == 'not in':
cond = ~cond
where_clause_and.append(cond)
if extras and 'where' in extras:
where_clause_and += [text(extras['where'])]
if extras and 'having' in extras:
having_clause_and += [text(extras['having'])]
if granularity:
qry = qry.where(and_(*(time_filter + where_clause_and)))
else:
qry = qry.where(and_(*where_clause_and))
qry = qry.having(and_(*having_clause_and))
if groupby:
qry = qry.order_by(desc(main_metric_expr))
elif orderby:
for col, ascending in orderby:
direction = asc if ascending else desc
qry = qry.order_by(direction(col))
qry = qry.limit(row_limit)
if timeseries_limit and groupby:
# some SQL dialects require ORDER BY expressions
# to also appear in the SELECT clause
inner_select_exprs += [main_metric_expr]
subq = select(inner_select_exprs)
subq = subq.select_from(tbl)
subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
subq = subq.group_by(*inner_groupby_exprs)
subq = subq.order_by(desc(main_metric_expr))
subq = subq.limit(timeseries_limit)
on_clause = []
for i, gb in enumerate(groupby):
on_clause.append(
groupby_exprs[i] == column(gb + '__'))
tbl = tbl.join(subq.alias(), and_(*on_clause))
qry = qry.select_from(tbl)
engine = self.database.get_sqla_engine()
sql = "{}".format(
qry.compile(
engine, compile_kwargs={"literal_binds": True},),
)
df = pd.read_sql_query(
sql=sql,
con=engine
)
sql = sqlparse.format(sql, reindent=True)
return QueryResult(
df=df, duration=datetime.now() - qry_start_dttm, query=sql)
def get_sqla_table_object(self):
return self.database.get_table(self.table_name, schema=self.schema)
def fetch_metadata(self):
"""Fetches the metadata for the table and merges it in"""
try:
table = self.get_sqla_table_object()
except Exception:
raise Exception(
"Table doesn't seem to exist in the specified database, "
"couldn't fetch column information")
TC = TableColumn # noqa shortcut to class
M = SqlMetric # noqa
metrics = []
any_date_col = None
for col in table.columns:
try:
datatype = "{}".format(col.type).upper()
except Exception as e:
datatype = "UNKNOWN"
logging.error(
"Unrecognized data type in {}.{}".format(table, col.name))
logging.exception(e)
dbcol = (
db.session
.query(TC)
.filter(TC.table == self)
.filter(TC.column_name == col.name)
.first()
)
db.session.flush()
if not dbcol:
dbcol = TableColumn(column_name=col.name, type=datatype)
dbcol.groupby = dbcol.is_string
dbcol.filterable = dbcol.is_string
dbcol.sum = dbcol.isnum
dbcol.is_dttm = dbcol.is_time
db.session.merge(self)
self.columns.append(dbcol)
if not any_date_col and dbcol.is_time:
any_date_col = col.name
quoted = "{}".format(
column(dbcol.column_name).compile(dialect=db.engine.dialect))
if dbcol.sum:
metrics.append(M(
metric_name='sum__' + dbcol.column_name,
verbose_name='sum__' + dbcol.column_name,
metric_type='sum',
expression="SUM({})".format(quoted)
))
if dbcol.max:
metrics.append(M(
metric_name='max__' + dbcol.column_name,
verbose_name='max__' + dbcol.column_name,
metric_type='max',
expression="MAX({})".format(quoted)
))
if dbcol.min:
metrics.append(M(
metric_name='min__' + dbcol.column_name,
verbose_name='min__' + dbcol.column_name,
metric_type='min',
expression="MIN({})".format(quoted)
))
if dbcol.count_distinct:
metrics.append(M(
metric_name='count_distinct__' + dbcol.column_name,
verbose_name='count_distinct__' + dbcol.column_name,
metric_type='count_distinct',
expression="COUNT(DISTINCT {})".format(quoted)
))
dbcol.type = datatype
db.session.merge(self)
db.session.commit()
metrics.append(M(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
expression="COUNT(*)"
))
for metric in metrics:
m = (
db.session.query(M)
.filter(M.metric_name == metric.metric_name)
.filter(M.table_id == self.id)
.first()
)
metric.table_id = self.id
if not m:
db.session.add(metric)
db.session.commit()
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
class SqlMetric(Model, AuditMixinNullable):
"""ORM object for metrics, each table can have multiple metrics"""
__tablename__ = 'sql_metrics'
id = Column(Integer, primary_key=True)
metric_name = Column(String(512))
verbose_name = Column(String(1024))
metric_type = Column(String(32))
table_id = Column(Integer, ForeignKey('tables.id'))
table = relationship(
'SqlaTable', backref='metrics', foreign_keys=[table_id])
expression = Column(Text)
description = Column(Text)
is_restricted = Column(Boolean, default=False, nullable=True)
d3format = Column(String(128))
@property
def sqla_col(self):
name = self.metric_name
return literal_column(self.expression).label(name)
@property
def perm(self):
return (
"{parent_name}.[{obj.metric_name}](id:{obj.id})"
).format(obj=self,
parent_name=self.table.full_name) if self.table else None
class TableColumn(Model, AuditMixinNullable):
"""ORM object for table columns, each table can have multiple columns"""
__tablename__ = 'table_columns'
id = Column(Integer, primary_key=True)
table_id = Column(Integer, ForeignKey('tables.id'))
table = relationship(
'SqlaTable', backref='columns', foreign_keys=[table_id])
column_name = Column(String(255))
verbose_name = Column(String(1024))
is_dttm = Column(Boolean, default=False)
is_active = Column(Boolean, default=True)
type = Column(String(32), default='')
groupby = Column(Boolean, default=False)
count_distinct = Column(Boolean, default=False)
sum = Column(Boolean, default=False)
max = Column(Boolean, default=False)
min = Column(Boolean, default=False)
filterable = Column(Boolean, default=False)
expression = Column(Text, default='')
description = Column(Text, default='')
python_date_format = Column(String(255))
database_expression = Column(String(255))
num_types = ('DOUBLE', 'FLOAT', 'INT', 'BIGINT', 'LONG')
date_types = ('DATE', 'TIME')
str_types = ('VARCHAR', 'STRING', 'CHAR')
def __repr__(self):
return self.column_name
@property
def isnum(self):
return any([t in self.type.upper() for t in self.num_types])
@property
def is_time(self):
return any([t in self.type.upper() for t in self.date_types])
@property
def is_string(self):
return any([t in self.type.upper() for t in self.str_types])
@property
def sqla_col(self):
name = self.column_name
if not self.expression:
col = column(self.column_name).label(name)
else:
col = literal_column(self.expression).label(name)
return col
def dttm_sql_literal(self, dttm):
"""Convert datetime object to string
If database_expression is empty, the internal dttm
will be parsed as the string with the pattern that
the user inputted (python_date_format)
If database_expression is not empty, the internal dttm
will be parsed as the sql sentence for the database to convert
"""
tf = self.python_date_format or '%Y-%m-%d %H:%M:%S.%f'
if self.database_expression:
return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
elif tf == 'epoch_s':
return str((dttm - datetime(1970, 1, 1)).total_seconds())
elif tf == 'epoch_ms':
return str((dttm - datetime(1970, 1, 1)).total_seconds()*1000.0)
else:
default = "'{}'".format(dttm.strftime(tf))
iso = dttm.isoformat()
d = {
'mssql': "CONVERT(DATETIME, '{}', 126)".format(iso), # untested
'mysql': default,
'oracle':
"""TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')""".format(
dttm.isoformat()),
'presto': default,
'sqlite': default,
}
for k, v in d.items():
if self.table.database.sqlalchemy_uri.startswith(k):
return v
return default
class DruidCluster(Model, AuditMixinNullable):
"""ORM object referencing the Druid clusters"""
__tablename__ = 'clusters'
id = Column(Integer, primary_key=True)
cluster_name = Column(String(250), unique=True)
coordinator_host = Column(String(255))
coordinator_port = Column(Integer)
coordinator_endpoint = Column(
String(255), default='druid/coordinator/v1/metadata')
broker_host = Column(String(255))
broker_port = Column(Integer)
broker_endpoint = Column(String(255), default='druid/v2')
metadata_last_refreshed = Column(DateTime)
def __repr__(self):
return self.cluster_name
def get_pydruid_client(self):
cli = PyDruid(
"http://{0}:{1}/".format(self.broker_host, self.broker_port),
self.broker_endpoint)
return cli
def get_datasources(self):
endpoint = (
"http://{obj.coordinator_host}:{obj.coordinator_port}/"
"{obj.coordinator_endpoint}/datasources"
).format(obj=self)
return json.loads(requests.get(endpoint).text)
def get_druid_version(self):
endpoint = (
"http://{obj.coordinator_host}:{obj.coordinator_port}/status"
).format(obj=self)
return json.loads(requests.get(endpoint).text)['version']
def refresh_datasources(self):
self.druid_version = self.get_druid_version()
for datasource in self.get_datasources():
if datasource not in config.get('DRUID_DATA_SOURCE_BLACKLIST'):
DruidDatasource.sync_to_db(datasource, self)
@property
def perm(self):
return "[{obj.cluster_name}].(id:{obj.id})".format(obj=self)
class DruidDatasource(Model, AuditMixinNullable, Queryable):
"""ORM object referencing Druid datasources (tables)"""
type = "druid"
baselink = "druiddatasourcemodelview"
__tablename__ = 'datasources'
id = Column(Integer, primary_key=True)
datasource_name = Column(String(255), unique=True)
is_featured = Column(Boolean, default=False)
is_hidden = Column(Boolean, default=False)
description = Column(Text)
default_endpoint = Column(Text)
user_id = Column(Integer, ForeignKey('ab_user.id'))
owner = relationship('User', backref='datasources', foreign_keys=[user_id])
cluster_name = Column(
String(250), ForeignKey('clusters.cluster_name'))
cluster = relationship(
'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
offset = Column(Integer, default=0)
cache_timeout = Column(Integer)
@property
def metrics_combo(self):
return sorted(
[(m.metric_name, m.verbose_name) for m in self.metrics],
key=lambda x: x[1])
@property
def num_cols(self):
return [c.column_name for c in self.columns if c.isnum]
@property
def name(self):
return self.datasource_name
@property
def perm(self):
return (
"[{obj.cluster_name}].[{obj.datasource_name}]"
"(id:{obj.id})").format(obj=self)
@property
def link(self):
name = escape(self.datasource_name)
return Markup('<a href="{self.url}">{name}</a>').format(**locals())
@property
def full_name(self):
return (
"[{obj.cluster_name}]."
"[{obj.datasource_name}]").format(obj=self)
def __repr__(self):
return self.datasource_name
@renders('datasource_name')
def datasource_link(self):
url = "/caravel/explore/{obj.type}/{obj.id}/".format(obj=self)
name = escape(self.datasource_name)
return Markup('<a href="{url}">{name}</a>'.format(**locals()))
def get_metric_obj(self, metric_name):
return [
m.json_obj for m in self.metrics
if m.metric_name == metric_name
][0]
@staticmethod
def version_higher(v1, v2):
"""is v1 higher than v2
>>> DruidDatasource.version_higher('0.8.2', '0.9.1')
False
>>> DruidDatasource.version_higher('0.8.2', '0.6.1')
True
>>> DruidDatasource.version_higher('0.8.2', '0.8.2')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9.BETA')
False
>>> DruidDatasource.version_higher('0.8.2', '0.9')
False
"""
def int_or_0(v):
try:
v = int(v)
except (TypeError, ValueError):
v = 0
return v
v1nums = [int_or_0(n) for n in v1.split('.')]
v2nums = [int_or_0(n) for n in v2.split('.')]
v1nums = (v1nums + [0, 0, 0])[:3]
v2nums = (v2nums + [0, 0, 0])[:3]
return v1nums[0] > v2nums[0] or \
(v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
(v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
def latest_metadata(self):
"""Returns segment metadata from the latest segment"""
client = self.cluster.get_pydruid_client()
results = client.time_boundary(datasource=self.datasource_name)
if not results:
return
max_time = results[0]['result']['maxTime']
max_time = parse(max_time)
# Query segmentMetadata for 7 days back. However, due to a bug, we
# need to end this interval more than 1 day ago to exclude realtime
# segments, which triggered a bug (fixed in Druid 0.8.2).
# https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
start = (0 if self.version_higher(self.cluster.druid_version, '0.8.2') else 1)
intervals = (max_time - timedelta(days=7)).isoformat() + '/'
intervals += (max_time - timedelta(days=start)).isoformat()
segment_metadata = client.segment_metadata(
datasource=self.datasource_name,
intervals=intervals)
if segment_metadata:
return segment_metadata[-1]['columns']
def generate_metrics(self):
for col in self.columns:
col.generate_metrics()
@classmethod
def sync_to_db_from_config(cls, druid_config, user, cluster):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session()
datasource = (
session.query(DruidDatasource)
.filter_by(
datasource_name=druid_config['name'])
).first()
# Create a new datasource.
if not datasource:
datasource = DruidDatasource(
datasource_name=druid_config['name'],
cluster=cluster,
owner=user,
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
dimensions = druid_config['dimensions']
for dim in dimensions:
col_obj = (
session.query(DruidColumn)
.filter_by(
datasource_name=druid_config['name'],
column_name=dim)
).first()
if not col_obj:
col_obj = DruidColumn(
datasource_name=druid_config['name'],
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type="STRING",
datasource=datasource
)
session.add(col_obj)
# Import Druid metrics
for metric_spec in druid_config["metrics_spec"]:
metric_name = metric_spec["name"]
metric_type = metric_spec["type"]
metric_json = json.dumps(metric_spec)
if metric_type == "count":
metric_type = "longSum"
metric_json = json.dumps({
"type": "longSum",
"name": metric_name,
"fieldName": metric_name,
})
metric_obj = (
session.query(DruidMetric)
.filter_by(
datasource_name=druid_config['name'],
metric_name=metric_name)
).first()
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name="%s(%s)" % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
"Imported from the airolap config dir for %s" %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
@classmethod
def sync_to_db(cls, name, cluster):
"""Fetches metadata for that datasource and merges the Caravel db"""
logging.info("Syncing Druid datasource [{}]".format(name))
session = get_session()
datasource = session.query(cls).filter_by(datasource_name=name).first()
if not datasource:
datasource = cls(datasource_name=name)
session.add(datasource)
flasher("Adding new datasource [{}]".format(name), "success")
else:
flasher("Refreshing datasource [{}]".format(name), "info")
session.flush()
datasource.cluster = cluster
session.flush()
cols = datasource.latest_metadata()
if not cols:
return
for col in cols:
col_obj = (
session
.query(DruidColumn)
.filter_by(datasource_name=name, column_name=col)
.first()
)
datatype = cols[col]['type']
if not col_obj:
col_obj = DruidColumn(datasource_name=name, column_name=col)
session.add(col_obj)
if datatype == "STRING":
col_obj.groupby = True
col_obj.filterable = True
if datatype == "hyperUnique" or datatype == "thetaSketch":
col_obj.count_distinct = True
if col_obj:
col_obj.type = cols[col]['type']
session.flush()
col_obj.datasource = datasource
col_obj.generate_metrics()
session.flush()
def query( # druid
self, groupby, metrics,
granularity,
from_dttm, to_dttm,
filter=None, # noqa
is_timeseries=True,
timeseries_limit=None,
row_limit=None,
inner_from_dttm=None, inner_to_dttm=None,
orderby=None,
extras=None, # noqa
select=None, # noqa
columns=None, ):
"""Runs a query against Druid and returns a dataframe.
This query interface is common to SqlAlchemy and Druid
"""
# TODO refactor into using a TBD Query object
qry_start_dttm = datetime.now()
inner_from_dttm = inner_from_dttm or from_dttm
inner_to_dttm = inner_to_dttm or to_dttm
# add tzinfo to naive datetimes using the configured Druid timezone
from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))
query_str = ""
metrics_dict = {m.metric_name: m for m in self.metrics}
all_metrics = []
post_aggs = {}
def recursive_get_fields(_conf):
_fields = _conf.get('fields', [])
field_names = []
for _f in _fields:
_type = _f.get('type')
if _type in ['fieldAccess', 'hyperUniqueCardinality']:
field_names.append(_f.get('fieldName'))
elif _type == 'arithmetic':
field_names += recursive_get_fields(_f)
return list(set(field_names))
for metric_name in metrics:
metric = metrics_dict[metric_name]
if metric.metric_type != 'postagg':
all_metrics.append(metric_name)
else:
conf = metric.json_obj
all_metrics += recursive_get_fields(conf)
all_metrics += conf.get('fieldNames', [])
if conf.get('type') == 'javascript':
post_aggs[metric_name] = JavascriptPostAggregator(
name=conf.get('name'),
field_names=conf.get('fieldNames'),
function=conf.get('function'))
else:
post_aggs[metric_name] = Postaggregator(
conf.get('fn', "/"),
conf.get('fields', []),
conf.get('name', ''))
aggregations = {
m.metric_name: m.json_obj
for m in self.metrics
if m.metric_name in all_metrics
}
rejected_metrics = [
m.metric_name for m in self.metrics
if m.is_restricted and
m.metric_name in aggregations.keys() and
not sm.has_access('metric_access', m.perm)
]
if rejected_metrics:
raise MetricPermException(
"Access to the metrics denied: " + ', '.join(rejected_metrics)
)
granularity = granularity or "all"
if granularity != "all":
granularity = utils.parse_human_timedelta(
granularity).total_seconds() * 1000
if not isinstance(granularity, string_types):
granularity = {"type": "duration", "duration": granularity}
origin = extras.get('druid_time_origin')
if origin:
dttm = utils.parse_human_datetime(origin)
granularity['origin'] = dttm.isoformat()
qry = dict(
datasource=self.datasource_name,
dimensions=groupby,
aggregations=aggregations,
granularity=granularity,
post_aggregations=post_aggs,
intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
)
filters = self.get_filters(filter)
if filters:
qry['filter'] = filters
having_filters = self.get_having_filters(extras.get('having_druid'))
if having_filters:
qry['having'] = having_filters
client = self.cluster.get_pydruid_client()
orig_filters = filters
if timeseries_limit and is_timeseries:
# Limit the number of timeseries by doing a two-phase query
pre_qry = deepcopy(qry)
pre_qry['granularity'] = "all"
pre_qry['limit_spec'] = {
"type": "default",
"limit": timeseries_limit,
'intervals': (
inner_from_dttm.isoformat() + '/' +
inner_to_dttm.isoformat()),
"columns": [{
"dimension": metrics[0] if metrics else self.metrics[0],
"direction": "descending",
}],
}
client.groupby(**pre_qry)
query_str += "// Two phase query\n// Phase 1\n"
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2) + "\n"
query_str += "//\nPhase 2 (built based on phase one's results)\n"
df = client.export_pandas()
if df is not None and not df.empty:
dims = qry['dimensions']
filters = []
for unused, row in df.iterrows():
fields = []
for dim in dims:
f = Dimension(dim) == row[dim]
fields.append(f)
if len(fields) > 1:
filt = Filter(type="and", fields=fields)
filters.append(filt)
elif fields:
filters.append(fields[0])
if filters:
ff = Filter(type="or", fields=filters)
if not orig_filters:
qry['filter'] = ff
else:
qry['filter'] = Filter(type="and", fields=[
ff,
orig_filters])
qry['limit_spec'] = None
if row_limit:
qry['limit_spec'] = {
"type": "default",
"limit": row_limit,
"columns": [{
"dimension": metrics[0] if metrics else self.metrics[0],
"direction": "descending",
}],
}
client.groupby(**qry)
query_str += json.dumps(
client.query_builder.last_query.query_dict, indent=2)
df = client.export_pandas()
if df is None or df.size == 0:
raise Exception(_("No data was returned."))
if (
not is_timeseries and
granularity == "all" and
'timestamp' in df.columns):
del df['timestamp']
# Reordering columns
cols = []
if 'timestamp' in df.columns:
cols += ['timestamp']
cols += [col for col in groupby if col in df.columns]
cols += [col for col in metrics if col in df.columns]
df = df[cols]
return QueryResult(
df=df,
query=query_str,
duration=datetime.now() - qry_start_dttm)
@staticmethod
def get_filters(raw_filters):
filters = None
for col, op, eq in raw_filters:
cond = None
if op == '==':
cond = Dimension(col) == eq
elif op == '!=':
cond = ~(Dimension(col) == eq)
elif op in ('in', 'not in'):
fields = []
# Distinguish quoted values from regular value types
splitted = FillterPattern.split(eq)[1::2]
values = [types.replace("'", '') for types in splitted]
if len(values) > 1:
for s in values:
s = s.strip()
fields.append(Dimension(col) == s)
cond = Filter(type="or", fields=fields)
else:
cond = Dimension(col) == eq
if op == 'not in':
cond = ~cond
elif op == 'regex':
cond = Filter(type="regex", pattern=eq, dimension=col)
if filters:
filters = Filter(type="and", fields=[
cond,
filters
])
else:
filters = cond
return filters
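# Illustrative mapping (sketch): a raw filter like ('gender', '==', 'male')
# becomes Dimension('gender') == 'male'; successive conditions are AND-ed together.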
def _get_having_obj(self, col, op, eq):
cond = None
if op == '==':
if col in self.column_names:
cond = DimSelector(dimension=col, value=eq)
else:
cond = Aggregation(col) == eq
elif op == '>':
cond = Aggregation(col) > eq
elif op == '<':
cond = Aggregation(col) < eq
return cond
def get_having_filters(self, raw_filters):
filters = None
reversed_op_map = {
'!=': '==',
'>=': '<',
'<=': '>'
}
for col, op, eq in raw_filters:
cond = None
if op in ['==', '>', '<']:
cond = self._get_having_obj(col, op, eq)
elif op in reversed_op_map:
cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
if filters:
filters = filters & cond
else:
filters = cond
return filters
class Log(Model):
"""ORM object used to log Caravel actions to the database"""
__tablename__ = 'logs'
id = Column(Integer, primary_key=True)
action = Column(String(512))
user_id = Column(Integer, ForeignKey('ab_user.id'))
dashboard_id = Column(Integer)
slice_id = Column(Integer)
json = Column(Text)
user = relationship('User', backref='logs', foreign_keys=[user_id])
dttm = Column(DateTime, default=func.now())
# Pass the callable (not its result) so the default date is computed per insert.
dt = Column(Date, default=date.today)
@classmethod
def log_this(cls, f):
"""Decorator to log user actions"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
user_id = None
if g.user:
user_id = g.user.get_id()
d = request.args.to_dict()
d.update(kwargs)
slice_id = d.get('slice_id', 0)
try:
slice_id = int(slice_id) if slice_id else 0
except ValueError:
slice_id = 0
params = ""
try:
params = json.dumps(d)
except:
pass
log = cls(
action=f.__name__,
json=params,
dashboard_id=d.get('dashboard_id') or None,
slice_id=slice_id,
user_id=user_id)
db.session.add(log)
db.session.commit()
return f(*args, **kwargs)
return wrapper
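# Illustrative usage (sketch): decorate a Flask view with @Log.log_this so each
# call is recorded with the acting user, slice/dashboard ids and request params.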
class DruidMetric(Model, AuditMixinNullable):
"""ORM object referencing Druid metrics for a datasource"""
__tablename__ = 'metrics'
id = Column(Integer, primary_key=True)
metric_name = Column(String(512))
verbose_name = Column(String(1024))
metric_type = Column(String(32))
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship('DruidDatasource', backref='metrics',
enable_typechecks=False)
json = Column(Text)
description = Column(Text)
is_restricted = Column(Boolean, default=False, nullable=True)
d3format = Column(String(128))
@property
def json_obj(self):
try:
obj = json.loads(self.json)
except Exception:
obj = {}
return obj
@property
def perm(self):
return (
"{parent_name}.[{obj.metric_name}](id:{obj.id})"
).format(obj=self,
parent_name=self.datasource.full_name
) if self.datasource else None
class DruidColumn(Model, AuditMixinNullable):
"""ORM model for storing Druid datasource column metadata"""
__tablename__ = 'columns'
id = Column(Integer, primary_key=True)
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
# Setting enable_typechecks=False disables polymorphic inheritance.
datasource = relationship('DruidDatasource', backref='columns',
enable_typechecks=False)
column_name = Column(String(255))
is_active = Column(Boolean, default=True)
type = Column(String(32))
groupby = Column(Boolean, default=False)
count_distinct = Column(Boolean, default=False)
sum = Column(Boolean, default=False)
max = Column(Boolean, default=False)
min = Column(Boolean, default=False)
filterable = Column(Boolean, default=False)
description = Column(Text)
def __repr__(self):
return self.column_name
@property
def isnum(self):
return self.type in ('LONG', 'DOUBLE', 'FLOAT', 'INT')
def generate_metrics(self):
"""Generate metrics based on the column metadata"""
M = DruidMetric # noqa
metrics = []
metrics.append(DruidMetric(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
json=json.dumps({'type': 'count', 'name': 'count'})
))
# Somehow we need to reassign this for UDAFs
if self.type in ('DOUBLE', 'FLOAT'):
corrected_type = 'DOUBLE'
else:
corrected_type = self.type
if self.sum and self.isnum:
mt = corrected_type.lower() + 'Sum'
name = 'sum__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='sum',
verbose_name='SUM({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.min and self.isnum:
mt = corrected_type.lower() + 'Min'
name = 'min__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='min',
verbose_name='MIN({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.max and self.isnum:
mt = corrected_type.lower() + 'Max'
name = 'max__' + self.column_name
metrics.append(DruidMetric(
metric_name=name,
metric_type='max',
verbose_name='MAX({})'.format(self.column_name),
json=json.dumps({
'type': mt, 'name': name, 'fieldName': self.column_name})
))
if self.count_distinct:
name = 'count_distinct__' + self.column_name
if self.type == 'hyperUnique' or self.type == 'thetaSketch':
metrics.append(DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type=self.type,
json=json.dumps({
'type': self.type,
'name': name,
'fieldName': self.column_name
})
))
else:
mt = 'count_distinct'
metrics.append(DruidMetric(
metric_name=name,
verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
metric_type='count_distinct',
json=json.dumps({
'type': 'cardinality',
'name': name,
'fieldNames': [self.column_name]})
))
session = get_session()
new_metrics = []
for metric in metrics:
m = (
session.query(M)
.filter(M.metric_name == metric.metric_name)
.filter(M.datasource_name == self.datasource_name)
.filter(DruidCluster.cluster_name == self.datasource.cluster_name)
.first()
)
metric.datasource_name = self.datasource_name
if not m:
new_metrics.append(metric)
session.add(metric)
session.flush()
utils.init_metrics_perm(caravel, new_metrics)
class FavStar(Model):
__tablename__ = 'favstar'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('ab_user.id'))
class_name = Column(String(50))
obj_id = Column(Integer)
dttm = Column(DateTime, default=func.now())
class QueryStatus:
CANCELLED = 'cancelled'
FAILED = 'failed'
PENDING = 'pending'
RUNNING = 'running'
SCHEDULED = 'scheduled'
SUCCESS = 'success'
TIMED_OUT = 'timed_out'
class Query(Model):
"""ORM model for SQL query"""
__tablename__ = 'query'
id = Column(Integer, primary_key=True)
client_id = Column(String(11), unique=True)
database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
# Store the tmp table into the DB only if the user asks for it.
tmp_table_name = Column(String(256))
user_id = Column(
Integer, ForeignKey('ab_user.id'), nullable=True)
status = Column(String(16), default=QueryStatus.PENDING)
tab_name = Column(String(256))
sql_editor_id = Column(String(256))
schema = Column(String(256))
sql = Column(Text)
# Query to retrieve the results,
# used only when select_as_cta_used is true.
select_sql = Column(Text)
executed_sql = Column(Text)
# Could be configured in the caravel config.
limit = Column(Integer)
limit_used = Column(Boolean, default=False)
select_as_cta = Column(Boolean)
select_as_cta_used = Column(Boolean, default=False)
progress = Column(Integer, default=0) # 1..100
# # of rows in the result set or rows modified.
rows = Column(Integer)
error_message = Column(Text)
# Using Numeric in place of DateTime for sub-second precision
# stored as seconds since epoch, allowing for milliseconds
start_time = Column(Numeric(precision=3))
end_time = Column(Numeric(precision=3))
changed_on = Column(
DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=True)
database = relationship(
'Database', foreign_keys=[database_id], backref='queries')
__table_args__ = (
sqla.Index('ti_user_id_changed_on', user_id, changed_on),
)
def to_dict(self):
return {
'changedOn': self.changed_on,
'changed_on': self.changed_on.isoformat(),
'dbId': self.database_id,
'endDttm': self.end_time,
'errorMessage': self.error_message,
'executedSql': self.executed_sql,
'id': self.client_id,
'limit': self.limit,
'progress': self.progress,
'rows': self.rows,
'schema': self.schema,
'ctas': self.select_as_cta,
'serverId': self.id,
'sql': self.sql,
'sqlEditorId': self.sql_editor_id,
'startDttm': self.start_time,
'state': self.status.lower(),
'tab': self.tab_name,
'tempTable': self.tmp_table_name,
'userId': self.user_id,
}
@property
def name(self):
ts = datetime.now().isoformat()
ts = ts.replace('-', '').replace(':', '').split('.')[0]
tab = self.tab_name.replace(' ', '_').lower() if self.tab_name else 'notab'
tab = re.sub(r'\W+', '', tab)
return "sqllab_{tab}_{ts}".format(**locals())
class DatasourceAccessRequest(Model, AuditMixinNullable):
"""ORM model for the access requests for datasources and dbs."""
__tablename__ = 'access_request'
id = Column(Integer, primary_key=True)
datasource_id = Column(Integer)
datasource_type = Column(String(200))
ROLES_BLACKLIST = set(['Admin', 'Alpha', 'Gamma', 'Public'])
@property
def cls_model(self):
return SourceRegistry.sources[self.datasource_type]
@property
def username(self):
return self.creator()
@property
def datasource(self):
return self.get_datasource
@datasource.getter
@utils.memoized
def get_datasource(self):
ds = db.session.query(self.cls_model).filter_by(
id=self.datasource_id).first()
return ds
@property
def datasource_link(self):
return self.datasource.link
@property
def roles_with_datasource(self):
action_list = ''
pv = sm.find_permission_view_menu(
'datasource_access', self.datasource.perm)
for r in pv.role:
if r.name in self.ROLES_BLACKLIST:
continue
url = (
'/caravel/approve?datasource_type={self.datasource_type}&'
'datasource_id={self.datasource_id}&'
'created_by={self.created_by.username}&role_to_grant={r.name}'
.format(**locals())
)
href = '<a href="{}">Grant {} Role</a>'.format(url, r.name)
action_list = action_list + '<li>' + href + '</li>'
return '<ul>' + action_list + '</ul>'
@property
def user_roles(self):
action_list = ''
for r in self.created_by.roles:
url = (
'/caravel/approve?datasource_type={self.datasource_type}&'
'datasource_id={self.datasource_id}&'
'created_by={self.created_by.username}&role_to_extend={r.name}'
.format(**locals())
)
href = '<a href="{}">Extend {} Role</a>'.format(url, r.name)
if r.name in self.ROLES_BLACKLIST:
href = "{} Role".format(r.name)
action_list = action_list + '<li>' + href + '</li>'
return '<ul>' + action_list + '</ul>'
class EchartMapType(Model):
"""
the map tile file object!
"""
__tablename__ = "echart_map_type"
id = Column(Integer, primary_key=True)
file = Column(FileColumn, nullable=False)
map_name = Column(String(150))
def download(self):
return Markup(
'<a href="' + url_for('EchartMapTypeModelView.download', filename=str(self.file)) + '">Download</a>')
def file_name(self):
return get_file_original_name(str(self.file))
@property
def map_url(self):
return url_for('EchartMapTypeModelView.download', filename=str(self.file)) | wbsljh/caravel | caravel/models.py | Python | apache-2.0 | 73,950 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
This file contains various constants and helper code to generate constants
that are used in the Statistical Variable renaming.
"""
import pandas as pd
import collections
import re
def capitalizeFirst(word):
""" Capitalizes the first letter of a string. """
return word[0].upper() + word[1:]
def standard_name_remapper(orig_name):
""" General renaming function for long strings into Pascal case.
Text inbetween trailing parentheses is removed.
Commas, dashes, and "ands" are removed. Then string is converted into Pascal
case without and spaces present.
"""
# Remove any trailing parentheses.
# TODO(tjann): to check if this is safe.
paren_start = orig_name.find("(")
if paren_start != -1:
orig_name = orig_name[:paren_start]
# Removes separating words.
orig_name = orig_name.replace(",", " ")
orig_name = orig_name.replace("-", " ")
orig_name = orig_name.replace("and ", "")
return "".join([word.capitalize() for word in orig_name.split()])
def _create_naics_map():
""" Downloads all NAICS codes across long and short form codes. """
# Read in list of industry topics.
naics_codes = pd.read_excel(
"https://www.census.gov/eos/www/naics/2017NAICS/2-6%20digit_2017_Codes.xlsx"
)
naics_codes = naics_codes.iloc[:, [1, 2]]
naics_codes.columns = ['NAICSCode', 'Title']
# Replace all ranges with individual rows. E.g. 31-33 -> 31, 32, 33.
def range_to_array(read_code):
if isinstance(read_code, str) and "-" in read_code:
lower, upper = read_code.split("-")
return list(range(int(lower), int(upper) + 1))
return read_code
naics_codes = naics_codes.dropna()
naics_codes['NAICSCode'] = naics_codes['NAICSCode'].apply(range_to_array)
naics_codes = naics_codes.explode('NAICSCode')
# Add unclassified code which is used in some statistical variables.
naics_codes = naics_codes.append(
{
"NAICSCode": 99,
"Title": "Nonclassifiable"
}, ignore_index=True)
# Query for only two digit codes.
short_codes = naics_codes[naics_codes['NAICSCode'] < 100]
short_codes = short_codes.set_index("NAICSCode")
short_codes = short_codes['Title'].to_dict()
# Read in overview codes.
overview_codes = pd.read_csv(
"https://data.bls.gov/cew/doc/titles/industry/high_level_industries.csv"
)
overview_codes.columns = ["NAICSCode", "Title"]
overview_codes = overview_codes.set_index("NAICSCode")
overview_codes = overview_codes['Title'].to_dict()
# Combine the two sources of codes.
NAICS_MAP = {}
combined_codes = short_codes
combined_codes.update(overview_codes)
# Rename industries into Pascal case.
for code, orig_name in combined_codes.items():
NAICS_MAP[str(code)] = standard_name_remapper(orig_name)
# Other edge cases.
NAICS_MAP['00'] = 'Unclassified'
return NAICS_MAP
# TODO(iancostello): Consider adding function memoization.
NAICS_MAP = _create_naics_map()
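# Illustrative entries: NAICS_MAP['99'] is "Nonclassifiable" (added above), and a
# two-digit sector such as '23' maps to its Pascal-cased census title (e.g.
# "Construction"); exact titles depend on the downloaded source files.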
### True Constants
# Template of Stat Var MCF.
TEMPLATE_STAT_VAR = """
Node: dcid:{human_readable_dcid}
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
{CONSTRAINTS}"""
# Main query for stat vars. Combines across populations and observations
# to create statistical variables.
QUERY_FOR_ALL_STAT_VARS = """
SELECT DISTINCT
SP.population_type as populationType,
{CONSTRAINTS}
{POPULATIONS}
O.measurement_qualifier AS measurementQualifier,
O.measurement_denominator as measurementDenominator,
O.measured_prop as measuredProp,
O.unit as unit,
O.scaling_factor as scalingFactor,
O.measurement_method as measurementMethod,
SP.num_constraints as numConstraints,
CASE
WHEN O.measured_value IS NOT NULL THEN "measuredValue"
WHEN O.sum_value IS NOT NULL THEN "sumValue"
WHEN O.mean_value IS NOT NULL THEN "meanValue"
WHEN O.min_value IS NOT NULL THEN "minValue"
WHEN O.max_value IS NOT NULL THEN "maxValue"
WHEN O.std_deviation_value IS NOT NULL THEN "stdDeviationValue"
WHEN O.growth_rate IS NOT NULL THEN "growthRate"
WHEN O.median_value IS NOT NULL THEN "medianValue"
ELSE "Unknown"
END AS statType
FROM
`google.com:datcom-store-dev.dc_v3_clustered.StatisticalPopulation`
AS SP JOIN
`google.com:datcom-store-dev.dc_v3_clustered.Observation`
AS O
ON (SP.id = O.observed_node_key)
WHERE
O.type <> "ComparativeObservation"
AND SP.is_public
AND SP.prov_id NOT IN ({comma_sep_prov_blacklist})
"""
# Dataset blacklist.
_BIO_DATASETS = frozenset([
'dc/p47rsv3', # UniProt
'dc/0cwj4g1', # FDA_Pharmacologic_Class
'dc/5vxrbh3', # SIDER
'dc/ff08ks', # Gene_NCBI
'dc/rhjyj31', # MedicalSubjectHeadings
'dc/jd648v2', # GeneticVariantClinVar
'dc/x8m41b1', # ChEMBL
'dc/vbyjkh3', # SPOKESymptoms
'dc/gpv9pl2', # DiseaseOntology
'dc/8nwtbj2', # GTExSample0
'dc/t5lx1e2', # GTExSample2
'dc/kz0q1c2', # GTExSample1
'dc/8xcvhx', # GenomeAssemblies
'dc/hgp9hn1', # Species
'dc/9llzsx1', # GeneticVariantUCSC
'dc/f1fxve1', # Gene_RNATranscript_UCSC
'dc/mjgrfc', # Chromosome
'dc/h2lkz1', # ENCODEProjectSample
])
_MISC_DATASETS = frozenset([
'dc/93qydx3', # NYBG
'dc/g3rq1f1', # DeepSolar
'dc/22t2hr3', # EIA_860
'dc/zkhvp12', # OpportunityInsightsOutcomes
'dc/89fk9x3', # CollegeScorecard
])
# List of constraint prefixes to remove from certain properties.
CONSTRAINT_PREFIXES_TO_STRIP = {
'nativity': 'USC',
'age': 'USC',
'institutionalization': 'USC',
'educationStatus': 'USC',
'povertyStatus': 'USC',
'workExperience': 'USC',
'nativity': 'USC',
'race': ['USC', 'CDC', 'DAD'],
'employment': ['USC', 'BLS'],
'employmentStatus': ['USC', 'BLS'],
'schoolGradeLevel': 'NCES',
'patientRace': 'DAD'
}
# List of drug renamings. Note that some drugs are intentionally excluded.
DRUG_REMAPPINGS = {
'drug/dea/1100': 'Amphetamine',
'drug/dea/1105B': 'DlMethamphetamine',
'drug/dea/1105D': 'DMethamphetamine',
'drug/dea/1205': 'Lisdexamfetamine',
'drug/dea/1248': 'Mephedrone',
'drug/dea/1615': 'Phendimetrazine',
'drug/dea/1724': 'Methylphenidate',
'drug/dea/2010': 'GammaHydroxybutyricAcid',
'drug/dea/2012': 'FDAApprovedGammaHydroxybutyricAcidPreparations',
'drug/dea/2100': 'BarbituricAcidDerivativeOrSalt',
'drug/dea/2125': 'Amobarbital',
'drug/dea/2165': 'Butalbital',
'drug/dea/2270': 'Pentobarbital', # Intentionally duplicated
'drug/dea/2285': 'Phenobarbital', # Intentionally duplicated
'drug/dea/2315': 'Secobarbital',
'drug/dea/2765': 'Diazepam',
'drug/dea/2783': 'Zolpidem',
'drug/dea/2885': 'Lorazepam',
'drug/dea/4000': 'AnabolicSteroids',
'drug/dea/4187': 'Testosterone',
'drug/dea/7285': 'Ketamine',
'drug/dea/7315D': 'Lysergide',
'drug/dea/7365': 'MarketableOralDronabinol',
'drug/dea/7369': 'DronabinolGelCapsule',
'drug/dea/7370': 'Tetrahydrocannabinol',
'drug/dea/7377': 'Cannabicyclol',
'drug/dea/7379': 'Nabilone',
'drug/dea/7381': 'Mescaline',
'drug/dea/7400': '34Methylenedioxyamphetamine',
'drug/dea/7431': '5MethoxyNNDimethyltryptamine',
'drug/dea/7433': 'Bufotenine',
'drug/dea/7437': 'Psilocybin',
'drug/dea/7438': 'Psilocin',
'drug/dea/7455': 'PCE',
'drug/dea/7471': 'Phencyclidine',
'drug/dea/7540': 'Methylone',
'drug/dea/9010': 'Alphaprodine',
'drug/dea/9020': 'Anileridine',
'drug/dea/9041L': 'Cocaine',
'drug/dea/9046': 'Norcocaine',
'drug/dea/9050': 'Codeine',
'drug/dea/9056': 'EtorphineExceptHCl',
'drug/dea/9064': 'Buprenorphine',
'drug/dea/9120': 'Dihydrocodeine',
'drug/dea/9143': 'Oxycodone',
'drug/dea/9150': 'Hydromorphone',
'drug/dea/9168': 'Difenoxin',
'drug/dea/9170': 'Diphenoxylate',
'drug/dea/9180L': 'Ecgonine',
'drug/dea/9190': 'Ethylmorphine',
'drug/dea/9193': 'Hydrocodone',
'drug/dea/9200': 'Heroin',
'drug/dea/9220L': 'Levorphanol',
'drug/dea/9230': 'Pethidine',
'drug/dea/9250B': 'Methadone',
'drug/dea/9273D': 'BulkDextropropoxyphene',
'drug/dea/9300': 'Morphine',
'drug/dea/9313': 'Normorphine',
'drug/dea/9333': 'Thebaine',
'drug/dea/9411': 'Naloxone',
'drug/dea/9600': 'RawOpium',
'drug/dea/9630': 'TincuredOpium',
'drug/dea/9639': 'PowderedOpium',
'drug/dea/9652': 'Oxymorphone',
'drug/dea/9655': 'Paregoric',
'drug/dea/9665': '14Hydroxycodeinone',
'drug/dea/9668': 'Noroxymorphone',
'drug/dea/9670': 'PoppyStrawConcentrate',
'drug/dea/9737': 'Alfentanil',
'drug/dea/9739': 'Remifentanil',
'drug/dea/9740': 'Sufentanil',
'drug/dea/9743': 'Carfentanil',
'drug/dea/9780': 'Tapentadol',
'drug/dea/9801': 'Fentanyl',
}
# Exceptionally long and confusing cause of death names are manually renamed.
MANUAL_CAUSE_OF_DEATH_RENAMINGS = {
'ICD10/D50-D89': 'DiseasesOfBloodAndBloodFormingOrgansAndImmuneDisorders',
'ICD10/R00-R99': 'AbnormalNotClassfied',
'ICD10/U00-U99': 'SpecialCases',
'ICD10/V01-Y89': 'ExternalCauses'
}
# List of properties to perform a numerical quantity remap on.
NUMERICAL_QUANTITY_PROPERTIES_TO_REMAP = [
'income', 'age', 'householderAge', 'homeValue', 'dateBuilt', 'grossRent',
'numberOfRooms', 'numberOfRooms', 'householdSize', 'numberOfVehicles',
'propertyTax'
]
# Regex rules to apply to numerical quantity remap.
REGEX_NUMERICAL_QUANTITY_RENAMINGS = [
# [A-Za-z]+[0-9]+Onwards -> [0-9]+OrMore[A-Za-z]+
(re.compile(r"^([A-Za-z]+)([0-9]+)Onwards$"),
lambda match: match.group(2) + "OrMore" + match.group(1)),
# [A-Za-z]+Upto[0-9]+ -> Upto[0-9]+[A-Za-z]+
(re.compile(r"^([A-Za-z]+)Upto([0-9]+)$"),
lambda match: "Upto" + match.group(2) + match.group(1)),
# [A-Za-z]+[0-9]+To[0-9]+-> [0-9]+To[0-9]+[A-Za-z]+
(re.compile(r"^([A-Za-z]+)([0-9]+)To([0-9]+)$"),
lambda match: match.group(2) + "To" + match.group(3) + match.group(1)),
# [A-Za-z]+[0-9]+ -> [0-9]+[A-Za-z]+
(re.compile(r"^([A-Za-z]+)([0-9]+)$"),
lambda match: match.group(2) + match.group(1))
]
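# Illustrative sketch (not part of the production pipeline): how the rules above
# could be applied to a camel-cased quantity constraint. The first matching rule
# wins, e.g. "Years75Onwards" -> "75OrMoreYears" and "RoomsUpto4" -> "Upto4Rooms".
def _example_apply_numerical_quantity_renaming(value):
    for pattern, renamer in REGEX_NUMERICAL_QUANTITY_RENAMINGS:
        match = pattern.match(value)
        if match:
            return renamer(match)
    return value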
# Constants that power Statistical Variable documentation.
# Tuple is defined as follows:
# (Name of Vertical), (Population Type include),
# (whether to subgroup by all population types in demographic),
# (if subgroup all, whether you should group population types with more than 1
# statistical variable).
SVPopGroup = (collections.namedtuple(
'STAT_VAR_DOCUMENTION_GROUPING',
'vertical popTypes subgroupAllPops subgroupIfMoreThanOne'))
STAT_VAR_POPULATION_GROUPINGS = [
SVPopGroup("Demographics",
['Person', 'Parent', 'Child', 'Student', 'Teacher'],
True, False),
SVPopGroup("Crime",
['CriminalActivities'],
False, False),
SVPopGroup("Health",
['Death', 'DrugDistribution', 'MedicalConditionIncident',
'MedicalTest', 'MedicareEnrollee'],
True, False),
SVPopGroup("Employment",
['Worker', 'Establishment', 'JobPosting',
'UnemploymentInsuranceClaim'],
True, False),
SVPopGroup("Economic",
['EconomicActivity', 'Consumption', 'Debt', 'TreasuryBill',
'TreasuryBond', 'TreasuryNote'],
True, False),
SVPopGroup("Environment",
['Emissions'],
False, False),
SVPopGroup("Household",
['Household'],
False, False),
SVPopGroup("HousingUnit",
['HousingUnit'],
False, False)
]
# HTML for statistical variable markdown.
DOCUMENTATION_BASE_MARKDOWN = \
"""---
layout: default
title: Statistical Variables
nav_order: 2
---
# Statistical Variables
Many of the Data Commons APIs deal with Data Commons nodes of the type
[StatisticalVariable](https://browser.datacommons.org/kg?dcid=StatisticalVariable).
The following list contains all Statistical Variables with human-readable identifiers,
grouped by domain and population type. Some verticals are grouped such that all
population types are a sub-level grouping, while others (like disasters) only
group by population type when there are multiple statistical variables for that
population type.
<style>
details details {
margin-left: 24px;
}
details details summary {
font-size: 16px;
}
li {
white-space: nowrap;
}
</style>
"""
DOCUMENTATION_HEADER_START = \
"""
<details>
<summary>{HEADER}</summary>
"""
DOCUMENTATION_DROPDOWN_START = \
"""
<details>
<summary>{POPULATION_TYPE}</summary>
<ul>
"""
| datacommonsorg/tools | stat_var_renaming/stat_var_renaming_constants.py | Python | apache-2.0 | 13,060 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httpretty
import pytest
from selenium.common.exceptions import InvalidArgumentException
from appium.webdriver.webdriver import WebDriver
from test.unit.helper.test_helper import android_w3c_driver, appium_command, get_httpretty_request_body
class TestWebDriverRemoteFs(object):
@httpretty.activate
def test_push_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
data = base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8')
assert isinstance(driver.push_file(dest_path, data), WebDriver)
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
assert d['data'] == str(data)
@httpretty.activate
def test_push_file_invalid_arg_exception_without_src_path_and_base64data(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path)
@httpretty.activate
def test_push_file_invalid_arg_exception_with_src_file_not_found(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/dest_path/to/file.txt'
src_path = '/src_path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path, source_path=src_path)
@httpretty.activate
def test_pull_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_file'),
body='{"value": "SGVsbG9Xb3JsZA=="}',
)
dest_path = '/path/to/file.txt'
assert driver.pull_file(dest_path) == str(base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8'))
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
@httpretty.activate
def test_pull_folder(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_folder'),
body='{"value": "base64EncodedZippedFolderData"}',
)
dest_path = '/path/to/file.txt'
assert driver.pull_folder(dest_path) == 'base64EncodedZippedFolderData'
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
| appium/python-client | test/unit/webdriver/device/remote_fs_test.py | Python | apache-2.0 | 3,434 |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import List, Dict
from apache_beam.coders import PickleCoder, Coder
from pyflink.common import Row, RowKind
from pyflink.common.state import ListState, MapState
from pyflink.fn_execution.coders import from_proto
from pyflink.fn_execution.operation_utils import is_built_in_function, load_aggregate_function
from pyflink.fn_execution.state_impl import RemoteKeyedStateBackend
from pyflink.table import AggregateFunction, FunctionContext
from pyflink.table.data_view import ListView, MapView
def join_row(left: Row, right: Row):
fields = []
for value in left:
fields.append(value)
for value in right:
fields.append(value)
return Row(*fields)
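# Illustrative sketch: join_row simply concatenates the fields of both rows, e.g.
#   join_row(Row(1, 2), Row('a'))  # -> Row(1, 2, 'a')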
def extract_data_view_specs_from_accumulator(current_index, accumulator):
# for built in functions we extract the data view specs from their accumulator
i = -1
extracted_specs = []
for field in accumulator:
i += 1
# TODO: infer the coder from the input types and output type of the built-in functions
if isinstance(field, MapView):
extracted_specs.append(MapViewSpec(
"builtInAgg%df%d" % (current_index, i), i, PickleCoder(), PickleCoder()))
elif isinstance(field, ListView):
extracted_specs.append(ListViewSpec(
"builtInAgg%df%d" % (current_index, i), i, PickleCoder()))
return extracted_specs
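# Illustrative sketch (assuming MapView can be constructed directly): for a
# built-in aggregate at index 0 whose accumulator is Row(0, MapView()), the call
#   extract_data_view_specs_from_accumulator(0, Row(0, MapView()))
# yields a single MapViewSpec named "builtInAgg0f1" for field index 1.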
def extract_data_view_specs(udfs):
extracted_udf_data_view_specs = []
current_index = -1
for udf in udfs:
current_index += 1
udf_data_view_specs_proto = udf.specs
if not udf_data_view_specs_proto:
if is_built_in_function(udf.payload):
built_in_function = load_aggregate_function(udf.payload)
accumulator = built_in_function.create_accumulator()
extracted_udf_data_view_specs.append(
extract_data_view_specs_from_accumulator(current_index, accumulator))
else:
extracted_udf_data_view_specs.append([])
else:
extracted_specs = []
for spec_proto in udf_data_view_specs_proto:
state_id = spec_proto.name
field_index = spec_proto.field_index
if spec_proto.HasField("list_view"):
element_coder = from_proto(spec_proto.list_view.element_type)
extracted_specs.append(ListViewSpec(state_id, field_index, element_coder))
elif spec_proto.HasField("map_view"):
key_coder = from_proto(spec_proto.map_view.key_type)
value_coder = from_proto(spec_proto.map_view.value_type)
extracted_specs.append(
MapViewSpec(state_id, field_index, key_coder, value_coder))
else:
raise Exception("Unsupported data view spec type: " + spec_proto.type)
extracted_udf_data_view_specs.append(extracted_specs)
if all([len(i) == 0 for i in extracted_udf_data_view_specs]):
return []
return extracted_udf_data_view_specs
class StateListView(ListView):
def __init__(self, list_state: ListState):
super().__init__()
self._list_state = list_state
def get(self):
return self._list_state.get()
def add(self, value):
self._list_state.add(value)
def add_all(self, values):
self._list_state.add_all(values)
def clear(self):
self._list_state.clear()
    def __hash__(self) -> int:
        # A plain list is not hashable, so hash a tuple of the current elements.
        return hash(tuple(self.get()))
class StateMapView(MapView):
def __init__(self, map_state: MapState):
super().__init__()
self._map_state = map_state
def get(self, key):
return self._map_state.get(key)
def put(self, key, value) -> None:
self._map_state.put(key, value)
def put_all(self, dict_value) -> None:
self._map_state.put_all(dict_value)
def remove(self, key) -> None:
self._map_state.remove(key)
def contains(self, key) -> bool:
return self._map_state.contains(key)
def items(self):
return self._map_state.items()
def keys(self):
return self._map_state.keys()
def values(self):
return self._map_state.values()
def is_empty(self) -> bool:
return self._map_state.is_empty()
def clear(self) -> None:
return self._map_state.clear()
class DataViewSpec(object):
def __init__(self, state_id, field_index):
self.state_id = state_id
self.field_index = field_index
class ListViewSpec(DataViewSpec):
def __init__(self, state_id, field_index, element_coder):
super(ListViewSpec, self).__init__(state_id, field_index)
self.element_coder = element_coder
class MapViewSpec(DataViewSpec):
def __init__(self, state_id, field_index, key_coder, value_coder):
super(MapViewSpec, self).__init__(state_id, field_index)
self.key_coder = key_coder
self.value_coder = value_coder
class DistinctViewDescriptor(object):
def __init__(self, input_extractor, filter_args):
self._input_extractor = input_extractor
self._filter_args = filter_args
def get_input_extractor(self):
return self._input_extractor
def get_filter_args(self):
return self._filter_args
class RowKeySelector(object):
"""
A simple key selector used to extract the current key from the input Row according to the
group-by field indexes.
"""
def __init__(self, grouping):
self.grouping = grouping
def get_key(self, data: Row):
return Row(*[data[i] for i in self.grouping])
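# Illustrative sketch: with grouping=[0, 2] the selector picks the first and third
# fields of the input row as the key, e.g.
#   RowKeySelector([0, 2]).get_key(Row('a', 1, 'b'))  # -> Row('a', 'b')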
class StateDataViewStore(object):
"""
The class used to manage the DataViews used in :class:`AggsHandleFunction`. Currently
DataView is not supported so it is just a wrapper of the :class:`FunctionContext`.
"""
def __init__(self,
function_context: FunctionContext,
keyed_state_backend: RemoteKeyedStateBackend):
self._function_context = function_context
self._keyed_state_backend = keyed_state_backend
def get_runtime_context(self):
return self._function_context
def get_state_list_view(self, state_name, element_coder):
return StateListView(self._keyed_state_backend.get_list_state(state_name, element_coder))
def get_state_map_view(self, state_name, key_coder, value_coder):
return StateMapView(
self._keyed_state_backend.get_map_state(state_name, key_coder, value_coder))
class AggsHandleFunction(ABC):
"""
The base class for handling aggregate functions.
"""
@abstractmethod
def open(self, state_data_view_store):
"""
Initialization method for the function. It is called before the actual working methods.
:param state_data_view_store: The object used to manage the DataView.
"""
pass
@abstractmethod
def accumulate(self, input_data: Row):
"""
Accumulates the input values to the accumulators.
:param input_data: Input values bundled in a row.
"""
pass
@abstractmethod
def retract(self, input_data: Row):
"""
Retracts the input values from the accumulators.
:param input_data: Input values bundled in a row.
"""
@abstractmethod
def merge(self, accumulators: Row):
"""
Merges the other accumulators into current accumulators.
:param accumulators: The other row of accumulators.
"""
pass
@abstractmethod
    def set_accumulators(self, accumulators: Row):
        """
        Sets the current accumulators (saved in a row) which contain the current aggregated
        results.
        In streaming, accumulators are stored in state, so aggregate buffers need to be restored
        from state.
        In batch, accumulators are stored in a dict, so aggregate buffers need to be restored from
        the dict.
        :param accumulators: Current accumulators.
        """
        pass
@abstractmethod
def get_accumulators(self) -> Row:
"""
Gets the current accumulators (saved in a row) which contains the current
aggregated results.
:return: The current accumulators.
"""
pass
@abstractmethod
def create_accumulators(self) -> Row:
"""
Initializes the accumulators and save them to an accumulators row.
:return: A row of accumulators which contains the aggregated results.
"""
pass
@abstractmethod
def cleanup(self):
"""
Cleanup for the retired accumulators state.
"""
pass
@abstractmethod
def get_value(self) -> Row:
"""
Gets the result of the aggregation from the current accumulators.
:return: The final result (saved in a row) of the current accumulators.
"""
pass
@abstractmethod
def close(self):
"""
Tear-down method for this function. It can be used for clean up work.
By default, this method does nothing.
"""
pass
class SimpleAggsHandleFunction(AggsHandleFunction):
"""
A simple AggsHandleFunction implementation which provides the basic functionality.
"""
def __init__(self,
udfs: List[AggregateFunction],
input_extractors: List,
index_of_count_star: int,
count_star_inserted: bool,
udf_data_view_specs: List[List[DataViewSpec]],
filter_args: List[int],
distinct_indexes: List[int],
distinct_view_descriptors: Dict[int, DistinctViewDescriptor]):
self._udfs = udfs
self._input_extractors = input_extractors
self._accumulators = None # type: Row
self._get_value_indexes = [i for i in range(len(udfs))]
if index_of_count_star >= 0 and count_star_inserted:
# The record count is used internally, should be ignored by the get_value method.
self._get_value_indexes.remove(index_of_count_star)
self._udf_data_view_specs = udf_data_view_specs
self._udf_data_views = []
self._filter_args = filter_args
self._distinct_indexes = distinct_indexes
self._distinct_view_descriptors = distinct_view_descriptors
self._distinct_data_views = {}
def open(self, state_data_view_store):
for udf in self._udfs:
udf.open(state_data_view_store.get_runtime_context())
self._udf_data_views = []
for data_view_specs in self._udf_data_view_specs:
data_views = {}
for data_view_spec in data_view_specs:
if isinstance(data_view_spec, ListViewSpec):
data_views[data_view_spec.field_index] = \
state_data_view_store.get_state_list_view(
data_view_spec.state_id,
PickleCoder())
elif isinstance(data_view_spec, MapViewSpec):
data_views[data_view_spec.field_index] = \
state_data_view_store.get_state_map_view(
data_view_spec.state_id,
PickleCoder(),
PickleCoder())
self._udf_data_views.append(data_views)
for key in self._distinct_view_descriptors.keys():
self._distinct_data_views[key] = state_data_view_store.get_state_map_view(
"agg%ddistinct" % key,
PickleCoder(),
PickleCoder())
def accumulate(self, input_data: Row):
for i in range(len(self._udfs)):
if i in self._distinct_data_views:
if len(self._distinct_view_descriptors[i].get_filter_args()) == 0:
filtered = False
else:
filtered = True
for filter_arg in self._distinct_view_descriptors[i].get_filter_args():
if input_data[filter_arg]:
filtered = False
break
if not filtered:
input_extractor = self._distinct_view_descriptors[i].get_input_extractor()
args = input_extractor(input_data)
if args in self._distinct_data_views[i]:
self._distinct_data_views[i][args] += 1
else:
self._distinct_data_views[i][args] = 1
if self._filter_args[i] >= 0 and not input_data[self._filter_args[i]]:
continue
input_extractor = self._input_extractors[i]
args = input_extractor(input_data)
if self._distinct_indexes[i] >= 0:
if args in self._distinct_data_views[self._distinct_indexes[i]]:
if self._distinct_data_views[self._distinct_indexes[i]][args] > 1:
continue
else:
raise Exception(
"The args are not in the distinct data view, this should not happen.")
self._udfs[i].accumulate(self._accumulators[i], *args)
def retract(self, input_data: Row):
for i in range(len(self._udfs)):
if i in self._distinct_data_views:
if len(self._distinct_view_descriptors[i].get_filter_args()) == 0:
filtered = False
else:
filtered = True
for filter_arg in self._distinct_view_descriptors[i].get_filter_args():
if input_data[filter_arg]:
filtered = False
break
if not filtered:
input_extractor = self._distinct_view_descriptors[i].get_input_extractor()
args = input_extractor(input_data)
if args in self._distinct_data_views[i]:
self._distinct_data_views[i][args] -= 1
if self._distinct_data_views[i][args] == 0:
del self._distinct_data_views[i][args]
if self._filter_args[i] >= 0 and not input_data[self._filter_args[i]]:
continue
input_extractor = self._input_extractors[i]
args = input_extractor(input_data)
if self._distinct_indexes[i] >= 0 and \
args in self._distinct_data_views[self._distinct_indexes[i]]:
continue
self._udfs[i].retract(self._accumulators[i], *args)
def merge(self, accumulators: Row):
for i in range(len(self._udfs)):
self._udfs[i].merge(self._accumulators[i], [accumulators[i]])
def set_accumulators(self, accumulators: Row):
if self._udf_data_views:
for i in range(len(self._udf_data_views)):
for index, data_view in self._udf_data_views[i].items():
accumulators[i][index] = data_view
self._accumulators = accumulators
def get_accumulators(self):
return self._accumulators
def create_accumulators(self):
return Row(*[udf.create_accumulator() for udf in self._udfs])
def cleanup(self):
for i in range(len(self._udf_data_views)):
for data_view in self._udf_data_views[i].values():
data_view.clear()
def get_value(self):
return Row(*[self._udfs[i].get_value(self._accumulators[i])
for i in self._get_value_indexes])
def close(self):
for udf in self._udfs:
udf.close()
class RecordCounter(ABC):
"""
The RecordCounter is used to count the number of input records under the current key.
"""
@abstractmethod
def record_count_is_zero(self, acc):
pass
@staticmethod
def of(index_of_count_star):
if index_of_count_star >= 0:
return RetractionRecordCounter(index_of_count_star)
else:
return AccumulationRecordCounter()
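# Illustrative note: RecordCounter.of(-1) returns an AccumulationRecordCounter
# (no COUNT(*) column is tracked), while RecordCounter.of(3) returns a
# RetractionRecordCounter that checks the count stored in the accumulator at
# index 3 of the accumulator row.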
class AccumulationRecordCounter(RecordCounter):
def record_count_is_zero(self, acc):
# when all the inputs are accumulations, the count will never be zero
return acc is None
class RetractionRecordCounter(RecordCounter):
def __init__(self, index_of_count_star):
self._index_of_count_star = index_of_count_star
def record_count_is_zero(self, acc):
        # We store the counter in the accumulator and the counter is never null
return acc is None or acc[self._index_of_count_star][0] == 0
class GroupAggFunction(object):
def __init__(self,
aggs_handle: AggsHandleFunction,
key_selector: RowKeySelector,
state_backend: RemoteKeyedStateBackend,
state_value_coder: Coder,
generate_update_before: bool,
state_cleaning_enabled: bool,
index_of_count_star: int):
self.aggs_handle = aggs_handle
self.generate_update_before = generate_update_before
self.state_cleaning_enabled = state_cleaning_enabled
self.key_selector = key_selector
self.state_value_coder = state_value_coder
self.state_backend = state_backend
self.record_counter = RecordCounter.of(index_of_count_star)
def open(self, function_context: FunctionContext):
self.aggs_handle.open(StateDataViewStore(function_context, self.state_backend))
def close(self):
self.aggs_handle.close()
def process_element(self, input_data: Row):
key = self.key_selector.get_key(input_data)
self.state_backend.set_current_key(key)
self.state_backend.clear_cached_iterators()
accumulator_state = self.state_backend.get_value_state(
"accumulators", self.state_value_coder)
accumulators = accumulator_state.value()
if accumulators is None:
if self.is_retract_msg(input_data):
# Don't create a new accumulator for a retraction message. This might happen if the
# retraction message is the first message for the key or after a state clean up.
return
first_row = True
accumulators = self.aggs_handle.create_accumulators()
else:
first_row = False
# set accumulators to handler first
self.aggs_handle.set_accumulators(accumulators)
# get previous aggregate result
pre_agg_value = self.aggs_handle.get_value()
# update aggregate result and set to the newRow
if self.is_accumulate_msg(input_data):
# accumulate input
self.aggs_handle.accumulate(input_data)
else:
# retract input
self.aggs_handle.retract(input_data)
# get current aggregate result
new_agg_value = self.aggs_handle.get_value()
# get accumulator
accumulators = self.aggs_handle.get_accumulators()
if not self.record_counter.record_count_is_zero(accumulators):
# we aggregated at least one record for this key
# update the state
accumulator_state.update(accumulators)
# if this was not the first row and we have to emit retractions
if not first_row:
if not self.state_cleaning_enabled and pre_agg_value == new_agg_value:
# newRow is the same as before and state cleaning is not enabled.
# We do not emit retraction and acc message.
# If state cleaning is enabled, we have to emit messages to prevent too early
# state eviction of downstream operators.
return
else:
# retract previous result
if self.generate_update_before:
# prepare UPDATE_BEFORE message for previous row
retract_row = join_row(key, pre_agg_value)
retract_row.set_row_kind(RowKind.UPDATE_BEFORE)
yield retract_row
# prepare UPDATE_AFTER message for new row
result_row = join_row(key, new_agg_value)
result_row.set_row_kind(RowKind.UPDATE_AFTER)
else:
                # this is the first row, output the new result
# prepare INSERT message for new row
result_row = join_row(key, new_agg_value)
result_row.set_row_kind(RowKind.INSERT)
yield result_row
else:
# we retracted the last record for this key
            # send out a delete message
if not first_row:
# prepare delete message for previous row
result_row = join_row(key, pre_agg_value)
result_row.set_row_kind(RowKind.DELETE)
yield result_row
# and clear all state
accumulator_state.clear()
# cleanup dataview under current key
self.aggs_handle.cleanup()
def on_timer(self, key):
if self.state_cleaning_enabled:
self.state_backend.set_current_key(key)
accumulator_state = self.state_backend.get_value_state(
"accumulators", self.state_value_coder)
accumulator_state.clear()
self.aggs_handle.cleanup()
@staticmethod
def is_retract_msg(data: Row):
return data.get_row_kind() == RowKind.UPDATE_BEFORE \
or data.get_row_kind() == RowKind.DELETE
@staticmethod
def is_accumulate_msg(data: Row):
return data.get_row_kind() == RowKind.UPDATE_AFTER \
or data.get_row_kind() == RowKind.INSERT
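# Illustrative note: for each input row, GroupAggFunction.process_element yields
# either an INSERT (first result for a key), an optional UPDATE_BEFORE followed by
# an UPDATE_AFTER (the aggregate result changed), or a DELETE (all records for the
# key were retracted), matching the RowKind handling above.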
| greghogan/flink | flink-python/pyflink/fn_execution/aggregate.py | Python | apache-2.0 | 22,865 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For misc util methods used with compute.
"""
from nova import db
from nova import flags
from nova import context
from nova import test
from nova import log as logging
from nova import utils
import nova.image.fake
from nova.compute import utils as compute_utils
from nova.compute import instance_types
from nova.notifier import test_notifier
from nova.tests import fake_network
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
class UsageInfoTestCase(test.TestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
spectacular=True)
super(UsageInfoTestCase, self).setUp()
self.stubs.Set(nova.network.API, 'get_instance_nw_info',
fake_get_nw_info)
self.flags(connection_type='fake',
stub_network=True,
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show)
def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user_id
inst['project_id'] = self.project_id
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
inst['ami_launch_index'] = 0
inst['root_gb'] = 0
inst['ephemeral_gb'] = 0
inst.update(params)
return db.instance_create(self.context, inst)['id']
def test_notify_usage_exists(self):
"""Ensure 'exists' notification generates apropriate usage data."""
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
compute_utils.notify_usage_exists(instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'compute.instance.exists')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], self.project_id)
self.assertEquals(payload['user_id'], self.user_id)
self.assertEquals(payload['instance_id'], instance.uuid)
self.assertEquals(payload['instance_type'], 'm1.tiny')
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
self.assertEquals(str(payload['instance_type_id']), str(type_id))
for attr in ('display_name', 'created_at', 'launched_at',
'state', 'state_description',
'bandwidth', 'audit_period_beginning',
'audit_period_ending'):
self.assertTrue(attr in payload,
msg="Key %s not in payload" % attr)
image_ref_url = "%s/images/1" % utils.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
self.compute.terminate_instance(self.context, instance['uuid'])
| russellb/nova | nova/tests/test_compute_utils.py | Python | apache-2.0 | 4,385 |
class Vehicle(object):
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
def foo(self, a, b, c):
print 'does some work'
print a, b, c
class Car(Vehicle):
def __init__(self, doors, *args):
Vehicle.__init__(self, *args)
self.doors = doors
def foo(self, *args, **kw):
print args, kw
super(Car, self).foo(*args)
class Boat(Vehicle):
def __init__(self, power, *args):
Vehicle.__init__(self, *args)
if power not in ('propeller', 'sail'):
print 'warning: drive type not acceptable'
raise TypeError
self.power = power
class Airplane(Vehicle):
def __init__(self):
pass
if __name__ == '__main__':
    car = Car('2', 'honda', 'civic', '2002')  # doors first, then make, model, year
car.foo(1, 2, 3)
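    # Illustrative extra usage: Boat only accepts 'propeller' or 'sail' for power.
    boat = Boat('sail', 'hobie', 'catamaran', '1998')
    boat.foo('x', 'y', 'z')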
# ============================ EOF =============================
| NMTHydro/SWACodingMeeting | homework/for_meeting_3/vehicles_David.py | Python | apache-2.0 | 930 |
"""The tests for the logbook component."""
# pylint: disable=protected-access,invalid-name
import collections
from datetime import datetime, timedelta
import json
import unittest
import pytest
import voluptuous as vol
from homeassistant.components import logbook, recorder
from homeassistant.components.alexa.smart_home import EVENT_ALEXA_SMART_HOME
from homeassistant.components.automation import EVENT_AUTOMATION_TRIGGERED
from homeassistant.components.recorder.models import process_timestamp_to_utc_isoformat
from homeassistant.components.script import EVENT_SCRIPT_STARTED
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_NAME,
ATTR_SERVICE,
CONF_DOMAINS,
CONF_ENTITIES,
CONF_EXCLUDE,
CONF_INCLUDE,
EVENT_CALL_SERVICE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
from homeassistant.helpers.entityfilter import CONF_ENTITY_GLOBS
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import Mock, patch
from tests.common import get_test_home_assistant, init_recorder_component, mock_platform
from tests.components.recorder.common import trigger_db_commit
class TestComponentLogbook(unittest.TestCase):
"""Test the History component."""
EMPTY_CONFIG = logbook.CONFIG_SCHEMA({logbook.DOMAIN: {}})
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
init_recorder_component(self.hass) # Force an in memory DB
with patch("homeassistant.components.http.start_http_server_and_save_config"):
assert setup_component(self.hass, logbook.DOMAIN, self.EMPTY_CONFIG)
self.addCleanup(self.hass.stop)
def test_service_call_create_logbook_entry(self):
"""Test if service call create log book entry."""
calls = []
@ha.callback
def event_listener(event):
"""Append on event."""
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
self.hass.services.call(
logbook.DOMAIN,
"log",
{
logbook.ATTR_NAME: "Alarm",
logbook.ATTR_MESSAGE: "is triggered",
logbook.ATTR_DOMAIN: "switch",
logbook.ATTR_ENTITY_ID: "switch.test_switch",
},
True,
)
self.hass.services.call(
logbook.DOMAIN,
"log",
{
logbook.ATTR_NAME: "This entry",
logbook.ATTR_MESSAGE: "has no domain or entity_id",
},
True,
)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
trigger_db_commit(self.hass)
self.hass.block_till_done()
self.hass.data[recorder.DATA_INSTANCE].block_till_done()
events = list(
logbook._get_events(
self.hass,
dt_util.utcnow() - timedelta(hours=1),
dt_util.utcnow() + timedelta(hours=1),
)
)
assert len(events) == 2
assert len(calls) == 2
first_call = calls[-2]
assert first_call.data.get(logbook.ATTR_NAME) == "Alarm"
assert first_call.data.get(logbook.ATTR_MESSAGE) == "is triggered"
assert first_call.data.get(logbook.ATTR_DOMAIN) == "switch"
assert first_call.data.get(logbook.ATTR_ENTITY_ID) == "switch.test_switch"
last_call = calls[-1]
assert last_call.data.get(logbook.ATTR_NAME) == "This entry"
assert last_call.data.get(logbook.ATTR_MESSAGE) == "has no domain or entity_id"
assert last_call.data.get(logbook.ATTR_DOMAIN) == "logbook"
def test_service_call_create_log_book_entry_no_message(self):
"""Test if service call create log book entry without message."""
calls = []
@ha.callback
def event_listener(event):
"""Append on event."""
calls.append(event)
self.hass.bus.listen(logbook.EVENT_LOGBOOK_ENTRY, event_listener)
with pytest.raises(vol.Invalid):
self.hass.services.call(logbook.DOMAIN, "log", {}, True)
# Logbook entry service call results in firing an event.
# Our service call will unblock when the event listeners have been
# scheduled. This means that they may not have been processed yet.
self.hass.block_till_done()
assert len(calls) == 0
def test_humanify_filter_sensor(self):
"""Test humanify filter too frequent sensor values."""
entity_id = "sensor.bla"
pointA = dt_util.utcnow().replace(minute=2)
pointB = pointA.replace(minute=5)
pointC = pointA + timedelta(minutes=logbook.GROUP_BY_MINUTES)
entity_attr_cache = logbook.EntityAttributeCache(self.hass)
eventA = self.create_state_changed_event(pointA, entity_id, 10)
eventB = self.create_state_changed_event(pointB, entity_id, 20)
eventC = self.create_state_changed_event(pointC, entity_id, 30)
entries = list(
logbook.humanify(self.hass, (eventA, eventB, eventC), entity_attr_cache, {})
)
assert len(entries) == 2
self.assert_entry(entries[0], pointB, "bla", entity_id=entity_id)
self.assert_entry(entries[1], pointC, "bla", entity_id=entity_id)
def test_home_assistant_start_stop_grouped(self):
"""Test if HA start and stop events are grouped.
Events that are occurring in the same minute.
"""
entity_attr_cache = logbook.EntityAttributeCache(self.hass)
entries = list(
logbook.humanify(
self.hass,
(
MockLazyEventPartialState(EVENT_HOMEASSISTANT_STOP),
MockLazyEventPartialState(EVENT_HOMEASSISTANT_START),
),
entity_attr_cache,
{},
),
)
assert len(entries) == 1
self.assert_entry(
entries[0], name="Home Assistant", message="restarted", domain=ha.DOMAIN
)
def test_home_assistant_start(self):
"""Test if HA start is not filtered or converted into a restart."""
entity_id = "switch.bla"
pointA = dt_util.utcnow()
entity_attr_cache = logbook.EntityAttributeCache(self.hass)
entries = list(
logbook.humanify(
self.hass,
(
MockLazyEventPartialState(EVENT_HOMEASSISTANT_START),
self.create_state_changed_event(pointA, entity_id, 10),
),
entity_attr_cache,
{},
)
)
assert len(entries) == 2
self.assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
self.assert_entry(entries[1], pointA, "bla", entity_id=entity_id)
def test_process_custom_logbook_entries(self):
"""Test if custom log book entries get added as an entry."""
name = "Nice name"
message = "has a custom entry"
entity_id = "sun.sun"
entity_attr_cache = logbook.EntityAttributeCache(self.hass)
entries = list(
logbook.humanify(
self.hass,
(
MockLazyEventPartialState(
logbook.EVENT_LOGBOOK_ENTRY,
{
logbook.ATTR_NAME: name,
logbook.ATTR_MESSAGE: message,
logbook.ATTR_ENTITY_ID: entity_id,
},
),
),
entity_attr_cache,
{},
)
)
assert len(entries) == 1
self.assert_entry(entries[0], name=name, message=message, entity_id=entity_id)
# pylint: disable=no-self-use
def assert_entry(
self, entry, when=None, name=None, message=None, domain=None, entity_id=None
):
"""Assert an entry is what is expected."""
return _assert_entry(entry, when, name, message, domain, entity_id)
def create_state_changed_event(
self,
event_time_fired,
entity_id,
state,
attributes=None,
last_changed=None,
last_updated=None,
):
"""Create state changed event."""
old_state = ha.State(
entity_id, "old", attributes, last_changed, last_updated
).as_dict()
new_state = ha.State(
entity_id, state, attributes, last_changed, last_updated
).as_dict()
return self.create_state_changed_event_from_old_new(
entity_id, event_time_fired, old_state, new_state
)
# pylint: disable=no-self-use
def create_state_changed_event_from_old_new(
self, entity_id, event_time_fired, old_state, new_state
):
"""Create a state changed event from a old and new state."""
attributes = {}
if new_state is not None:
attributes = new_state.get("attributes")
attributes_json = json.dumps(attributes, cls=JSONEncoder)
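        # The namedtuple class below acts as a stand-in for a recorder database row;
        # the attributes assigned to it mimic the joined Events/States columns that
        # logbook.LazyEventPartialState expects.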
        row = collections.namedtuple(
            "Row",
            [
                "event_type",
                "event_data",
                "time_fired",
                "context_id",
                "context_user_id",
                "state",
                "entity_id",
                "domain",
                "attributes",
                "state_id",
                "old_state_id",
            ],
        )
row.event_type = EVENT_STATE_CHANGED
row.event_data = "{}"
row.attributes = attributes_json
row.time_fired = event_time_fired
row.state = new_state and new_state.get("state")
row.entity_id = entity_id
row.domain = entity_id and ha.split_entity_id(entity_id)[0]
row.context_id = None
row.context_user_id = None
row.old_state_id = old_state and 1
row.state_id = new_state and 1
return logbook.LazyEventPartialState(row)
async def test_logbook_view(hass, hass_client):
"""Test the logbook view."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
response = await client.get(f"/api/logbook/{dt_util.utcnow().isoformat()}")
assert response.status == 200
async def test_logbook_view_period_entity(hass, hass_client):
"""Test the logbook view with period and entity."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
entity_id_test = "switch.test"
hass.states.async_set(entity_id_test, STATE_OFF)
hass.states.async_set(entity_id_test, STATE_ON)
entity_id_second = "switch.second"
hass.states.async_set(entity_id_second, STATE_OFF)
hass.states.async_set(entity_id_second, STATE_ON)
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries without filters
response = await client.get(f"/api/logbook/{start_date.isoformat()}")
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 2
assert response_json[0]["entity_id"] == entity_id_test
assert response_json[1]["entity_id"] == entity_id_second
# Test today entries with filter by period
response = await client.get(f"/api/logbook/{start_date.isoformat()}?period=1")
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 2
assert response_json[0]["entity_id"] == entity_id_test
assert response_json[1]["entity_id"] == entity_id_second
# Test today entries with filter by entity_id
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?entity=switch.test"
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 1
assert response_json[0]["entity_id"] == entity_id_test
# Test entries for 3 days with filter by entity_id
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?period=3&entity=switch.test"
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 1
assert response_json[0]["entity_id"] == entity_id_test
# Tomorrow time 00:00:00
start = (dt_util.utcnow() + timedelta(days=1)).date()
start_date = datetime(start.year, start.month, start.day)
# Test tomorrow entries without filters
response = await client.get(f"/api/logbook/{start_date.isoformat()}")
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 0
# Test tomorrow entries with filter by entity_id
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?entity=switch.test"
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 0
# Test entries from tomorrow to 3 days ago with filter by entity_id
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?period=3&entity=switch.test"
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 1
assert response_json[0]["entity_id"] == entity_id_test
async def test_logbook_describe_event(hass, hass_client):
"""Test teaching logbook about a new event."""
await hass.async_add_executor_job(init_recorder_component, hass)
def _describe(event):
"""Describe an event."""
return {"name": "Test Name", "message": "tested a message"}
hass.config.components.add("fake_integration")
mock_platform(
hass,
"fake_integration.logbook",
Mock(
async_describe_events=lambda hass, async_describe_event: async_describe_event(
"test_domain", "some_event", _describe
)
),
)
assert await async_setup_component(hass, "logbook", {})
with patch(
"homeassistant.util.dt.utcnow",
return_value=dt_util.utcnow() - timedelta(seconds=5),
):
hass.bus.async_fire("some_event")
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(
hass.data[recorder.DATA_INSTANCE].block_till_done
)
client = await hass_client()
response = await client.get("/api/logbook")
results = await response.json()
assert len(results) == 1
event = results[0]
assert event["name"] == "Test Name"
assert event["message"] == "tested a message"
assert event["domain"] == "test_domain"
async def test_exclude_described_event(hass, hass_client):
"""Test exclusions of events that are described by another integration."""
name = "My Automation Rule"
entity_id = "automation.excluded_rule"
entity_id2 = "automation.included_rule"
entity_id3 = "sensor.excluded_domain"
def _describe(event):
"""Describe an event."""
return {
"name": "Test Name",
"message": "tested a message",
"entity_id": event.data.get(ATTR_ENTITY_ID),
}
def async_describe_events(hass, async_describe_event):
"""Mock to describe events."""
async_describe_event("automation", "some_automation_event", _describe)
async_describe_event("sensor", "some_event", _describe)
hass.config.components.add("fake_integration")
mock_platform(
hass,
"fake_integration.logbook",
Mock(async_describe_events=async_describe_events),
)
await hass.async_add_executor_job(init_recorder_component, hass)
assert await async_setup_component(
hass,
logbook.DOMAIN,
{
logbook.DOMAIN: {
CONF_EXCLUDE: {CONF_DOMAINS: ["sensor"], CONF_ENTITIES: [entity_id]}
}
},
)
with patch(
"homeassistant.util.dt.utcnow",
return_value=dt_util.utcnow() - timedelta(seconds=5),
):
hass.bus.async_fire(
"some_automation_event",
{logbook.ATTR_NAME: name, logbook.ATTR_ENTITY_ID: entity_id},
)
hass.bus.async_fire(
"some_automation_event",
{logbook.ATTR_NAME: name, logbook.ATTR_ENTITY_ID: entity_id2},
)
hass.bus.async_fire(
"some_event", {logbook.ATTR_NAME: name, logbook.ATTR_ENTITY_ID: entity_id3}
)
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(
hass.data[recorder.DATA_INSTANCE].block_till_done
)
client = await hass_client()
response = await client.get("/api/logbook")
results = await response.json()
assert len(results) == 1
event = results[0]
assert event["name"] == "Test Name"
assert event["entity_id"] == "automation.included_rule"
async def test_logbook_view_end_time_entity(hass, hass_client):
"""Test the logbook view with end_time and entity."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
entity_id_test = "switch.test"
hass.states.async_set(entity_id_test, STATE_OFF)
hass.states.async_set(entity_id_test, STATE_ON)
entity_id_second = "switch.second"
hass.states.async_set(entity_id_second, STATE_OFF)
hass.states.async_set(entity_id_second, STATE_ON)
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries with filter by end_time
end_time = start + timedelta(hours=24)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}"
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 2
assert response_json[0]["entity_id"] == entity_id_test
assert response_json[1]["entity_id"] == entity_id_second
# Test entries for 3 days with filter by entity_id
end_time = start + timedelta(hours=72)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=switch.test"
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 1
assert response_json[0]["entity_id"] == entity_id_test
    # Today time 00:00:00
start = dt_util.utcnow()
start_date = datetime(start.year, start.month, start.day)
# Test entries from today to 3 days with filter by entity_id
end_time = start_date + timedelta(hours=72)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=switch.test"
)
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 1
assert response_json[0]["entity_id"] == entity_id_test
async def test_logbook_entity_filter_with_automations(hass, hass_client):
"""Test the logbook view with end_time and entity with automations and scripts."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await async_setup_component(hass, "automation", {})
await async_setup_component(hass, "script", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
entity_id_test = "alarm_control_panel.area_001"
hass.states.async_set(entity_id_test, STATE_OFF)
hass.states.async_set(entity_id_test, STATE_ON)
entity_id_second = "alarm_control_panel.area_002"
hass.states.async_set(entity_id_second, STATE_OFF)
hass.states.async_set(entity_id_second, STATE_ON)
hass.bus.async_fire(
EVENT_AUTOMATION_TRIGGERED,
{ATTR_NAME: "Mock automation", ATTR_ENTITY_ID: "automation.mock_automation"},
)
hass.bus.async_fire(
EVENT_SCRIPT_STARTED,
{ATTR_NAME: "Mock script", ATTR_ENTITY_ID: "script.mock_script"},
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries with filter by end_time
end_time = start + timedelta(hours=24)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}"
)
assert response.status == 200
json_dict = await response.json()
assert json_dict[0]["entity_id"] == entity_id_test
assert json_dict[1]["entity_id"] == entity_id_second
assert json_dict[2]["entity_id"] == "automation.mock_automation"
assert json_dict[3]["entity_id"] == "script.mock_script"
assert json_dict[4]["domain"] == "homeassistant"
# Test entries for 3 days with filter by entity_id
end_time = start + timedelta(hours=72)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=alarm_control_panel.area_001"
)
assert response.status == 200
json_dict = await response.json()
assert len(json_dict) == 1
assert json_dict[0]["entity_id"] == entity_id_test
    # Today time 00:00:00
start = dt_util.utcnow()
start_date = datetime(start.year, start.month, start.day)
# Test entries from today to 3 days with filter by entity_id
end_time = start_date + timedelta(hours=72)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=alarm_control_panel.area_002"
)
assert response.status == 200
json_dict = await response.json()
assert len(json_dict) == 1
assert json_dict[0]["entity_id"] == entity_id_second
async def test_filter_continuous_sensor_values(hass, hass_client):
"""Test remove continuous sensor events from logbook."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
entity_id_test = "switch.test"
hass.states.async_set(entity_id_test, STATE_OFF)
hass.states.async_set(entity_id_test, STATE_ON)
entity_id_second = "sensor.bla"
hass.states.async_set(entity_id_second, STATE_OFF, {"unit_of_measurement": "foo"})
hass.states.async_set(entity_id_second, STATE_ON, {"unit_of_measurement": "foo"})
entity_id_third = "light.bla"
hass.states.async_set(entity_id_third, STATE_OFF, {"unit_of_measurement": "foo"})
hass.states.async_set(entity_id_third, STATE_ON, {"unit_of_measurement": "foo"})
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries without filters
response = await client.get(f"/api/logbook/{start_date.isoformat()}")
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 2
assert response_json[0]["entity_id"] == entity_id_test
assert response_json[1]["entity_id"] == entity_id_third
async def test_exclude_new_entities(hass, hass_client):
"""Test if events are excluded on first update."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
entity_id = "climate.bla"
entity_id2 = "climate.blu"
hass.states.async_set(entity_id, STATE_OFF)
hass.states.async_set(entity_id2, STATE_ON)
hass.states.async_set(entity_id2, STATE_OFF)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries without filters
response = await client.get(f"/api/logbook/{start_date.isoformat()}")
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 2
assert response_json[0]["entity_id"] == entity_id2
assert response_json[1]["domain"] == "homeassistant"
assert response_json[1]["message"] == "started"
async def test_exclude_removed_entities(hass, hass_client):
"""Test if events are excluded on last update."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
entity_id = "climate.bla"
entity_id2 = "climate.blu"
hass.states.async_set(entity_id, STATE_ON)
hass.states.async_set(entity_id, STATE_OFF)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.states.async_set(entity_id2, STATE_ON)
hass.states.async_set(entity_id2, STATE_OFF)
hass.states.async_remove(entity_id)
hass.states.async_remove(entity_id2)
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries without filters
response = await client.get(f"/api/logbook/{start_date.isoformat()}")
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 3
assert response_json[0]["entity_id"] == entity_id
assert response_json[1]["domain"] == "homeassistant"
assert response_json[1]["message"] == "started"
assert response_json[2]["entity_id"] == entity_id2
async def test_exclude_attribute_changes(hass, hass_client):
"""Test if events of attribute changes are filtered."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.states.async_set("light.kitchen", STATE_OFF)
hass.states.async_set("light.kitchen", STATE_ON, {"brightness": 100})
hass.states.async_set("light.kitchen", STATE_ON, {"brightness": 200})
hass.states.async_set("light.kitchen", STATE_ON, {"brightness": 300})
hass.states.async_set("light.kitchen", STATE_ON, {"brightness": 400})
hass.states.async_set("light.kitchen", STATE_OFF)
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries without filters
response = await client.get(f"/api/logbook/{start_date.isoformat()}")
assert response.status == 200
response_json = await response.json()
assert len(response_json) == 3
assert response_json[0]["domain"] == "homeassistant"
assert response_json[1]["entity_id"] == "light.kitchen"
assert response_json[2]["entity_id"] == "light.kitchen"
async def test_logbook_entity_context_id(hass, hass_client):
"""Test the logbook view with end_time and entity with automations and scripts."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await async_setup_component(hass, "automation", {})
await async_setup_component(hass, "script", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
context = ha.Context(
id="ac5bd62de45711eaaeb351041eec8dd9",
user_id="b400facee45711eaa9308bfd3d19e474",
)
# An Automation
automation_entity_id_test = "automation.alarm"
hass.bus.async_fire(
EVENT_AUTOMATION_TRIGGERED,
{ATTR_NAME: "Mock automation", ATTR_ENTITY_ID: automation_entity_id_test},
context=context,
)
hass.bus.async_fire(
EVENT_SCRIPT_STARTED,
{ATTR_NAME: "Mock script", ATTR_ENTITY_ID: "script.mock_script"},
context=context,
)
hass.states.async_set(
automation_entity_id_test,
STATE_ON,
{ATTR_FRIENDLY_NAME: "Alarm Automation"},
context=context,
)
entity_id_test = "alarm_control_panel.area_001"
hass.states.async_set(entity_id_test, STATE_OFF, context=context)
await hass.async_block_till_done()
hass.states.async_set(entity_id_test, STATE_ON, context=context)
await hass.async_block_till_done()
entity_id_second = "alarm_control_panel.area_002"
hass.states.async_set(entity_id_second, STATE_OFF, context=context)
await hass.async_block_till_done()
hass.states.async_set(entity_id_second, STATE_ON, context=context)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
await hass.async_add_executor_job(
logbook.log_entry,
hass,
"mock_name",
"mock_message",
"alarm_control_panel",
"alarm_control_panel.area_003",
context,
)
await hass.async_block_till_done()
await hass.async_add_executor_job(
logbook.log_entry,
hass,
"mock_name",
"mock_message",
"homeassistant",
None,
context,
)
await hass.async_block_till_done()
# A service call
light_turn_off_service_context = ha.Context(
id="9c5bd62de45711eaaeb351041eec8dd9",
user_id="9400facee45711eaa9308bfd3d19e474",
)
hass.states.async_set("light.switch", STATE_ON)
await hass.async_block_till_done()
hass.bus.async_fire(
EVENT_CALL_SERVICE,
{
ATTR_DOMAIN: "light",
ATTR_SERVICE: "turn_off",
ATTR_ENTITY_ID: "light.switch",
},
context=light_turn_off_service_context,
)
await hass.async_block_till_done()
hass.states.async_set(
"light.switch", STATE_OFF, context=light_turn_off_service_context
)
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries with filter by end_time
end_time = start + timedelta(hours=24)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}"
)
assert response.status == 200
json_dict = await response.json()
assert json_dict[0]["entity_id"] == "automation.alarm"
assert "context_entity_id" not in json_dict[0]
assert json_dict[0]["context_user_id"] == "b400facee45711eaa9308bfd3d19e474"
assert json_dict[1]["entity_id"] == "script.mock_script"
assert json_dict[1]["context_event_type"] == "automation_triggered"
assert json_dict[1]["context_entity_id"] == "automation.alarm"
assert json_dict[1]["context_entity_id_name"] == "Alarm Automation"
assert json_dict[1]["context_user_id"] == "b400facee45711eaa9308bfd3d19e474"
assert json_dict[2]["entity_id"] == entity_id_test
assert json_dict[2]["context_event_type"] == "automation_triggered"
assert json_dict[2]["context_entity_id"] == "automation.alarm"
assert json_dict[2]["context_entity_id_name"] == "Alarm Automation"
assert json_dict[2]["context_user_id"] == "b400facee45711eaa9308bfd3d19e474"
assert json_dict[3]["entity_id"] == entity_id_second
assert json_dict[3]["context_event_type"] == "automation_triggered"
assert json_dict[3]["context_entity_id"] == "automation.alarm"
assert json_dict[3]["context_entity_id_name"] == "Alarm Automation"
assert json_dict[3]["context_user_id"] == "b400facee45711eaa9308bfd3d19e474"
assert json_dict[4]["domain"] == "homeassistant"
assert json_dict[5]["entity_id"] == "alarm_control_panel.area_003"
assert json_dict[5]["context_event_type"] == "automation_triggered"
assert json_dict[5]["context_entity_id"] == "automation.alarm"
assert json_dict[5]["domain"] == "alarm_control_panel"
assert json_dict[5]["context_entity_id_name"] == "Alarm Automation"
assert json_dict[5]["context_user_id"] == "b400facee45711eaa9308bfd3d19e474"
assert json_dict[6]["domain"] == "homeassistant"
assert json_dict[6]["context_user_id"] == "b400facee45711eaa9308bfd3d19e474"
assert json_dict[7]["entity_id"] == "light.switch"
assert json_dict[7]["context_event_type"] == "call_service"
assert json_dict[7]["context_domain"] == "light"
assert json_dict[7]["context_service"] == "turn_off"
assert json_dict[7]["context_user_id"] == "9400facee45711eaa9308bfd3d19e474"
async def test_logbook_context_from_template(hass, hass_client):
"""Test the logbook view with end_time and entity with automations and scripts."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
# Entity added (should not be logged)
hass.states.async_set("switch.test_state", STATE_ON)
await hass.async_block_till_done()
# First state change (should be logged)
hass.states.async_set("switch.test_state", STATE_OFF)
await hass.async_block_till_done()
switch_turn_off_context = ha.Context(
id="9c5bd62de45711eaaeb351041eec8dd9",
user_id="9400facee45711eaa9308bfd3d19e474",
)
hass.states.async_set(
"switch.test_state", STATE_ON, context=switch_turn_off_context
)
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries with filter by end_time
end_time = start + timedelta(hours=24)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}"
)
assert response.status == 200
json_dict = await response.json()
assert json_dict[0]["domain"] == "homeassistant"
assert "context_entity_id" not in json_dict[0]
assert json_dict[1]["entity_id"] == "switch.test_template_switch"
assert json_dict[2]["entity_id"] == "switch.test_state"
assert json_dict[3]["entity_id"] == "switch.test_template_switch"
assert json_dict[3]["context_entity_id"] == "switch.test_state"
assert json_dict[3]["context_entity_id_name"] == "test state"
assert json_dict[4]["entity_id"] == "switch.test_state"
assert json_dict[4]["context_user_id"] == "9400facee45711eaa9308bfd3d19e474"
assert json_dict[5]["entity_id"] == "switch.test_template_switch"
assert json_dict[5]["context_entity_id"] == "switch.test_state"
assert json_dict[5]["context_entity_id_name"] == "test state"
assert json_dict[5]["context_user_id"] == "9400facee45711eaa9308bfd3d19e474"
async def test_logbook_entity_matches_only(hass, hass_client):
"""Test the logbook view with a single entity and entity_matches_only."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
# Entity added (should not be logged)
hass.states.async_set("switch.test_state", STATE_ON)
await hass.async_block_till_done()
# First state change (should be logged)
hass.states.async_set("switch.test_state", STATE_OFF)
await hass.async_block_till_done()
switch_turn_off_context = ha.Context(
id="9c5bd62de45711eaaeb351041eec8dd9",
user_id="9400facee45711eaa9308bfd3d19e474",
)
hass.states.async_set(
"switch.test_state", STATE_ON, context=switch_turn_off_context
)
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries with filter by end_time
end_time = start + timedelta(hours=24)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=switch.test_state&entity_matches_only"
)
assert response.status == 200
json_dict = await response.json()
assert len(json_dict) == 2
assert json_dict[0]["entity_id"] == "switch.test_state"
assert json_dict[1]["entity_id"] == "switch.test_state"
assert json_dict[1]["context_user_id"] == "9400facee45711eaa9308bfd3d19e474"
async def test_logbook_entity_matches_only_multiple(hass, hass_client):
"""Test the logbook view with a multiple entities and entity_matches_only."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
assert await async_setup_component(
hass,
"switch",
{
"switch": {
"platform": "template",
"switches": {
"test_template_switch": {
"value_template": "{{ states.switch.test_state.state }}",
"turn_on": {
"service": "switch.turn_on",
"entity_id": "switch.test_state",
},
"turn_off": {
"service": "switch.turn_off",
"entity_id": "switch.test_state",
},
}
},
}
},
)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
# Entity added (should not be logged)
hass.states.async_set("switch.test_state", STATE_ON)
hass.states.async_set("light.test_state", STATE_ON)
await hass.async_block_till_done()
# First state change (should be logged)
hass.states.async_set("switch.test_state", STATE_OFF)
hass.states.async_set("light.test_state", STATE_OFF)
await hass.async_block_till_done()
switch_turn_off_context = ha.Context(
id="9c5bd62de45711eaaeb351041eec8dd9",
user_id="9400facee45711eaa9308bfd3d19e474",
)
hass.states.async_set(
"switch.test_state", STATE_ON, context=switch_turn_off_context
)
hass.states.async_set("light.test_state", STATE_ON, context=switch_turn_off_context)
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries with filter by end_time
end_time = start + timedelta(hours=24)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=switch.test_state,light.test_state&entity_matches_only"
)
assert response.status == 200
json_dict = await response.json()
assert len(json_dict) == 4
assert json_dict[0]["entity_id"] == "switch.test_state"
assert json_dict[1]["entity_id"] == "light.test_state"
assert json_dict[2]["entity_id"] == "switch.test_state"
assert json_dict[2]["context_user_id"] == "9400facee45711eaa9308bfd3d19e474"
assert json_dict[3]["entity_id"] == "light.test_state"
assert json_dict[3]["context_user_id"] == "9400facee45711eaa9308bfd3d19e474"
async def test_logbook_invalid_entity(hass, hass_client):
"""Test the logbook view with requesting an invalid entity."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_block_till_done()
client = await hass_client()
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day)
# Test today entries with filter by end_time
end_time = start + timedelta(hours=24)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=invalid&entity_matches_only"
)
assert response.status == 500
async def test_icon_and_state(hass, hass_client):
"""Test to ensure state and custom icons are returned."""
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", {})
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.states.async_set("light.kitchen", STATE_OFF, {"icon": "mdi:chemical-weapon"})
hass.states.async_set(
"light.kitchen", STATE_ON, {"brightness": 100, "icon": "mdi:security"}
)
hass.states.async_set(
"light.kitchen", STATE_ON, {"brightness": 200, "icon": "mdi:security"}
)
hass.states.async_set(
"light.kitchen", STATE_ON, {"brightness": 300, "icon": "mdi:security"}
)
hass.states.async_set(
"light.kitchen", STATE_ON, {"brightness": 400, "icon": "mdi:security"}
)
hass.states.async_set("light.kitchen", STATE_OFF, {"icon": "mdi:chemical-weapon"})
await _async_commit_and_wait(hass)
client = await hass_client()
response_json = await _async_fetch_logbook(client)
assert len(response_json) == 3
assert response_json[0]["domain"] == "homeassistant"
assert response_json[1]["entity_id"] == "light.kitchen"
assert response_json[1]["icon"] == "mdi:security"
assert response_json[1]["state"] == STATE_ON
assert response_json[2]["entity_id"] == "light.kitchen"
assert response_json[2]["icon"] == "mdi:chemical-weapon"
assert response_json[2]["state"] == STATE_OFF
async def test_exclude_events_domain(hass, hass_client):
"""Test if events are filtered if domain is excluded in config."""
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {CONF_EXCLUDE: {CONF_DOMAINS: ["switch", "alexa"]}},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
hass.states.async_set(entity_id2, None)
hass.states.async_set(entity_id2, 20)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 2
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="blu", entity_id=entity_id2)
async def test_exclude_events_domain_glob(hass, hass_client):
"""Test if events are filtered if domain or glob is excluded in config."""
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
entity_id3 = "sensor.excluded"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
CONF_EXCLUDE: {
CONF_DOMAINS: ["switch", "alexa"],
CONF_ENTITY_GLOBS: "*.excluded",
}
},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
hass.states.async_set(entity_id2, None)
hass.states.async_set(entity_id2, 20)
hass.states.async_set(entity_id3, None)
hass.states.async_set(entity_id3, 30)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 2
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="blu", entity_id=entity_id2)
async def test_include_events_entity(hass, hass_client):
"""Test if events are filtered if entity is included in config."""
entity_id = "sensor.bla"
entity_id2 = "sensor.blu"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
CONF_INCLUDE: {
CONF_DOMAINS: ["homeassistant"],
CONF_ENTITIES: [entity_id2],
}
},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
hass.states.async_set(entity_id2, None)
hass.states.async_set(entity_id2, 20)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 2
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="blu", entity_id=entity_id2)
async def test_exclude_events_entity(hass, hass_client):
"""Test if events are filtered if entity is excluded in config."""
entity_id = "sensor.bla"
entity_id2 = "sensor.blu"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {CONF_EXCLUDE: {CONF_ENTITIES: [entity_id]}},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
hass.states.async_set(entity_id2, None)
hass.states.async_set(entity_id2, 20)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 2
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="blu", entity_id=entity_id2)
async def test_include_events_domain(hass, hass_client):
"""Test if events are filtered if domain is included in config."""
assert await async_setup_component(hass, "alexa", {})
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
CONF_INCLUDE: {CONF_DOMAINS: ["homeassistant", "sensor", "alexa"]}
},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.bus.async_fire(
EVENT_ALEXA_SMART_HOME,
{"request": {"namespace": "Alexa.Discovery", "name": "Discover"}},
)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
hass.states.async_set(entity_id2, None)
hass.states.async_set(entity_id2, 20)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 3
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="Amazon Alexa", domain="alexa")
_assert_entry(entries[2], name="blu", entity_id=entity_id2)
async def test_include_events_domain_glob(hass, hass_client):
"""Test if events are filtered if domain or glob is included in config."""
assert await async_setup_component(hass, "alexa", {})
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
entity_id3 = "switch.included"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
CONF_INCLUDE: {
CONF_DOMAINS: ["homeassistant", "sensor", "alexa"],
CONF_ENTITY_GLOBS: ["*.included"],
}
},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.bus.async_fire(
EVENT_ALEXA_SMART_HOME,
{"request": {"namespace": "Alexa.Discovery", "name": "Discover"}},
)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
hass.states.async_set(entity_id2, None)
hass.states.async_set(entity_id2, 20)
hass.states.async_set(entity_id3, None)
hass.states.async_set(entity_id3, 30)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 4
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="Amazon Alexa", domain="alexa")
_assert_entry(entries[2], name="blu", entity_id=entity_id2)
_assert_entry(entries[3], name="included", entity_id=entity_id3)
async def test_include_exclude_events(hass, hass_client):
"""Test if events are filtered if include and exclude is configured."""
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
entity_id3 = "sensor.bli"
entity_id4 = "sensor.keep"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
CONF_INCLUDE: {
CONF_DOMAINS: ["sensor", "homeassistant"],
CONF_ENTITIES: ["switch.bla"],
},
CONF_EXCLUDE: {
CONF_DOMAINS: ["switch"],
CONF_ENTITIES: ["sensor.bli"],
},
},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
hass.states.async_set(entity_id2, None)
hass.states.async_set(entity_id2, 10)
hass.states.async_set(entity_id3, None)
hass.states.async_set(entity_id3, 10)
hass.states.async_set(entity_id, 20)
hass.states.async_set(entity_id2, 20)
hass.states.async_set(entity_id4, None)
hass.states.async_set(entity_id4, 10)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 3
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="blu", entity_id=entity_id2)
_assert_entry(entries[2], name="keep", entity_id=entity_id4)
async def test_include_exclude_events_with_glob_filters(hass, hass_client):
"""Test if events are filtered if include and exclude is configured."""
entity_id = "switch.bla"
entity_id2 = "sensor.blu"
entity_id3 = "sensor.bli"
entity_id4 = "light.included"
entity_id5 = "switch.included"
entity_id6 = "sensor.excluded"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {
CONF_INCLUDE: {
CONF_DOMAINS: ["sensor", "homeassistant"],
CONF_ENTITIES: ["switch.bla"],
CONF_ENTITY_GLOBS: ["*.included"],
},
CONF_EXCLUDE: {
CONF_DOMAINS: ["switch"],
CONF_ENTITY_GLOBS: ["*.excluded"],
CONF_ENTITIES: ["sensor.bli"],
},
},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
hass.states.async_set(entity_id2, None)
hass.states.async_set(entity_id2, 10)
hass.states.async_set(entity_id3, None)
hass.states.async_set(entity_id3, 10)
hass.states.async_set(entity_id, 20)
hass.states.async_set(entity_id2, 20)
hass.states.async_set(entity_id4, None)
hass.states.async_set(entity_id4, 30)
hass.states.async_set(entity_id5, None)
hass.states.async_set(entity_id5, 30)
hass.states.async_set(entity_id6, None)
hass.states.async_set(entity_id6, 30)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 3
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="blu", entity_id=entity_id2)
_assert_entry(entries[2], name="included", entity_id=entity_id4)
async def test_empty_config(hass, hass_client):
"""Test we can handle an empty entity filter."""
entity_id = "sensor.blu"
config = logbook.CONFIG_SCHEMA(
{
ha.DOMAIN: {},
logbook.DOMAIN: {},
}
)
await hass.async_add_executor_job(init_recorder_component, hass)
await async_setup_component(hass, "logbook", config)
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
hass.states.async_set(entity_id, None)
hass.states.async_set(entity_id, 10)
await _async_commit_and_wait(hass)
client = await hass_client()
entries = await _async_fetch_logbook(client)
assert len(entries) == 2
_assert_entry(
entries[0], name="Home Assistant", message="started", domain=ha.DOMAIN
)
_assert_entry(entries[1], name="blu", entity_id=entity_id)
async def _async_fetch_logbook(client):
# Today time 00:00:00
start = dt_util.utcnow().date()
start_date = datetime(start.year, start.month, start.day) - timedelta(hours=24)
# Test today entries without filters
end_time = start + timedelta(hours=48)
response = await client.get(
f"/api/logbook/{start_date.isoformat()}?end_time={end_time}"
)
assert response.status == 200
return await response.json()
async def _async_commit_and_wait(hass):
await hass.async_block_till_done()
await hass.async_add_executor_job(trigger_db_commit, hass)
await hass.async_block_till_done()
await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)
await hass.async_block_till_done()
def _assert_entry(
entry, when=None, name=None, message=None, domain=None, entity_id=None
):
"""Assert an entry is what is expected."""
if when:
assert when.isoformat() == entry["when"]
if name:
assert name == entry["name"]
if message:
assert message == entry["message"]
if domain:
assert domain == entry["domain"]
if entity_id:
assert entity_id == entry["entity_id"]
class MockLazyEventPartialState(ha.Event):
"""Minimal mock of a Lazy event."""
@property
def data_entity_id(self):
"""Lookup entity id."""
return self.data.get(ATTR_ENTITY_ID)
@property
def data_domain(self):
"""Lookup domain."""
return self.data.get(ATTR_DOMAIN)
@property
def time_fired_minute(self):
"""Minute the event was fired."""
return self.time_fired.minute
@property
def context_user_id(self):
"""Context user id of event."""
return self.context.user_id
@property
def context_id(self):
"""Context id of event."""
return self.context.id
@property
def time_fired_isoformat(self):
"""Time event was fired in utc isoformat."""
return process_timestamp_to_utc_isoformat(self.time_fired)
| GenericStudent/home-assistant | tests/components/logbook/test_init.py | Python | apache-2.0 | 61,935 |
# Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from types import ModuleType
from distutils.version import StrictVersion
from neutron.plugins.ml2.drivers import type_tunnel
from neutron import version
# Some constants and verifier functions have been deprecated but are still
# used by earlier releases of neutron. In order to maintain
# backwards-compatibility with stable/mitaka this will act as a translator
# that passes constants and functions according to version number.
NEUTRON_VERSION = StrictVersion(str(version.version_info))
NEUTRON_NEWTON_VERSION = StrictVersion('9.0.0')
NEUTRON_OCATA_VERSION = StrictVersion('10.0.0')
NEUTRON_PIKE_VERSION = StrictVersion('11.0.0')
n_c = __import__('neutron.common.constants', fromlist=['common.constants'])
constants = __import__('neutron_lib.constants', fromlist=['constants'])
if NEUTRON_VERSION >= NEUTRON_NEWTON_VERSION:
from neutron.conf import common as base_config
from neutron_lib.api import validators
is_attr_set = validators.is_attr_set
validators = validators.validators
n_c_attr_names = getattr(n_c, "_mg__my_globals", None)
else:
from neutron.api.v2 import attributes
from neutron.common import config as base_config
n_c_attr_names = n_c.my_globals
is_attr_set = attributes.is_attr_set
validators = attributes.validators
setattr(constants, 'ATTR_NOT_SPECIFIED', getattr(attributes,
'ATTR_NOT_SPECIFIED'))
if NEUTRON_VERSION >= NEUTRON_OCATA_VERSION:
from neutron.db.models import agent as agent_model
from neutron.db.models import l3 as l3_models
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as providernet
from neutron_lib.api import extensions
from neutron_lib.db import model_base
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
from neutron_lib.utils import helpers as common_utils
try:
from neutron import context
except ImportError:
from neutron_lib import context
get_plugin = directory.get_plugin
n_c_attr_names = dir(n_c)
HasProject = model_base.HasProject
VXLAN_TUNNEL_TYPE = type_tunnel.ML2TunnelTypeDriver
Agent = agent_model.Agent
RouterPort = l3_models.RouterPort
Router = l3_models.Router
def get_context():
return context.Context()
def get_db_ref(context):
return context
def get_tunnel_session(context):
return context.session
def get_novaclient_images(nclient):
return nclient.glance
else:
from neutron.api import extensions # noqa
from neutron.common import utils as common_utils # noqa
from neutron import context
from neutron.db import agents_db
from neutron.db import api as db_api
from neutron.db import l3_db
from neutron.db import model_base # noqa
from neutron.db import models_v2
from neutron.extensions import portbindings # noqa
from neutron.extensions import providernet # noqa
from neutron import manager
from neutron.plugins.common import constants as svc_constants
from neutron.services import service_base # noqa
def get_plugin(service=None):
if service is None:
return manager.NeutronManager.get_plugin()
else:
return manager.NeutronManager.get_service_plugins().get(service)
HasProject = models_v2.HasTenant
setattr(constants, 'L3', getattr(svc_constants, 'L3_ROUTER_NAT'))
VXLAN_TUNNEL_TYPE = type_tunnel.TunnelTypeDriver
Agent = agents_db.Agent
RouterPort = l3_db.RouterPort
Router = l3_db.Router
def get_context():
return None
def get_db_ref(context):
return db_api.get_session()
def get_tunnel_session(context):
return context
def get_novaclient_images(nclient):
return nclient.images
if NEUTRON_VERSION >= NEUTRON_PIKE_VERSION:
from neutron.conf.agent import common as config
else:
from neutron.agent.common import config # noqa
core_opts = base_config.core_opts
# Bring in the union of all constants in neutron.common.constants
# and neutron_lib.constants. Handle any duplicates by using the
# values in neutron_lib.
#
# In the plugin code, replace the following imports:
# from neutron.common import constants
# from neutron_lib import constants
# with (something like this):
# from networking_cisco import backward_compatibility as bc
# Then constants are referenced as shown in this example:
# port['device_owner'] = bc.constants.DEVICE_OWNER_ROUTER_INTF
ignore = frozenset(['__builtins__', '__doc__', '__file__', '__name__',
'__package__', '__path__', '__version__'])
for attr_name in n_c_attr_names:
attr = getattr(n_c, attr_name)
if attr_name in ignore or isinstance(attr, ModuleType):
continue
else:
setattr(constants, attr_name, attr)
del n_c, ignore, attr_name, attr
| Gitweijie/first_project | networking_cisco/backwards_compatibility.py | Python | apache-2.0 | 5,581 |
"""Code to handle a Hue bridge."""
import asyncio
from functools import partial
from aiohttp import client_exceptions
import aiohue
import async_timeout
import slugify as unicode_slug
import voluptuous as vol
from homeassistant import core
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from .const import DOMAIN, LOGGER
from .errors import AuthenticationRequired, CannotConnect
from .helpers import create_config_flow
from .sensor_base import SensorManager
SERVICE_HUE_SCENE = "hue_activate_scene"
ATTR_GROUP_NAME = "group_name"
ATTR_SCENE_NAME = "scene_name"
SCENE_SCHEMA = vol.Schema(
{vol.Required(ATTR_GROUP_NAME): cv.string, vol.Required(ATTR_SCENE_NAME): cv.string}
)
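# Example service data accepted by SCENE_SCHEMA (the names below are illustrative only):
#   {"group_name": "Living room", "scene_name": "Relax"}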
# How long should we sleep if the hub is busy
HUB_BUSY_SLEEP = 0.01
class HueBridge:
"""Manages a single Hue bridge."""
def __init__(self, hass, config_entry, allow_unreachable, allow_groups):
"""Initialize the system."""
self.config_entry = config_entry
self.hass = hass
self.allow_unreachable = allow_unreachable
self.allow_groups = allow_groups
self.available = True
self.authorized = False
self.api = None
self.parallel_updates_semaphore = None
# Jobs to be executed when API is reset.
self.reset_jobs = []
self.sensor_manager = None
@property
def host(self):
"""Return the host of this bridge."""
return self.config_entry.data["host"]
async def async_setup(self, tries=0):
"""Set up a phue bridge based on host parameter."""
host = self.host
hass = self.hass
bridge = aiohue.Bridge(
host,
username=self.config_entry.data["username"],
websession=aiohttp_client.async_get_clientsession(hass),
)
try:
await authenticate_bridge(hass, bridge)
except AuthenticationRequired:
# Usernames can become invalid if hub is reset or user removed.
# We are going to fail the config entry setup and initiate a new
# linking procedure. When linking succeeds, it will remove the
# old config entry.
create_config_flow(hass, host)
return False
except CannotConnect:
LOGGER.error("Error connecting to the Hue bridge at %s", host)
raise ConfigEntryNotReady
except Exception: # pylint: disable=broad-except
LOGGER.exception("Unknown error connecting with Hue bridge at %s", host)
return False
self.api = bridge
self.sensor_manager = SensorManager(self)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(self.config_entry, "light")
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
self.config_entry, "binary_sensor"
)
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(self.config_entry, "sensor")
)
hass.services.async_register(
DOMAIN, SERVICE_HUE_SCENE, self.hue_activate_scene, schema=SCENE_SCHEMA
)
self.parallel_updates_semaphore = asyncio.Semaphore(
3 if self.api.config.modelid == "BSB001" else 10
)
self.authorized = True
return True
async def async_request_call(self, task):
"""Limit parallel requests to Hue hub.
The Hue hub can only handle a certain amount of parallel requests, total.
Although we limit our parallel requests, we still will run into issues because
other products are hitting up Hue.
ClientOSError means hub closed the socket on us.
ContentResponseError means hub raised an error.
Since we don't make bad requests, this is on them.
"""
async with self.parallel_updates_semaphore:
for tries in range(4):
try:
return await task()
except (
client_exceptions.ClientOSError,
client_exceptions.ClientResponseError,
) as err:
if tries == 3 or (
# We only retry if it's a server error. So raise on all 4XX errors.
isinstance(err, client_exceptions.ClientResponseError)
and err.status < 500
):
raise
await asyncio.sleep(HUB_BUSY_SLEEP * tries)
async def async_reset(self):
"""Reset this bridge to default state.
Will cancel any scheduled setup retry and will unload
the config entry.
"""
        # The bridge can be in two states:
# - Setup was successful, self.api is not None
# - Authentication was wrong, self.api is None, not retrying setup.
# If the authentication was wrong.
if self.api is None:
return True
self.hass.services.async_remove(DOMAIN, SERVICE_HUE_SCENE)
while self.reset_jobs:
self.reset_jobs.pop()()
# If setup was successful, we set api variable, forwarded entry and
# register service
results = await asyncio.gather(
self.hass.config_entries.async_forward_entry_unload(
self.config_entry, "light"
),
self.hass.config_entries.async_forward_entry_unload(
self.config_entry, "binary_sensor"
),
self.hass.config_entries.async_forward_entry_unload(
self.config_entry, "sensor"
),
)
# None and True are OK
return False not in results
async def hue_activate_scene(self, call, updated=False):
"""Service to call directly into bridge to set scenes."""
group_name = call.data[ATTR_GROUP_NAME]
scene_name = call.data[ATTR_SCENE_NAME]
group = next(
(group for group in self.api.groups.values() if group.name == group_name),
None,
)
# Additional scene logic to handle duplicate scene names across groups
scene = next(
(
scene
for scene in self.api.scenes.values()
if scene.name == scene_name
and group is not None
and sorted(scene.lights) == sorted(group.lights)
),
None,
)
# If we can't find it, fetch latest info.
if not updated and (group is None or scene is None):
await self.async_request_call(self.api.groups.update)
await self.async_request_call(self.api.scenes.update)
await self.hue_activate_scene(call, updated=True)
return
if group is None:
LOGGER.warning("Unable to find group %s", group_name)
return
if scene is None:
LOGGER.warning("Unable to find scene %s", scene_name)
return
await self.async_request_call(partial(group.set_action, scene=scene.id))
async def handle_unauthorized_error(self):
"""Create a new config flow when the authorization is no longer valid."""
if not self.authorized:
# we already created a new config flow, no need to do it again
return
LOGGER.error(
"Unable to authorize to bridge %s, setup the linking again.", self.host
)
self.authorized = False
create_config_flow(self.hass, self.host)
async def authenticate_bridge(hass: core.HomeAssistant, bridge: aiohue.Bridge):
"""Create a bridge object and verify authentication."""
try:
with async_timeout.timeout(10):
# Create username if we don't have one
if not bridge.username:
device_name = unicode_slug.slugify(
hass.config.location_name, max_length=19
)
await bridge.create_user(f"home-assistant#{device_name}")
# Initialize bridge (and validate our username)
await bridge.initialize()
except (aiohue.LinkButtonNotPressed, aiohue.Unauthorized):
raise AuthenticationRequired
except (asyncio.TimeoutError, client_exceptions.ClientOSError):
raise CannotConnect
except aiohue.AiohueException:
LOGGER.exception("Unknown Hue linking error occurred")
raise AuthenticationRequired
| postlund/home-assistant | homeassistant/components/hue/bridge.py | Python | apache-2.0 | 8,552 |
from __future__ import division, print_function
import os, json
from glob import glob
import numpy as np
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.utils.data_utils import get_file
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers.convolutional import Conv2D, MaxPooling2D, ZeroPadding2D
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.preprocessing import image
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))
def vgg_preprocess(x):
x = x - vgg_mean
return x[:, ::-1] # reverse axis rgb->bgr
class Vgg16():
"""The VGG 16 Imagenet model"""
def __init__(self):
self.FILE_PATH = 'http://www.platform.ai/models/'
self.create()
self.get_classes()
def get_classes(self):
fname = 'imagenet_class_index.json'
if not os.path.exists(fname):
fname = get_file(fname, self.FILE_PATH+fname, cache_subdir='models')
with open(fname) as f:
class_dict = json.load(f)
self.classes = [class_dict[str(i)][1] for i in range(len(class_dict))]
def predict(self, imgs, details=False):
all_preds = self.model.predict(imgs)
idxs = np.argmax(all_preds, axis=1)
preds = [all_preds[i, idxs[i]] for i in range(len(idxs))]
classes = [self.classes[idx] for idx in idxs]
return np.array(preds), idxs, classes
def ConvBlock(self, layers, filters):
model = self.model
for i in range(layers):
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(filters, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
def FCBlock(self):
model = self.model
model.add(Dense(4096, activation='relu'))
model.add(Dropout(0.5))
def create(self):
model = self.model = Sequential()
model.add(Lambda(vgg_preprocess, input_shape=(3,224,224), output_shape=(3,224,224)))
self.ConvBlock(2, 64)
self.ConvBlock(2, 128)
self.ConvBlock(3, 256)
self.ConvBlock(3, 512)
self.ConvBlock(3, 512)
model.add(Flatten())
self.FCBlock()
self.FCBlock()
model.add(Dense(1000, activation='softmax'))
fname = 'vgg16.h5'
if not os.path.exists(fname):
fname = get_file(fname, self.FILE_PATH+fname, cache_subdir='models')
model.load_weights(fname)
def get_batches(self, path, gen=image.ImageDataGenerator(), shuffle=True, batch_size=8, class_mode='categorical'):
return gen.flow_from_directory(path, target_size=(224,224),
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
def ft(self, num):
model = self.model
model.pop()
for layer in model.layers: layer.trainable=False
model.add(Dense(num, activation='softmax'))
self.compile()
def finetune(self, batches):
self.ft(batches.num_class)
classes = list(iter(batches.class_indices))
for c in batches.class_indices:
classes[batches.class_indices[c]] = c
self.classes = classes
def compile(self, lr=0.001):
self.model.compile(optimizer=Adam(lr=lr),
loss='categorical_crossentropy', metrics=['accuracy'])
def fit_data(self, trn, labels, val, val_labels, nb_epoch=1, batch_size=64):
self.model.fit(trn, labels, nb_epoch=nb_epoch,
validation_data=(val, val_labels), batch_size=batch_size)
def fit(self, batches, val_batches, nb_epoch=1):
self.model.fit_generator(batches, steps_per_epoch=batches.samples/batches.batch_size, epochs=nb_epoch,
validation_data=val_batches, validation_steps=val_batches.samples/val_batches.batch_size)
def test(self, path, batch_size=8):
test_batches = self.get_batches(path, shuffle=False, batch_size=batch_size, class_mode=None)
return test_batches, self.model.predict_generator(test_batches, test_batches.samples/batch_size)
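# A minimal usage sketch of this class (paths and batch size are illustrative, not part of this module):
#   vgg = Vgg16()
#   batches = vgg.get_batches('data/train', batch_size=64)
#   val_batches = vgg.get_batches('data/valid', batch_size=64, shuffle=False)
#   vgg.finetune(batches)
#   vgg.fit(batches, val_batches, nb_epoch=1)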
| VadimMalykh/courses | deeplearning1/nbs/vgg16.py | Python | apache-2.0 | 4,286 |
from __future__ import print_function, absolute_import
import re
class CNFParser(object):
"""Stream parser for DIMACS format"""
def __init__(self, file_object):
self.file_object = file_object
self.var_count = 0
self.clauses = []
self.remove_doubles = re.compile(r'\s\s+')
self._parse()
def _parse(self):
# find the header
line_number, header = self._find_header()
elms = header.split()
if len(elms) != 4:
raise ValueError('Unrecognized cnf header: "{}"'.format(header))
sig, filetype, var_count, clause_count = elms
self.var_count = int(var_count)
self.clause_count = int(clause_count)
for i, line in enumerate(self.file_object):
self._parse_line(line_number + i, line)
def _warn(self, msg):
print('Warning: {}'.format(msg))
def _find_header(self):
for line_number, line in enumerate(self.file_object):
line = line.strip()
            # Guard against empty lines before indexing into the line.
            if line == '' or line[0] == 'c':
                continue
            elif line[0] == 'p':
                return line_number, line
else:
raise Exception('Unexpected header on line {}: "{}"'
.format(line_number, line))
def _parse_line(self, line_number, line):
line = line.strip()
if len(line) == 0:
return
# Be flexible with comments (since some benchmarks use either)
if line[0] in '#c':
return
line = self.remove_doubles.sub(r' ', line)
elms = line.split()
clause = []
for clause_str in elms:
clause_str = clause_str.strip()
try:
literal = int(clause_str)
except ValueError:
self._warn('Error in line #{} -- could not parse "{}"'.format(
line_number, clause_str))
continue
if literal == 0:
break
clause.append(literal)
if len(clause) > 0:
self.clauses.append(clause)
class CNFFileParser(CNFParser):
"""Parse DIMACS files"""
def __init__(self, filename):
self.filename = filename
with open(filename, 'r') as f:
super(CNFFileParser, self).__init__(f)
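# A minimal usage sketch (the .cnf path is hypothetical):
#   parser = CNFFileParser('problem.cnf')
#   print(parser.var_count, parser.clause_count, len(parser.clauses))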
| kunalarya/simple-sat-solver | satsolver/parser.py | Python | apache-2.0 | 2,344 |
# -*- coding: utf-8 -*-
from . import app, db
from flask import request, g, session, redirect
from Lotus.model.user import User
from hashlib import md5
from Lotus.lib.msg_code import Msg
import json
@app.route('/user/login', methods=['POST'])
def user_login():
email = request.form.get('email', None)
psw = request.form.get('psw', None)
    if email is not None and psw is not None:
        # Passwords are stored as md5 hex digests (see user_register), so hash before comparing.
        m = md5()
        m.update(psw)
        user = User.query.filter_by(email=email, psw=m.hexdigest()).first()
        if user is not None:
            g.user = user
            session['userid'] = user.userid
            return json.dumps({'code': Msg['success'], 'msg': 'login success'})
        else:
            return json.dumps({'code': Msg['faild'], 'msg': 'user not exist'})
    else:
        return json.dumps({'code': Msg['faild'], 'msg': 'params not enough'})
@app.route('/user/register', methods=['POST'])
def user_register():
    # TODO: what should happen if the insert fails because parameters are missing?
    # TODO: forgot-password flow.
try:
u = User()
u.username = request.form.get('username', None)
u.description = request.form.get('description', None)
u.type = request.form.get('type', User.CONST_TYPE_USER)
u.email = request.form.get('email', None)
m = md5()
        m.update(request.form.get('psw', User.CONST_DEFAULT_PASSWORD))  # default password
u.psw = m.hexdigest()
db.session.add(u)
db.session.commit()
except Exception as e:
return '{"code":%d,"msg":$s}'.format(Msg['faild'], 'register faild')
return '{"code":%d,"msg":$s}'.format(Msg['success'], 'register success')
@app.route('/user/<int:userid>/avatar', methods=['GET', 'POST'])
def user_avatar(userid):
#upload
    # TODO: support avatar upload
if request.method == 'POST':
pass
else:
pass
@app.route('/user/<int:userid>/profile', methods=['GET'])
def user_profile(userid):
    if session.get('userid'):
        # g is request-scoped, so reload the user from the database instead of relying on g.user.
        user = User.query.get(session['userid'])
        result = {
            'userid': user.userid,
            'username': user.username,
            'avatar': user.avatar,
            'description': user.description,
            'type': user.type,
            'email': user.email
        }
        return json.dumps(result)
    else:
        return redirect('/user/login')
@app.route('/user/<int:userid>/issue/sends/page/<int:page>', methods=['GET'])
def user_issues_send(userid, page):
pass
@app.route('/user/<int:userid>/issue/favours/page/<int:page>', methods=['GET'])
def user_issues_favour(userid, page):
pass
@app.route('/user/<int:userid>/messages/page/<int:page>', methods=['GET'])
def user_messages(userid, page):
pass
| Jayin/Lotus | Lotus/controller/user.py | Python | apache-2.0 | 2,550 |
import os
import sys
import numpy as np
sys.path.insert(0, os.getcwd() + '/../../tools/')
import lstm
import wb
import wer
def rescore_all(workdir, nbestdir, config):
for tsk in ['nbestlist_{}_{}'.format(a, b) for a in ['dt05', 'et05'] for b in ['real', 'simu']]:
print('process ' + tsk)
nbest_txt = nbestdir + tsk + '/words_text'
outdir = workdir + nbestdir.split('/')[-2] + '/' + tsk + '/'
wb.mkdir(outdir)
write_lmscore = outdir + 'lmwt.lstm'
lstm.rescore(workdir, nbest_txt, write_lmscore, config)
if __name__ == '__main__':
print(sys.argv)
if len(sys.argv) == 1:
print(
' \"python run.py -train\" train LSTM\n \"python run.py -rescore\" rescore nbest\n \"python run.py -wer\" compute WER')
absdir = os.getcwd() + '/'
train = absdir + 'data/train'
valid = absdir + 'data/valid'
nbestdir = absdir + 'data/nbest/nbest_mvdr_single_heq_multi/'
workdir = absdir + 'lstmlm/'
wb.mkdir(workdir)
os.chdir('../../tools/lstm/')
config = '-hidden 500 -epoch 10 -dropout 0 -gpu 2'
if '-train' in sys.argv:
lstm.train(workdir, train, valid, valid, config)
if '-test' in sys.argv:
lstm.ppl(workdir, train, config)
lstm.ppl(workdir, valid, config)
if '-rescore' in sys.argv:
rescore_all(workdir, nbestdir, config)
if '-wer' in sys.argv:
lmpaths = {'KN5': nbestdir + '<tsk>/lmwt.lmonly',
'RNN': nbestdir + '<tsk>/lmwt.rnn',
'LSTM': workdir + nbestdir.split('/')[-2] + '/<tsk>/lmwt.lstm',
'TRF': '/home/ozj/NAS_workspace/wangb/Experiments/ChiME4/lmscore/' + nbestdir.split('/')[
-2] + '/<tsk>/lmwt.trf'}
# 'TRF': nbestdir + '<tsk>/lmwt.trf'}
lmtypes = ['LSTM', 'KN5', 'RNN', 'TRF', 'RNN+KN5', 'LSTM+KN5', 'RNN+TRF', 'LSTM+TRF']
# lmtypes = ['LSTM', 'LSTM+TRF']
wer_workdir = absdir + 'wer/' + nbestdir.split('/')[-2] + '/'
print('wer_workdir = ' + wer_workdir)
wer.wer_all(wer_workdir, nbestdir, lmpaths, lmtypes)
config = wer.wer_tune(wer_workdir)
wer.wer_print(wer_workdir, config)
| wbengine/SPMILM | egs/chime4/run_lstm.py | Python | apache-2.0 | 2,187 |