code | repo_name | path | language | license | size
---|---|---|---|---|---
from twisted.trial.unittest import TestCase
from mock import Mock
from twisted.web.test.test_web import DummyRequest
from twisted.web.http import OK, NOT_FOUND
from cryptosync.resources import make_site
def make_request(uri='', method='GET', args=None):
    # avoid a shared mutable default argument for args
    site = make_site(authenticator=Mock())
    request = DummyRequest(uri.split('/'))
    request.method = method
    request.args = args if args is not None else {}
resource = site.getResourceFor(request)
request.render(resource)
request.data = "".join(request.written)
return request
class RootResourceResponseCodesTestCase(TestCase):
def test_root_resource_ok(self):
request = make_request()
self.assertEquals(request.responseCode, OK)
def test_root_resource_not_found_url(self):
request = make_request(uri='shouldneverfindthisthing')
self.assertEquals(request.responseCode, NOT_FOUND)
class AuthResourceTestCase(TestCase):
def _try_auth(self, credentials, expected):
request = make_request(uri='/auth/', method='POST', args=credentials)
self.assertEquals(request.responseCode, OK)
self.assertEquals(request.data, expected)
def test_auth_success_with_good_parameters(self):
credentials = {'username': 'myself', 'password': 'somethingawesome'}
self._try_auth(credentials, '{"status": "success"}')
def test_auth_failure_with_missing_parameters(self):
credentials = {'username': 'myself', 'password': 'somethingawesome'}
for (k, v) in credentials.items():
self._try_auth({k: v}, '{"status": "failure"}')
| shyba/cryptosync | cryptosync/tests/test_webserver.py | Python | agpl-3.0 | 1,579 |
# Copyright 2017 ForgeFlow S.L.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Purchase Order Approved",
"summary": "Add a new state 'Approved' in purchase orders.",
"version": "14.0.1.1.0",
"category": "Purchases",
"website": "https://github.com/OCA/purchase-workflow",
"author": "ForgeFlow, Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"depends": ["purchase_stock"],
"data": ["views/purchase_order_view.xml", "views/res_config_view.xml"],
}
| OCA/purchase-workflow | purchase_order_approved/__manifest__.py | Python | agpl-3.0 | 569 |
"""
Copyright (C) 2008 by Steven Wallace
[email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the
Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from __future__ import with_statement
import struct
import threading
import sys, traceback, time
def cascadeSetIn(a, b):
a.setIn(b)
return b
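# Note: cascadeSetIn returns its second argument, so it folds neatly, e.g.
#   reduce(cascadeSetIn, [f1, f2, f3])
# leaves f1.input = f2 and f2.input = f3 and evaluates to f3.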
class NetworkException(Exception):
pass
class Filter:
def __init__(self, *args):
self.input = None
self.output = None
self.server = False
self.master = None
self.initialized = threading.Event()
self.wlock = threading.Lock()
self.rlock = threading.Lock()
self.init_lock = threading.Lock()
self._init(*args)
def _init(self, *args):
pass
def disconnect(self):
if self.input:
self.input.disconnect()
def begin(self):
with self.init_lock:
if not self.initialized.isSet():
self._begin()
if self.input:
if not self.initialized.isSet():
self.initialized.wait()
self.input.begin()
def _begin(self):
self.initialized.set()
def end(self):
if self.output:
self.output.end()
def setIn(self, input = None):
self.input = input
if input:
input.setOut(self)
def setOut(self, output = None):
self.output = output
def readIn(self, data):
self.writeOut(data)
def readOut(self, data):
with self.rlock:
self._readOut(data)
def _readOut(self, data):
self.writeIn(data)
def writeIn(self, data):
if self.input:
self.input.readOut(data)
def writeOut(self, data):
self.initialized.wait()
with self.wlock:
self._writeOut(data)
def _writeOut(self, data):
if self.output:
self.output.readIn(data)
def error(self, error):
raise NetworkException(error)
class PacketizerFilter(Filter):
def _init(self):
self.received = ""
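    # Wire format: each frame is a 4-byte big-endian length prefix followed by
    # the payload, e.g. struct.pack("!i", 7) + "example" for "example";
    # _readOut below buffers partial reads until a whole frame has arrived.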
def _readOut(self, data):
self.received += data
while len(self.received) > 3:
            (length,) = struct.unpack("!i", self.received[:4])
if length + 4 <= len(self.received):
self.writeIn(self.received[4:length+4])
self.received = self.received[length+4:]
else:
return
def _writeOut(self, data):
Filter._writeOut(self, struct.pack("!i",len(data))+data)
class CompressionFilter(Filter):
def _init(self):
self.algorithms = {}
self.otherAlgorithms = []
try:
import zlib
self.algorithms['z'] = zlib
except:
pass
try:
import bz2
self.algorithms['b'] = bz2
except:
pass
try:
import noCompress
self.algorithms['n'] = noCompress
except:
pass
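    # Handshake sketch (as implemented in _begin/_readOut below): the server
    # first writes the letters of its available algorithms, the peer replies
    # with its own, and every later frame is one algorithm letter + payload.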
def _begin(self):
if self.server:
self._writeOut(''.join(self.algorithms.keys()))
def _readOut(self, data):
if not self.initialized.isSet():
if self.server:
self.otherAlgorithms = [i for i in data]
self.initialized.set()
self.begin()
else:
self.otherAlgorithms = [i for i in data]
self._writeOut(''.join(self.algorithms.keys()))
self.initialized.set()
self.begin()
else:
algorithm = data[0]
if algorithm not in self.algorithms:
self.error("UNKNOWN COMPRESSION ALGORITHM " + data)
self.writeIn(self.algorithms[algorithm].decompress(data[1:]))
def _writeOut(self, data):
        if not self.initialized.isSet():  # an Event object is always truthy; test its flag
Filter._writeOut(self, data)
else:
algorithm = 'n'
newData = data
for i in self.otherAlgorithms:
if i in self.algorithms:
tmpData = self.algorithms[i].compress(data, 9)
if len(tmpData) < len(newData):
newData = tmpData
algorithm = i
Filter._writeOut(self, ''.join((algorithm, newData)))
class EncryptionFilter(Filter):
pass #TODO
class TCPFilter(Filter):
def _init(self, connection = None):
self.connection = connection
def _writeOut(self, data):
if self.connection:
try:
self.connection.send(data)
except:
pass
def poll(self):
try:
data = self.connection.recv(4096)
if data:
self.readOut(data)
else:
self.disconnect()
except:
print "bleh!"
traceback.print_exc(file=sys.stdout)
self.disconnect()
def disconnect(self):
self.master.remove(self.connection)
if self.connection:
self.connection.close()
Filter.disconnect(self)
def end(self):
self.disconnect()
| joshbohde/megaminer-framework | server/networking/Filter.py | Python | agpl-3.0 | 5,766 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class DeliveryCarrier(models.Model):
_name = 'delivery.carrier'
_inherits = {'product.product': 'product_id'}
_description = "Carrier"
_order = 'sequence, id'
''' A Shipping Provider
In order to add your own external provider, follow these steps:
1. Create your model MyProvider that _inherit 'delivery.carrier'
2. Extend the selection of the field "delivery_type" with a pair
('<my_provider>', 'My Provider')
3. Add your methods:
<my_provider>_get_shipping_price_from_so
<my_provider>_send_shipping
<my_provider>_open_tracking_page
<my_provider>_cancel_shipment
(they are documented hereunder)
'''
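    # A minimal provider sketch (hypothetical names, following the steps in
    # the docstring above):
    #
    #     class ProviderAcme(models.Model):
    #         _inherit = 'delivery.carrier'
    #         delivery_type = fields.Selection(
    #             selection_add=[('acme', 'Acme Express')])
    #
    #         def acme_get_shipping_price_from_so(self, orders):
    #             return [10.0 for _ in orders]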
# -------------------------------- #
# Internals for shipping providers #
# -------------------------------- #
sequence = fields.Integer(help="Determine the display order", default=10)
# This field will be overwritten by internal shipping providers by adding their own type (ex: 'fedex')
delivery_type = fields.Selection([('fixed', 'Fixed Price'), ('base_on_rule', 'Based on Rules')], string='Provider', default='fixed', required=True)
product_type = fields.Selection(related='product_id.type', default='service')
product_sale_ok = fields.Boolean(related='product_id.sale_ok', default=False)
product_id = fields.Many2one('product.product', string='Delivery Product', required=True, ondelete="cascade")
price = fields.Float(compute='get_price')
available = fields.Boolean(compute='get_price')
free_if_more_than = fields.Boolean('Free if Order total is more than', help="If the order is more expensive than a certain amount, the customer can benefit from a free shipping", default=False)
amount = fields.Float(string='Amount', help="Amount of the order to benefit from a free shipping, expressed in the company currency")
country_ids = fields.Many2many('res.country', 'delivery_carrier_country_rel', 'carrier_id', 'country_id', 'Countries')
state_ids = fields.Many2many('res.country.state', 'delivery_carrier_state_rel', 'carrier_id', 'state_id', 'States')
zip_from = fields.Char('Zip From')
zip_to = fields.Char('Zip To')
price_rule_ids = fields.One2many('delivery.price.rule', 'carrier_id', 'Pricing Rules', copy=True)
fixed_price = fields.Float(compute='_compute_fixed_price', inverse='_set_product_fixed_price', store=True, string='Fixed Price',help="Keep empty if the pricing depends on the advanced pricing per destination")
integration_level = fields.Selection([('rate', 'Get Rate'), ('rate_and_ship', 'Get Rate and Create Shipment')], string="Integration Level", default='rate_and_ship', help="Action while validating Delivery Orders")
prod_environment = fields.Boolean("Environment", help="Set to True if your credentials are certified for production.")
margin = fields.Integer(help='This percentage will be added to the shipping price.')
_sql_constraints = [
('margin_not_under_100_percent', 'CHECK (margin >= -100)', 'Margin cannot be lower than -100%'),
]
@api.one
def toggle_prod_environment(self):
self.prod_environment = not self.prod_environment
@api.multi
def install_more_provider(self):
return {
'name': 'New Providers',
'view_mode': 'kanban',
'res_model': 'ir.module.module',
'domain': [['name', 'ilike', 'delivery_']],
'type': 'ir.actions.act_window',
'help': _('''<p class="oe_view_nocontent">
Buy Odoo Enterprise now to get more providers.
</p>'''),
}
@api.multi
def name_get(self):
display_delivery = self.env.context.get('display_delivery', False)
order_id = self.env.context.get('order_id', False)
if display_delivery and order_id:
order = self.env['sale.order'].browse(order_id)
currency = order.pricelist_id.currency_id.name or ''
res = []
for carrier_id in self.ids:
try:
r = self.read([carrier_id], ['name', 'price'])[0]
res.append((r['id'], r['name'] + ' (' + (str(r['price'])) + ' ' + currency + ')'))
except ValidationError:
r = self.read([carrier_id], ['name'])[0]
res.append((r['id'], r['name']))
else:
res = super(DeliveryCarrier, self).name_get()
return res
@api.depends('product_id.list_price', 'product_id.product_tmpl_id.list_price')
def _compute_fixed_price(self):
for carrier in self:
carrier.fixed_price = carrier.product_id.list_price
def _set_product_fixed_price(self):
for carrier in self:
carrier.product_id.list_price = carrier.fixed_price
@api.one
def get_price(self):
SaleOrder = self.env['sale.order']
self.available = False
self.price = False
order_id = self.env.context.get('order_id')
if order_id:
# FIXME: temporary hack until we refactor the delivery API in master
order = SaleOrder.browse(order_id)
if self.delivery_type not in ['fixed', 'base_on_rule']:
try:
computed_price = self.get_shipping_price_from_so(order)[0]
self.available = True
except ValidationError as e:
# No suitable delivery method found, probably configuration error
_logger.info("Carrier %s: %s, not found", self.name, e.name)
computed_price = 0.0
else:
carrier = self.verify_carrier(order.partner_shipping_id)
if carrier:
try:
computed_price = carrier.get_price_available(order)
self.available = True
except UserError as e:
# No suitable delivery method found, probably configuration error
_logger.info("Carrier %s: %s", carrier.name, e.name)
computed_price = 0.0
else:
computed_price = 0.0
self.price = computed_price * (1.0 + (float(self.margin) / 100.0))
# -------------------------- #
# API for external providers #
# -------------------------- #
# TODO define and handle exceptions that could be thrown by providers
def get_shipping_price_from_so(self, orders):
''' For every sale order, compute the price of the shipment
:param orders: A recordset of sale orders
:return list: A list of floats, containing the estimated price for the shipping of the sale order
'''
self.ensure_one()
if hasattr(self, '%s_get_shipping_price_from_so' % self.delivery_type):
return getattr(self, '%s_get_shipping_price_from_so' % self.delivery_type)(orders)
def send_shipping(self, pickings):
''' Send the package to the service provider
:param pickings: A recordset of pickings
:return list: A list of dictionaries (one per picking) containing of the form::
{ 'exact_price': price,
'tracking_number': number }
'''
self.ensure_one()
if hasattr(self, '%s_send_shipping' % self.delivery_type):
return getattr(self, '%s_send_shipping' % self.delivery_type)(pickings)
def get_tracking_link(self, pickings):
''' Ask the tracking link to the service provider
:param pickings: A recordset of pickings
:return list: A list of string URLs, containing the tracking links for every picking
'''
self.ensure_one()
if hasattr(self, '%s_get_tracking_link' % self.delivery_type):
return getattr(self, '%s_get_tracking_link' % self.delivery_type)(pickings)
def cancel_shipment(self, pickings):
''' Cancel a shipment
:param pickings: A recordset of pickings
'''
self.ensure_one()
if hasattr(self, '%s_cancel_shipment' % self.delivery_type):
return getattr(self, '%s_cancel_shipment' % self.delivery_type)(pickings)
@api.onchange('state_ids')
def onchange_states(self):
self.country_ids = [(6, 0, self.country_ids.ids + self.state_ids.mapped('country_id.id'))]
@api.onchange('country_ids')
def onchange_countries(self):
self.state_ids = [(6, 0, self.state_ids.filtered(lambda state: state.id in self.country_ids.mapped('state_ids').ids).ids)]
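    # (6, 0, ids) is the ORM relational command "replace the set with ids",
    # used by both onchange handlers above.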
@api.multi
def verify_carrier(self, contact):
self.ensure_one()
if self.country_ids and contact.country_id not in self.country_ids:
return False
if self.state_ids and contact.state_id not in self.state_ids:
return False
if self.zip_from and (contact.zip or '') < self.zip_from:
return False
if self.zip_to and (contact.zip or '') > self.zip_to:
return False
return self
@api.multi
def create_price_rules(self):
PriceRule = self.env['delivery.price.rule']
for record in self:
# If using advanced pricing per destination: do not change
if record.delivery_type == 'base_on_rule':
continue
# Not using advanced pricing per destination: override lines
if record.delivery_type == 'base_on_rule' and not (record.fixed_price is not False or record.free_if_more_than):
record.price_rule_ids.unlink()
# Check that float, else 0.0 is False
if not (record.fixed_price is not False or record.free_if_more_than):
continue
if record.delivery_type == 'fixed':
PriceRule.search([('carrier_id', '=', record.id)]).unlink()
line_data = {
'carrier_id': record.id,
'variable': 'price',
'operator': '>=',
}
# Create the delivery price rules
if record.free_if_more_than:
line_data.update({
'max_value': record.amount,
'standard_price': 0.0,
'list_base_price': 0.0,
})
PriceRule.create(line_data)
if record.fixed_price is not False:
line_data.update({
'max_value': 0.0,
'standard_price': record.fixed_price,
'list_base_price': record.fixed_price,
})
PriceRule.create(line_data)
return True
@api.model
def create(self, vals):
res = super(DeliveryCarrier, self).create(vals)
res.create_price_rules()
return res
@api.multi
def write(self, vals):
res = super(DeliveryCarrier, self).write(vals)
self.create_price_rules()
return res
@api.multi
def get_price_available(self, order):
self.ensure_one()
total = weight = volume = quantity = 0
total_delivery = 0.0
for line in order.order_line:
if line.state == 'cancel':
continue
if line.is_delivery:
total_delivery += line.price_total
if not line.product_id or line.is_delivery:
continue
qty = line.product_uom._compute_quantity(line.product_uom_qty, line.product_id.uom_id)
weight += (line.product_id.weight or 0.0) * qty
volume += (line.product_id.volume or 0.0) * qty
quantity += qty
total = (order.amount_total or 0.0) - total_delivery
total = order.currency_id.with_context(date=order.date_order).compute(total, order.company_id.currency_id)
return self.get_price_from_picking(total, weight, volume, quantity)
def get_price_from_picking(self, total, weight, volume, quantity):
price = 0.0
criteria_found = False
price_dict = {'price': total, 'volume': volume, 'weight': weight, 'wv': volume * weight, 'quantity': quantity}
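        # Each rule is evaluated as a python expression over price_dict, e.g.
        # variable='weight', operator='<=', max_value=5.0 becomes
        # safe_eval("weight<=5.0", price_dict).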
for line in self.price_rule_ids:
test = safe_eval(line.variable + line.operator + str(line.max_value), price_dict)
if test:
price = line.list_base_price + line.list_price * price_dict[line.variable_factor]
criteria_found = True
break
if not criteria_found:
raise UserError(_("Selected product in the delivery method doesn't fulfill any of the delivery carrier(s) criteria."))
return price
| hip-odoo/odoo | addons/delivery/models/delivery_carrier.py | Python | agpl-3.0 | 13,010 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import json
from rest_framework.test import APIClient
from rest_framework import status
from shuup.core.models import Order
from shuup.testing.factories import (
create_order_with_product, get_default_product,
get_default_shop, get_default_supplier
)
def create_order():
shop = get_default_shop()
product = get_default_product()
supplier = get_default_supplier()
order = create_order_with_product(
product,
shop=shop,
supplier=supplier,
quantity=1,
taxless_base_unit_price=10,
)
order.cache_prices()
order.save()
return order
def get_client(admin_user):
client = APIClient()
client.force_authenticate(user=admin_user)
return client
def get_create_payment_url(order_pk):
return "/api/shuup/order/%s/create_payment/" % order_pk
def get_set_fully_paid_url(order_pk):
return "/api/shuup/order/%s/set_fully_paid/" % order_pk
def get_order_url(order_pk):
return "/api/shuup/order/%s/" % order_pk
def test_create_payment(admin_user):
order = create_order()
client = get_client(admin_user)
payment_identifier = "some_identifier"
data = {
"amount_value": 1,
"payment_identifier": payment_identifier,
"description": "some_payment"
}
response = client.post(
get_create_payment_url(order.pk),
data,
format="json"
)
assert response.status_code == status.HTTP_201_CREATED
assert order.get_total_paid_amount().value == 1
response = client.get(
get_order_url(order.pk),
format="json"
)
assert response.status_code == status.HTTP_200_OK
order_data = json.loads(response.content.decode("utf-8"))
payments = order_data["payments"]
assert len(payments) == 1
assert payments[0]["payment_identifier"] == payment_identifier
def test_set_fully_paid(admin_user):
order = create_order()
client = get_client(admin_user)
data = {
"payment_identifier": 1,
"description": "some_payment"
}
order_pk = order.pk
response = client.post(
get_set_fully_paid_url(order_pk),
data,
format="json"
)
assert response.status_code == status.HTTP_201_CREATED
order = Order.objects.get(pk=order_pk)
assert bool(order.is_paid())
currently_paid_amount = order.get_total_paid_amount()
# Make sure that api works with already fully paid orders
response = client.post(
"/api/shuup/order/%s/set_fully_paid/" % order_pk,
data,
format="json"
)
assert response.status_code == status.HTTP_200_OK
order = Order.objects.get(pk=order_pk)
assert bool(order.is_paid())
assert currently_paid_amount == order.get_total_paid_amount()
def test_set_paid_from_partially_paid_order(admin_user):
order = create_order()
client = get_client(admin_user)
data = {
"amount_value": 1,
"payment_identifier": 1,
"description": "some_payment"
}
response = client.post(
get_create_payment_url(order.pk),
data,
format="json"
)
assert response.status_code == status.HTTP_201_CREATED
assert order.get_total_paid_amount().value == 1
data = {
"payment_identifier": 2,
"description": "some_payment"
}
order_pk = order.pk
response = client.post(
get_set_fully_paid_url(order_pk),
data,
format="json"
)
assert response.status_code == status.HTTP_201_CREATED
order = Order.objects.get(pk=order_pk)
assert bool(order.is_paid())
assert bool(order.get_total_paid_amount() == order.taxful_total_price.amount)
| suutari/shoop | shuup_tests/core/test_payments_api.py | Python | agpl-3.0 | 3,959 |
'''
@since: 2015-01-07
@author: moschlar
'''
import sqlalchemy.types as sqlat
import tw2.core as twc
import tw2.bootstrap.forms as twb
import tw2.jqplugins.chosen.widgets as twjc
import sprox.widgets.tw2widgets.widgets as sw
from sprox.sa.widgetselector import SAWidgetSelector
from sprox.sa.validatorselector import SAValidatorSelector, Email
from sauce.widgets.widgets import (LargeMixin, SmallMixin, AdvancedWysihtml5,
MediumTextField, SmallTextField, CalendarDateTimePicker)
from sauce.widgets.validators import AdvancedWysihtml5BleachValidator
class ChosenPropertyMultipleSelectField(LargeMixin, twjc.ChosenMultipleSelectField, sw.PropertyMultipleSelectField):
search_contains = True
def _validate(self, value, state=None):
value = super(ChosenPropertyMultipleSelectField, self)._validate(value, state)
if self.required and not value:
raise twc.ValidationError('Please select at least one value')
else:
return value
class ChosenPropertySingleSelectField(SmallMixin, twjc.ChosenSingleSelectField, sw.PropertySingleSelectField):
search_contains = True
class MyWidgetSelector(SAWidgetSelector):
'''Custom WidgetSelector for SAUCE
Primarily uses fields from tw2.bootstrap.forms and tw2.jqplugins.chosen.
'''
text_field_limit = 256
default_multiple_select_field_widget_type = ChosenPropertyMultipleSelectField
default_single_select_field_widget_type = ChosenPropertySingleSelectField
default_name_based_widgets = {
'name': MediumTextField,
'subject': MediumTextField,
'_url': MediumTextField,
'user_name': MediumTextField,
'email_address': MediumTextField,
'_display_name': MediumTextField,
'description': AdvancedWysihtml5,
'message': AdvancedWysihtml5,
}
def __init__(self, *args, **kwargs):
self.default_widgets.update({
sqlat.String: MediumTextField,
sqlat.Integer: SmallTextField,
sqlat.Numeric: SmallTextField,
sqlat.DateTime: CalendarDateTimePicker,
sqlat.Date: twb.CalendarDatePicker,
sqlat.Time: twb.CalendarTimePicker,
sqlat.Binary: twb.FileField,
sqlat.BLOB: twb.FileField,
sqlat.PickleType: MediumTextField,
sqlat.Enum: twjc.ChosenSingleSelectField,
})
super(MyWidgetSelector, self).__init__(*args, **kwargs)
def select(self, field):
widget = super(MyWidgetSelector, self).select(field)
if (issubclass(widget, sw.TextArea)
and hasattr(field.type, 'length')
and (field.type.length is None or field.type.length < self.text_field_limit)):
widget = MediumTextField
return widget
class MyValidatorSelector(SAValidatorSelector):
_name_based_validators = {
'email_address': Email,
'description': AdvancedWysihtml5BleachValidator,
'message': AdvancedWysihtml5BleachValidator,
}
# def select(self, field):
# print 'MyValidatorSelector', 'select', field
# return super(MyValidatorSelector, self).select(field)
| moschlar/SAUCE | sauce/controllers/crc/selectors.py | Python | agpl-3.0 | 3,164 |
""" Code to allow module store to interface with courseware index """
from __future__ import absolute_import
from abc import ABCMeta, abstractmethod
from datetime import timedelta
import logging
import re
from six import add_metaclass
from django.conf import settings
from django.utils.translation import ugettext_lazy, ugettext as _
from django.core.urlresolvers import resolve
from contentstore.course_group_config import GroupConfiguration
from course_modes.models import CourseMode
from eventtracking import tracker
from openedx.core.lib.courses import course_image_url
from search.search_engine_base import SearchEngine
from xmodule.annotator_mixin import html_to_text
from xmodule.modulestore import ModuleStoreEnum
from xmodule.library_tools import normalize_key_for_search
# REINDEX_AGE is the default amount of time that we look back for changes
# that might have happened. If we are provided with a time at which the
# indexing is triggered, then we know it is safe to only index items
# recently changed at that time. This is the time period that represents
# how far back from the trigger point to look back in order to index
REINDEX_AGE = timedelta(0, 60) # 60 seconds
log = logging.getLogger('edx.modulestore')
def strip_html_content_to_text(html_content):
""" Gets only the textual part for html content - useful for building text to be searched """
# Removing HTML-encoded non-breaking space characters
    text_content = re.sub(r"(\s|&nbsp;|//)+", " ", html_to_text(html_content))
# Removing HTML CDATA
text_content = re.sub(r"<!\[CDATA\[.*\]\]>", "", text_content)
# Removing HTML comments
text_content = re.sub(r"<!--.*-->", "", text_content)
return text_content
def indexing_is_enabled():
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False)
class SearchIndexingError(Exception):
""" Indicates some error(s) occured during indexing """
def __init__(self, message, error_list):
super(SearchIndexingError, self).__init__(message)
self.error_list = error_list
@add_metaclass(ABCMeta)
class SearchIndexerBase(object):
"""
Base class to perform indexing for courseware or library search from different modulestores
"""
__metaclass__ = ABCMeta
INDEX_NAME = None
DOCUMENT_TYPE = None
ENABLE_INDEXING_KEY = None
INDEX_EVENT = {
'name': None,
'category': None
}
@classmethod
def indexing_is_enabled(cls):
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get(cls.ENABLE_INDEXING_KEY, False)
@classmethod
@abstractmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
@classmethod
@abstractmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
@classmethod
@abstractmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id
@classmethod
def remove_deleted_items(cls, searcher, structure_key, exclude_items):
"""
remove any item that is present in the search index that is not present in updated list of indexed items
as we find items we can shorten the set of items to keep
"""
response = searcher.search(
doc_type=cls.DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key),
exclude_dictionary={"id": list(exclude_items)}
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DOCUMENT_TYPE, result_ids)
@classmethod
def index(cls, modulestore, structure_key, triggered_at=None, reindex_age=REINDEX_AGE):
"""
Process course for indexing
Arguments:
modulestore - modulestore object to use for operations
structure_key (CourseKey|LibraryKey) - course or library identifier
triggered_at (datetime) - provides time at which indexing was triggered;
useful for index updates - only things changed recently from that date
(within REINDEX_AGE above ^^) will have their index updated, others skip
updating their index but are still walked through in order to identify
which items may need to be removed from the index
If None, then a full reindex takes place
Returns:
Number of items that have been added to the index
"""
error_list = []
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
structure_key = cls.normalize_structure_key(structure_key)
location_info = cls._get_location_info(structure_key)
# Wrap counter in dictionary - otherwise we seem to lose scope inside the embedded function `prepare_item_index`
indexed_count = {
"count": 0
}
# indexed_items is a list of all the items that we wish to remain in the
# index, whether or not we are planning to actually update their index.
# This is used in order to build a query to remove those items not in this
# list - those are ready to be destroyed
indexed_items = set()
# items_index is a list of all the items index dictionaries.
# it is used to collect all indexes and index them using bulk API,
# instead of per item index API call.
items_index = []
def get_item_location(item):
"""
Gets the version agnostic item location
"""
return item.location.version_agnostic().replace(branch=None)
def prepare_item_index(item, skip_index=False, groups_usage_info=None):
"""
Add this item to the items_index and indexed_items list
Arguments:
item - item to add to index, its children will be processed recursively
skip_index - simply walk the children in the tree, the content change is
older than the REINDEX_AGE window and would have been already indexed.
This should really only be passed from the recursive child calls when
this method has determined that it is safe to do so
Returns:
item_content_groups - content groups assigned to indexed item
"""
is_indexable = hasattr(item, "index_dictionary")
item_index_dictionary = item.index_dictionary() if is_indexable else None
# if it's not indexable and it does not have children, then ignore
if not item_index_dictionary and not item.has_children:
return
item_content_groups = None
if item.category == "split_test":
split_partition = item.get_selected_partition()
for split_test_child in item.get_children():
if split_partition:
for group in split_partition.groups:
group_id = unicode(group.id)
child_location = item.group_id_to_child.get(group_id, None)
if child_location == split_test_child.location:
groups_usage_info.update({
unicode(get_item_location(split_test_child)): [group_id],
})
for component in split_test_child.get_children():
groups_usage_info.update({
unicode(get_item_location(component)): [group_id]
})
if groups_usage_info:
item_location = get_item_location(item)
item_content_groups = groups_usage_info.get(unicode(item_location), None)
item_id = unicode(cls._id_modifier(item.scope_ids.usage_id))
indexed_items.add(item_id)
if item.has_children:
# determine if it's okay to skip adding the children herein based upon how recently any may have changed
skip_child_index = skip_index or \
(triggered_at is not None and (triggered_at - item.subtree_edited_on) > reindex_age)
children_groups_usage = []
for child_item in item.get_children():
if modulestore.has_published_version(child_item):
children_groups_usage.append(
prepare_item_index(
child_item,
skip_index=skip_child_index,
groups_usage_info=groups_usage_info
)
)
if None in children_groups_usage:
item_content_groups = None
if skip_index or not item_index_dictionary:
return
item_index = {}
# if it has something to add to the index, then add it
try:
item_index.update(location_info)
item_index.update(item_index_dictionary)
item_index['id'] = item_id
if item.start:
item_index['start_date'] = item.start
item_index['content_groups'] = item_content_groups if item_content_groups else None
item_index.update(cls.supplemental_fields(item))
items_index.append(item_index)
indexed_count["count"] += 1
return item_content_groups
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not fail on one item of many
log.warning('Could not index item: %s - %r', item.location, err)
error_list.append(_('Could not index item: {}').format(item.location))
try:
with modulestore.branch_setting(ModuleStoreEnum.RevisionOption.published_only):
structure = cls._fetch_top_level(modulestore, structure_key)
groups_usage_info = cls.fetch_group_usage(modulestore, structure)
# First perform any additional indexing from the structure object
cls.supplemental_index_information(modulestore, structure)
# Now index the content
for item in structure.get_children():
prepare_item_index(item, groups_usage_info=groups_usage_info)
searcher.index(cls.DOCUMENT_TYPE, items_index)
cls.remove_deleted_items(searcher, structure_key, indexed_items)
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not prevent the rest of the application from working
log.exception(
"Indexing error encountered, courseware index may be out of date %s - %r",
structure_key,
err
)
error_list.append(_('General indexing error occurred'))
if error_list:
raise SearchIndexingError('Error(s) present during indexing', error_list)
return indexed_count["count"]
@classmethod
def _do_reindex(cls, modulestore, structure_key):
"""
(Re)index all content within the given structure (course or library),
tracking the fact that a full reindex has taken place
"""
indexed_count = cls.index(modulestore, structure_key)
if indexed_count:
cls._track_index_request(cls.INDEX_EVENT['name'], cls.INDEX_EVENT['category'], indexed_count)
return indexed_count
@classmethod
def _track_index_request(cls, event_name, category, indexed_count):
"""Track content index requests.
Arguments:
event_name (str): Name of the event to be logged.
category (str): category of indexed items
indexed_count (int): number of indexed items
Returns:
None
"""
data = {
"indexed_count": indexed_count,
'category': category,
}
tracker.emit(
event_name,
data
)
@classmethod
def fetch_group_usage(cls, modulestore, structure): # pylint: disable=unused-argument
"""
Base implementation of fetch group usage on course/library.
"""
return None
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform any supplemental indexing given that the structure object has
already been loaded. Base implementation performs no operation.
Arguments:
modulestore - modulestore object used during the indexing operation
structure - structure object loaded during the indexing job
Returns:
None
"""
pass
@classmethod
def supplemental_fields(cls, item): # pylint: disable=unused-argument
"""
Any supplemental fields that get added to the index for the specified
item. Base implementation returns an empty dictionary
"""
return {}
class CoursewareSearchIndexer(SearchIndexerBase):
"""
Class to perform indexing for courseware search from different modulestores
"""
INDEX_NAME = "courseware_index"
DOCUMENT_TYPE = "courseware_content"
ENABLE_INDEXING_KEY = 'ENABLE_COURSEWARE_INDEX'
INDEX_EVENT = {
'name': 'edx.course.index.reindexed',
'category': 'courseware_index'
}
UNNAMED_MODULE_NAME = ugettext_lazy("(Unnamed)")
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return structure_key
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_course(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def do_course_reindex(cls, modulestore, course_key):
"""
(Re)index all content within the given course, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, course_key)
@classmethod
def fetch_group_usage(cls, modulestore, structure):
groups_usage_dict = {}
groups_usage_info = GroupConfiguration.get_content_groups_usage_info(modulestore, structure).items()
groups_usage_info.extend(
GroupConfiguration.get_content_groups_items_usage_info(
modulestore,
structure
).items()
)
if groups_usage_info:
for name, group in groups_usage_info:
for module in group:
view, args, kwargs = resolve(module['url']) # pylint: disable=unused-variable
usage_key_string = unicode(kwargs['usage_key_string'])
if groups_usage_dict.get(usage_key_string, None):
groups_usage_dict[usage_key_string].append(name)
else:
groups_usage_dict[usage_key_string] = [name]
return groups_usage_dict
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform additional indexing from loaded structure object
"""
CourseAboutSearchIndexer.index_about_information(modulestore, structure)
@classmethod
def supplemental_fields(cls, item):
"""
Add location path to the item object
Once we've established the path of names, the first name is the course
name, and the next 3 names are the navigable path within the edx
application. Notice that we stop at that level because a full path to
deep children would be confusing.
"""
location_path = []
parent = item
while parent is not None:
path_component_name = parent.display_name
if not path_component_name:
path_component_name = unicode(cls.UNNAMED_MODULE_NAME)
location_path.append(path_component_name)
parent = parent.get_parent()
location_path.reverse()
return {
"course_name": location_path[0],
"location": location_path[1:4]
}
class LibrarySearchIndexer(SearchIndexerBase):
"""
Base class to perform indexing for library search from different modulestores
"""
INDEX_NAME = "library_index"
DOCUMENT_TYPE = "library_content"
ENABLE_INDEXING_KEY = 'ENABLE_LIBRARY_INDEX'
INDEX_EVENT = {
'name': 'edx.library.index.reindexed',
'category': 'library_index'
}
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return normalize_key_for_search(structure_key)
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_library(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"library": unicode(normalized_structure_key)}
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id.replace(library_key=(usage_id.library_key.replace(version_guid=None, branch=None)))
@classmethod
def do_library_reindex(cls, modulestore, library_key):
"""
(Re)index all content within the given library, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, library_key)
class AboutInfo(object):
""" About info structure to contain
1) Property name to use
2) Where to add in the index (using flags above)
3) Where to source the properties value
"""
# Bitwise Flags for where to index the information
#
# ANALYSE - states that the property text contains content that we wish to be able to find matched within
# e.g. "joe" should yield a result for "I'd like to drink a cup of joe"
#
# PROPERTY - states that the property text should be a property of the indexed document, to be returned with the
# results: search matches will only be made on exact string matches
# e.g. "joe" will only match on "joe"
#
# We are using bitwise flags because one may want to add the property to EITHER or BOTH parts of the index
# e.g. university name is desired to be analysed, so that a search on "Oxford" will match
# property values "University of Oxford" and "Oxford Brookes University",
# but it is also a useful property, because within a (future) filtered search a user
# may have chosen to filter courses from "University of Oxford"
#
# see https://wiki.python.org/moin/BitwiseOperators for information about bitwise shift operator used below
#
ANALYSE = 1 << 0 # Add the information to the analysed content of the index
PROPERTY = 1 << 1 # Add the information as a property of the object being indexed (not analysed)
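    # e.g. AboutInfo("title", ANALYSE | PROPERTY, ...) indexes "title" both as
    # searchable free text and as an exact-match property of the document.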
def __init__(self, property_name, index_flags, source_from):
self.property_name = property_name
self.index_flags = index_flags
self.source_from = source_from
def get_value(self, **kwargs):
""" get the value for this piece of information, using the correct source """
return self.source_from(self, **kwargs)
def from_about_dictionary(self, **kwargs):
""" gets the value from the kwargs provided 'about_dictionary' """
about_dictionary = kwargs.get('about_dictionary', None)
if not about_dictionary:
raise ValueError("Context dictionary does not contain expected argument 'about_dictionary'")
return about_dictionary.get(self.property_name, None)
def from_course_property(self, **kwargs):
""" gets the value from the kwargs provided 'course' """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return getattr(course, self.property_name, None)
def from_course_mode(self, **kwargs):
""" fetches the available course modes from the CourseMode model """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return [mode.slug for mode in CourseMode.modes_for_course(course.id)]
# Source location options - either from the course or the about info
FROM_ABOUT_INFO = from_about_dictionary
FROM_COURSE_PROPERTY = from_course_property
FROM_COURSE_MODE = from_course_mode
class CourseAboutSearchIndexer(object):
"""
Class to perform indexing of about information from course object
"""
DISCOVERY_DOCUMENT_TYPE = "course_info"
INDEX_NAME = CoursewareSearchIndexer.INDEX_NAME
# List of properties to add to the index - each item in the list is an instance of AboutInfo object
ABOUT_INFORMATION_TO_INCLUDE = [
AboutInfo("advertised_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("announcement", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("effort", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("display_name", AboutInfo.ANALYSE, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("overview", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("title", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("university", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("number", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("short_description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("key_dates", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("video", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_short", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_extended", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("requirements", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("syllabus", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("textbook", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("faq", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("more_info", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("ocw_links", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("enrollment_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("enrollment_end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("org", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("modes", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_MODE),
AboutInfo("language", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
]
@classmethod
def index_about_information(cls, modulestore, course):
"""
Add the given course to the course discovery index
Arguments:
modulestore - modulestore object to use for operations
course - course object from which to take properties, locate about information
"""
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
course_id = unicode(course.id)
course_info = {
'id': course_id,
'course': course_id,
'content': {},
'image_url': course_image_url(course),
}
# load data for all of the 'about' modules for this course into a dictionary
about_dictionary = {
item.location.name: item.data
for item in modulestore.get_items(course.id, qualifiers={"category": "about"})
}
about_context = {
"course": course,
"about_dictionary": about_dictionary,
}
for about_information in cls.ABOUT_INFORMATION_TO_INCLUDE:
# Broad exception handler so that a single bad property does not scupper the collection of others
try:
section_content = about_information.get_value(**about_context)
except: # pylint: disable=bare-except
section_content = None
log.warning(
"Course discovery could not collect property %s for course %s",
about_information.property_name,
course_id,
exc_info=True,
)
if section_content:
if about_information.index_flags & AboutInfo.ANALYSE:
analyse_content = section_content
if isinstance(section_content, basestring):
analyse_content = strip_html_content_to_text(section_content)
course_info['content'][about_information.property_name] = analyse_content
if about_information.index_flags & AboutInfo.PROPERTY:
course_info[about_information.property_name] = section_content
# Broad exception handler to protect around and report problems with indexing
try:
searcher.index(cls.DISCOVERY_DOCUMENT_TYPE, [course_info])
except: # pylint: disable=bare-except
log.exception(
"Course discovery indexing error encountered, course discovery index may be out of date %s",
course_id,
)
raise
log.debug(
"Successfully added %s course to the course discovery index",
course_id
)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def remove_deleted_items(cls, structure_key):
""" Remove item from Course About Search_index """
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
response = searcher.search(
doc_type=cls.DISCOVERY_DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key)
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DISCOVERY_DOCUMENT_TYPE, result_ids)
| naresh21/synergetics-edx-platform | cms/djangoapps/contentstore/courseware_index.py | Python | agpl-3.0 | 27,600 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2013 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import stock_production_lot_ext
import stock_picking_ext
import stock_move_ext
import purchase_order_ext
import stock_move_split_ext
| avanzosc/avanzosc6.1 | avanzosc_net_weight_in_lots/__init__.py | Python | agpl-3.0 | 1,119 |
# coding: utf-8
# The Hazard Library
# Copyright (C) 2012 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`AtkinsonWald2007`.
"""
from __future__ import division
import numpy as np
from openquake.hazardlib.gsim.base import IPE
from openquake.hazardlib import const
from openquake.hazardlib.imt import MMI
class AtkinsonWald2007(IPE):
"""
Implements IPE developed by Atkinson and Wald (2007)
California, USA
MS!
"""
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
MMI
])
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL
])
# TODO !
REQUIRES_SITES_PARAMETERS = set(('vs30', ))
REQUIRES_RUPTURE_PARAMETERS = set(('mag',))
REQUIRES_DISTANCES = set(('rrup', ))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
h = 14.0
R = np.sqrt(dists.rrup**2 + h**2)
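        # R folds the fixed pseudo-depth term h into the rupture distance, so
        # R >= h and the predicted intensity stays finite at rrup = 0.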
B = np.zeros_like(dists.rrup)
B[R > 30.] = np.log10(R / 30.)[R > 30.]
        mean_mmi = (12.27 + 2.270 * (rup.mag - 6)
                    + 0.1304 * (rup.mag - 6) ** 2
                    - 1.30 * np.log10(R)
                    - 0.0007070 * R
                    + 1.95 * B
                    - 0.577 * rup.mag * np.log10(R))
mean_mmi += self.compute_site_term(sites)
mean_mmi = mean_mmi.clip(min=1, max=12)
stddevs = np.zeros_like(dists.rrup)
stddevs.fill(0.4)
stddevs = stddevs.reshape(1, len(stddevs))
return mean_mmi, stddevs
def compute_site_term(self, sites):
# TODO !
return 0
| ROB-Seismology/oq-hazardlib | openquake/hazardlib/gsim/atkinson_wald_2007.py | Python | agpl-3.0 | 2,419 |
#!/usr/bin/env python3
"""
A simple bot to gather some census data in IRC channels.
It is intended to sit in a channel and collect the data for statistics.
:author: tpltnt
:license: AGPLv3
"""
import irc.bot
import irc.strings
from irc.client import ServerConnection, ip_numstr_to_quad, ip_quad_to_numstr
class CensusBot(irc.bot.SingleServerIRCBot):
"""
The class implementing the census bot.
"""
def __init__(self, channel, nickname, server, port=6667):
"""
The constructor for the CensusBot class.
:param channel: name of the channel to join
:type channel: str
:param nickname: nick of the bot (to use)
:type nickname: str
:param server: FQDN of the server to use
:type server: str
:param port: port to use when connecting to the server
:type port: int
"""
        if not channel.startswith('#'):
channel = '#' + channel
irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
self.channel = channel
def on_nickname_in_use(self, connection, event):
"""
Change own nickname if already in use.
:param connection: connection to the server
:type connection: irc.client.ServerConnection
:param event: event to react to
:type event:
:raises: TypeError
"""
if not isinstance(connection, ServerConnection):
raise TypeError("'connection' is not of type 'ServerConnection'")
connection.nick(connection.get_nickname() + "_")
def main():
import sys
if len(sys.argv) != 4:
print("Usage: " + sys.argv[0] + " <server[:port]> <channel> <nickname>")
sys.exit(1)
server = sys.argv[1].split(":", 1)
host = server[0]
if len(server) == 2:
try:
port = int(server[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
channel = sys.argv[2]
nickname = sys.argv[3]
    bot = CensusBot(channel, nickname, host, port)  # pass the parsed host, not the split list
bot.start()
if __name__ == "__main__":
main()
| tpltnt/ircensus | ircensus_channel_bot.py | Python | agpl-3.0 | 2,132 |
# Amara, universalsubtitles.org
#
# Copyright (C) 2017 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import uuid
from django.conf import settings
from django.core.cache import cache
from django.urls import reverse
def _mk_key(token):
return "one-time-data-" + token
def set_one_time_data(data):
token = str(uuid.uuid4())
key = _mk_key(token)
cache.set(key, data, 60)
return '{}://{}{}'.format(settings.DEFAULT_PROTOCOL,
settings.HOSTNAME,
reverse("one_time_url", kwargs={"token": token}))
def get_one_time_data(token):
key = _mk_key(token)
data = cache.get(key)
# It seems like Brightcove wants to hit it twice
# cache.delete(key)
return data
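# Usage sketch (hypothetical payload): the producer shares the URL returned by
#   set_one_time_data('{"kind": "example"}')
# and the view behind the "one_time_url" route calls get_one_time_data(token)
# with the token parsed from that URL; entries expire after the 60-second
# cache timeout set above.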
| pculture/unisubs | utils/one_time_data.py | Python | agpl-3.0 | 1,410 |
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import hr_certification
from . import hr_training_participant
| open-synergy/opnsynid-hr | hr_employee_training_experience/models/__init__.py | Python | agpl-3.0 | 197 |
"""Make session:proposal 1:1.
Revision ID: 3a6b2ab00e3e
Revises: 4dbf686f4380
Create Date: 2013-11-09 13:51:58.343243
"""
# revision identifiers, used by Alembic.
revision = '3a6b2ab00e3e'
down_revision = '4dbf686f4380'
from alembic import op
def upgrade():
op.create_unique_constraint('session_proposal_id_key', 'session', ['proposal_id'])
def downgrade():
op.drop_constraint('session_proposal_id_key', 'session', 'unique')
| hasgeek/funnel | migrations/versions/3a6b2ab00e3e_session_proposal_one.py | Python | agpl-3.0 | 441 |
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Noé Rubinstein
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import re
from weboob.capabilities.gallery import ICapGallery, BaseGallery, BaseImage
from weboob.tools.backend import BaseBackend
from weboob.tools.browser import BaseBrowser, BasePage
__all__ = ['GenericComicReaderBackend']
class DisplayPage(BasePage):
def get_page(self, gallery):
src = self.document.xpath(self.browser.params['img_src_xpath'])[0]
return BaseImage(src,
gallery=gallery,
url=src)
def page_list(self):
return self.document.xpath(self.browser.params['page_list_xpath'])
class GenericComicReaderBrowser(BaseBrowser):
def __init__(self, browser_params, *args, **kwargs):
self.params = browser_params
BaseBrowser.__init__(self, *args, **kwargs)
def iter_gallery_images(self, gallery):
self.location(gallery.url)
assert self.is_on_page(DisplayPage)
for p in self.page.page_list():
if 'page_to_location' in self.params:
self.location(self.params['page_to_location'] % p)
else:
self.location(p)
assert self.is_on_page(DisplayPage)
yield self.page.get_page(gallery)
def fill_image(self, image, fields):
if 'data' in fields:
image.data = self.readurl(image.url)
class GenericComicReaderBackend(BaseBackend, ICapGallery):
NAME = 'genericcomicreader'
MAINTAINER = u'Noé Rubinstein'
EMAIL = '[email protected]'
VERSION = '0.f'
DESCRIPTION = 'Generic comic reader backend; subclasses implement specific sites'
LICENSE = 'AGPLv3+'
BROWSER = GenericComicReaderBrowser
BROWSER_PARAMS = {}
ID_REGEXP = None
URL_REGEXP = None
ID_TO_URL = None
PAGES = {}
def create_default_browser(self):
b = self.create_browser(self.BROWSER_PARAMS)
b.PAGES = self.PAGES
try:
b.DOMAIN = self.DOMAIN
except AttributeError:
pass
return b
def iter_gallery_images(self, gallery):
with self.browser:
return self.browser.iter_gallery_images(gallery)
def get_gallery(self, _id):
match = re.match(r'^%s$' % self.URL_REGEXP, _id)
if match:
_id = match.group(1)
else:
match = re.match(r'^%s$' % self.ID_REGEXP, _id)
if match:
_id = match.group(0)
else:
return None
gallery = BaseGallery(_id, url=(self.ID_TO_URL % _id))
with self.browser:
return gallery
def fill_gallery(self, gallery, fields):
gallery.title = gallery.id
def fill_image(self, image, fields):
with self.browser:
self.browser.fill_image(image, fields)
OBJECTS = {
BaseGallery: fill_gallery,
BaseImage: fill_image}
| franek/weboob | weboob/tools/capabilities/gallery/genericcomicreader.py | Python | agpl-3.0 | 3,614 |
import os
test_dir = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(test_dir, 'db.sqlite3'),
}
}
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.auth',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.staticfiles',
'imperavi',
'tinymce',
'newsletter'
]
# Imperavi is not compatible with Django 1.9+
import django
if django.VERSION >= (1, 9):
INSTALLED_APPS.remove('imperavi')
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'test_project.urls'
FIXTURE_DIRS = [os.path.join(test_dir, 'fixtures'), ]
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(test_dir, 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Enable time-zone support
USE_TZ = True
TIME_ZONE = 'UTC'
# Required for django-webtest to work
STATIC_URL = '/static/'
# Random secret key
import random
key_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
SECRET_KEY = ''.join([
random.SystemRandom().choice(key_chars) for i in range(50)
])
# Logs all newsletter app messages to the console
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'newsletter': {
'handlers': ['console'],
'propagate': True,
},
},
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
| dsanders11/django-newsletter | test_project/test_project/settings.py | Python | agpl-3.0 | 2,081 |
class FieldRegistry(object):
_registry = {}
def add_field(self, model, field):
reg = self.__class__._registry.setdefault(model, [])
reg.append(field)
def get_fields(self, model):
return self.__class__._registry.get(model, [])
def __contains__(self, model):
return model in self.__class__._registry
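# Illustrative usage (model and field names are hypothetical):
#   registry = FieldRegistry()
#   registry.add_field(Article, created_by_field)
#   if Article in registry:
#       fields = registry.get_fields(Article)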
| feroda/django-pro-history | current_user/registration.py | Python | agpl-3.0 | 349 |
# Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
from zope.component import (
ComponentLookupError,
getMultiAdapter,
)
from zope.configuration import xmlconfig
from zope.interface import (
implements,
Interface,
)
from zope.publisher.interfaces.browser import (
IBrowserPublisher,
IDefaultBrowserLayer,
)
from zope.testing.cleanup import cleanUp
from lp.services.webapp import Navigation
from lp.testing import TestCase
class TestNavigationDirective(TestCase):
def test_default_layer(self):
# By default all navigation classes are registered for
# IDefaultBrowserLayer.
directive = """
<browser:navigation
module="%(this)s" classes="ThingNavigation"/>
""" % dict(this=this)
xmlconfig.string(zcml_configure % directive)
navigation = getMultiAdapter(
(Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='')
self.assertIsInstance(navigation, ThingNavigation)
def test_specific_layer(self):
# If we specify a layer when registering a navigation class, it will
# only be available on that layer.
directive = """
<browser:navigation
module="%(this)s" classes="OtherThingNavigation"
layer="%(this)s.IOtherLayer" />
""" % dict(this=this)
xmlconfig.string(zcml_configure % directive)
self.assertRaises(
ComponentLookupError,
getMultiAdapter,
(Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='')
navigation = getMultiAdapter(
(Thing(), OtherLayer()), IBrowserPublisher, name='')
self.assertIsInstance(navigation, OtherThingNavigation)
def test_multiple_navigations_for_single_context(self):
# It is possible to have multiple navigation classes for a given
# context class as long as they are registered for different layers.
directive = """
<browser:navigation
module="%(this)s" classes="ThingNavigation"/>
<browser:navigation
module="%(this)s" classes="OtherThingNavigation"
layer="%(this)s.IOtherLayer" />
""" % dict(this=this)
xmlconfig.string(zcml_configure % directive)
navigation = getMultiAdapter(
(Thing(), DefaultBrowserLayer()), IBrowserPublisher, name='')
other_navigation = getMultiAdapter(
(Thing(), OtherLayer()), IBrowserPublisher, name='')
self.assertNotEqual(navigation, other_navigation)
def tearDown(self):
TestCase.tearDown(self)
cleanUp()
class DefaultBrowserLayer:
implements(IDefaultBrowserLayer)
class IThing(Interface):
pass
class Thing(object):
implements(IThing)
class ThingNavigation(Navigation):
usedfor = IThing
class OtherThingNavigation(Navigation):
usedfor = IThing
class IOtherLayer(Interface):
pass
class OtherLayer:
implements(IOtherLayer)
this = "lp.services.webapp.tests.test_navigation"
zcml_configure = """
<configure xmlns:browser="http://namespaces.zope.org/browser">
<include package="lp.services.webapp" file="meta.zcml" />
%s
</configure>
"""
| abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/webapp/tests/test_navigation.py | Python | agpl-3.0 | 3,377 |
import os
import sys
import glob
import json
import subprocess
from collections import defaultdict
from utils import UnicodeReader, slugify, count_pages, combine_pdfs, parser
import addresscleaner
from click2mail import Click2MailBatch
parser.add_argument("directory", help="Path to downloaded mail batch")
parser.add_argument("--skip-letters", action='store_true', default=False)
parser.add_argument("--skip-postcards", action='store_true', default=False)
def fix_lines(address):
"""
Click2Mail screws up addresses with 3 lines. If we have only one address
line, put it in "address1". If we have more, put the first in
"organization", and subsequent ones in "addressN".
"""
lines = [a for a in [
address.get('organization', None),
address.get('address1', None),
address.get('address2', None),
address.get('address3', None)] if a]
if len(lines) == 1:
address['organization'] = ''
address['address1'] = lines[0]
address['address2'] = ''
address['address3'] = ''
if len(lines) >= 2:
address['organization'] = lines[0]
address['address1'] = lines[1]
address['address2'] = ''
address['address3'] = ''
if len(lines) >= 3:
address['address2'] = lines[2]
address['address3'] = ''
if len(lines) >= 4:
address['address3'] = lines[3]
return address
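# Illustrative example of fix_lines (hypothetical data): an address parsed as
#   {'address1': 'Attn: Jane Doe', 'address2': '123 Main St', 'address3': 'Apt 4'}
# is rewritten to
#   {'organization': 'Attn: Jane Doe', 'address1': '123 Main St',
#    'address2': 'Apt 4', 'address3': ''}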
def collate_letters(mailing_dir, letters, page=1):
# Sort by recipient.
recipient_letters = defaultdict(list)
for letter in letters:
recipient_letters[(letter['recipient'], letter['sender'])].append(letter)
# Assemble list of files and jobs.
files = []
jobs = {}
for (recipient, sender), letters in recipient_letters.iteritems():
count = 0
for letter in letters:
filename = os.path.join(mailing_dir, letter["file"])
files.append(filename)
count += count_pages(filename)
end = page + count
jobs[recipient] = {
"startingPage": page,
"endingPage": end - 1,
"recipients": [fix_lines(addresscleaner.parse_address(recipient))],
"sender": addresscleaner.parse_address(sender),
"type": "letter"
}
page = end
vals = jobs.values()
vals.sort(key=lambda j: j['startingPage'])
return files, vals, page
def collate_postcards(postcards, page=1):
# Collate postcards into a list per type and sender.
type_sender_postcards = defaultdict(list)
for letter in postcards:
key = (letter['type'], letter['sender'])
type_sender_postcards[key].append(letter)
files = []
jobs = []
for (postcard_type, sender), letters in type_sender_postcards.iteritems():
files.append(os.path.join(
os.path.dirname(__file__),
"postcards",
"{}.pdf".format(postcard_type)
))
jobs.append({
"startingPage": page + len(files) - 1,
"endingPage": page + len(files) - 1,
"recipients": [
fix_lines(addresscleaner.parse_address(letter['recipient'])) for letter in letters
],
"sender": addresscleaner.parse_address(sender),
"type": "postcard",
})
return files, jobs, page + len(files)
def run_batch(args, files, jobs):
filename = combine_pdfs(files)
print "Building job with", filename
batch = Click2MailBatch(
username=args.username,
password=args.password,
filename=filename,
jobs=jobs,
staging=args.staging)
if batch.run(args.dry_run):
os.remove(filename)
def main():
args = parser.parse_args()
if args.directory.endswith(".zip"):
directory = os.path.abspath(args.directory[0:-len(".zip")])
if not os.path.exists(directory):
subprocess.check_call([
"unzip", args.directory, "-d", os.path.dirname(args.directory)
])
else:
directory = args.directory
with open(os.path.join(directory, "manifest.json")) as fh:
manifest = json.load(fh)
if manifest["letters"] and not args.skip_letters:
lfiles, ljobs, lpage = collate_letters(directory, manifest["letters"], 1)
print "Found", len(ljobs), "letter jobs"
if ljobs:
run_batch(args, lfiles, ljobs)
if manifest["postcards"] and not args.skip_postcards:
pfiles, pjobs, ppage = collate_postcards(manifest["postcards"], 1)
print "Found", len(pjobs), "postcard jobs"
if pjobs:
run_batch(args, pfiles, pjobs)
if __name__ == "__main__":
main()
| yourcelf/btb | printing/print_mail.py | Python | agpl-3.0 | 4,703 |
# -*- coding: utf-8 -*-
# Copyright 2017 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import test_employee_display_own_info
| VitalPet/addons-onestein | hr_employee_display_own_info/tests/__init__.py | Python | agpl-3.0 | 192 |
from django.conf.urls import patterns, url
from application import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^(?P<application_id>\d+)/$', views.detail, name='detail'),
url(r'^klogin/(?P<username>\w+)/(?P<password>\w+)/$', views.klogin, name='klogin'),
)
| davidegalletti/koa-proof-of-concept | kag/application/urls.py | Python | agpl-3.0 | 305 |
from disco.core import Disco, result_iterator
from disco.settings import DiscoSettings
from disco.func import chain_reader
from discodex.objects import DataSet
from freequery.document import docparse
from freequery.document.docset import Docset
from freequery.index.tf_idf import TfIdf
class IndexJob(object):
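    """Build a TF-IDF index over a docset with a Disco map/reduce job, then
    load the partitioned results into a discodex index."""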
def __init__(self, spec, discodex,
disco_addr="disco://localhost", profile=False):
# TODO(sqs): refactoring potential with PagerankJob
self.spec = spec
self.discodex = discodex
self.docset = Docset(spec.docset_name)
self.disco = Disco(DiscoSettings()['DISCO_MASTER'])
self.nr_partitions = 8
self.profile = profile
def start(self):
results = self.__run_job(self.__index_job())
self.__run_discodex_index(results)
def __run_job(self, job):
results = job.wait()
if self.profile:
self.__profile_job(job)
return results
def __index_job(self):
return self.disco.new_job(
name="index_tfidf",
input=['tag://' + self.docset.ddfs_tag],
map_reader=docparse,
map=TfIdf.map,
reduce=TfIdf.reduce,
sort=True,
partitions=self.nr_partitions,
partition=TfIdf.partition,
merge_partitions=False,
profile=self.profile,
params=dict(doc_count=self.docset.doc_count))
def __run_discodex_index(self, results):
opts = {
'parser': 'disco.func.chain_reader',
'demuxer': 'freequery.index.tf_idf.TfIdf_demux',
'nr_ichunks': 1, # TODO(sqs): after disco#181 fixed, increase this
}
ds = DataSet(input=results, options=opts)
origname = self.discodex.index(ds)
self.disco.wait(origname) # origname is also the disco job name
self.discodex.clone(origname, self.spec.invindex_name)
| sqs/freequery | freequery/index/job.py | Python | agpl-3.0 | 1,947 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from intelmq.lib import utils
from intelmq.lib.bot import Bot
from intelmq.lib.message import Event
class MalwareGroupIPsParserBot(Bot):
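    """Parse the HTML table of IPs published by malwaregroup into 'malware'
    events carrying source.ip and time.source."""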
def process(self):
report = self.receive_message()
if not report:
self.acknowledge_message()
return
        if not report.contains("raw"):
            self.acknowledge_message()
            return
raw_report = utils.base64_decode(report.value("raw"))
raw_report = raw_report.split("<tbody>")[1]
raw_report = raw_report.split("</tbody>")[0]
raw_report_splitted = raw_report.split("<tr>")
for row in raw_report_splitted:
row = row.strip()
if row == "":
continue
row_splitted = row.split("<td>")
ip = row_splitted[1].split('">')[1].split("<")[0].strip()
time_source = row_splitted[6].replace("</td></tr>", "").strip()
time_source = time_source + " 00:00:00 UTC"
event = Event(report)
event.add('time.source', time_source, sanitize=True)
event.add('classification.type', u'malware')
event.add('source.ip', ip, sanitize=True)
event.add('raw', row, sanitize=True)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = MalwareGroupIPsParserBot(sys.argv[1])
bot.start()
| sch3m4/intelmq | intelmq/bots/parsers/malwaregroup/parser_ips.py | Python | agpl-3.0 | 1,460 |
# Simple script to run required operations to
# 1. Download FASTAs from database
# 2. Copy FASTAs to nextflu directory
# 3. Download titer tables from database
# 4. Copy titer tables to nextflu directory
# Run from base fauna directory with python flu/download_all.py
# Assumes that nextflu/, nextflu-cdc/ and nextflu-cdc-fra/ are
# sister directories to fauna/
import os, subprocess, sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--virus', default="flu", help="virus to download; default is flu")
parser.add_argument('--flu_lineages', default=["h3n2", "h1n1pdm", "vic", "yam"], nargs='+', type=str, help="seasonal flu lineages to download, options are h3n2, h1n1pdm, vic and yam")
parser.add_argument('--segments', type=str, default=['ha', 'na'], nargs='+', help="specify segment(s) to download")
parser.add_argument('--sequences', default=False, action="store_true", help="download sequences from vdb")
parser.add_argument('--titers', default=False, action="store_true", help="download titers from tdb")
parser.add_argument('--titers_sources', default=["base", "crick", "cdc", "niid", "vidrl"], nargs='+', type=str, help="titer sources to download, options are base, cdc, crick, niid and vidrl")
parser.add_argument('--titers_passages', default=["egg", "cell"], nargs='+', type=str, help="titer passage types to download, options are egg and cell")
def concatenate_titers(params, passage, assay):
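    """Concatenate the per-source titer TSVs of each lineage into combined
    'who' (all sources) and 'public' (base + cdc) files, e.g.
    data/h3n2_who_hi_cell_titers.tsv."""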
for lineage in params.flu_lineages:
out = 'data/%s_who_%s_%s_titers.tsv'%(lineage, assay, passage)
hi_titers = []
for source in params.titers_sources:
hi_titers_file = 'data/%s_%s_%s_%s_titers.tsv'%(lineage, source, assay, passage)
if os.path.isfile(hi_titers_file):
hi_titers.append(hi_titers_file)
if len(hi_titers) > 0:
with open(out, 'w+') as f:
call = ['cat'] + hi_titers
print call
subprocess.call(call, stdout=f)
for lineage in params.flu_lineages:
out = 'data/%s_public_%s_%s_titers.tsv'%(lineage, assay, passage)
hi_titers = []
for source in ["base", "cdc"]:
hi_titers_file = 'data/%s_%s_%s_%s_titers.tsv'%(lineage, source, assay, passage)
if os.path.isfile(hi_titers_file):
hi_titers.append(hi_titers_file)
if len(hi_titers) > 0:
with open(out, 'w+') as f:
call = ['cat'] + hi_titers
print call
subprocess.call(call, stdout=f)
if __name__=="__main__":
params = parser.parse_args()
if params.virus == "flu":
# Download FASTAs from database
if params.sequences:
segments = params.segments
for segment in segments:
for lineage in params.flu_lineages:
call = "python vdb/flu_download.py -db vdb -v flu --select locus:%s lineage:seasonal_%s --fstem %s_%s --resolve_method split_passage"%(segment.upper(), lineage, lineage, segment)
print(call)
os.system(call)
if params.titers:
# download titers
for source in params.titers_sources:
if source == "base":
for lineage in params.flu_lineages:
call = "python tdb/download.py -db tdb -v flu --subtype %s --select assay_type:hi --fstem %s_base_hi_cell"%(lineage, lineage)
print(call)
os.system(call)
if source in ["cdc", "crick", "niid", "vidrl"]:
for passage in params.titers_passages:
for lineage in params.flu_lineages:
call = "python tdb/download.py -db %s_tdb -v flu --subtype %s --select assay_type:hi serum_passage_category:%s --fstem %s_%s_hi_%s"%(source, lineage, passage, lineage, source, passage)
print(call)
os.system(call)
lineage = 'h3n2'
call = "python tdb/download.py -db %s_tdb -v flu --subtype %s --select assay_type:fra serum_passage_category:%s --fstem %s_%s_fra_%s"%(source, lineage, passage, lineage, source, passage)
print(call)
os.system(call)
if source == "cdc":
for lineage in params.flu_lineages:
call = "python tdb/download.py -db %s_tdb -v flu --subtype %s --select assay_type:hi serum_host:human --fstem %s_%s_hi_%s_human"%(source, lineage, lineage, source, passage)
print(call)
os.system(call)
lineage = 'h3n2'
call = "python tdb/download.py -db %s_tdb -v flu --subtype %s --select assay_type:fra serum_host:human --fstem %s_%s_fra_%s_human"%(source, lineage, lineage, source, passage)
print(call)
os.system(call)
# concatenate to create default HI strain TSVs for each subtype
concatenate_titers(params, "cell", "hi")
concatenate_titers(params, "cell", "fra")
concatenate_titers(params, "egg", "hi")
concatenate_titers(params, "egg", "fra")
elif params.virus == "ebola":
call = "python vdb/ebola_download.py -db vdb -v ebola --fstem ebola"
print(call)
os.system(call)
elif params.virus == "dengue":
# Download all serotypes together.
call = "python vdb/dengue_download.py"
print(call)
os.system(call)
# Download individual serotypes.
serotypes = [1, 2, 3, 4]
for serotype in serotypes:
call = "python vdb/dengue_download.py --select serotype:%i" % serotype
print(call)
os.system(call)
# Download titers.
if params.titers:
call = "python tdb/download.py -db tdb -v dengue --fstem dengue"
print(call)
os.system(call)
elif params.virus == "zika":
call = "python vdb/zika_download.py -db vdb -v zika --fstem zika"
print(call)
os.system(call)
elif params.virus == "mumps":
call = "python vdb/mumps_download.py -db vdb -v mumps --fstem mumps --resolve_method choose_genbank"
print(call)
os.system(call)
elif params.virus == "h7n9" or params.virus == "avian":
os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:PB2 --fstem h7n9_pb2")
os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:PB1 --fstem h7n9_pb1")
os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:PA --fstem h7n9_pa")
os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:HA --fstem h7n9_ha")
os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:NP --fstem h7n9_np")
os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:NA --fstem h7n9_na")
os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:MP --fstem h7n9_mp")
os.system("python vdb/h7n9_download.py -db vdb -v h7n9 --select locus:NS --fstem h7n9_ns")
else:
print("%s is an invalid virus type.\nValid viruses are flu, ebola, dengue, zika, mumps, h7n9, and avian."%(params.virus))
sys.exit(2)
| blab/nextstrain-db | download_all.py | Python | agpl-3.0 | 7,407 |
# -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Product Code Unique",
"summary": "Add the unique property to default_code field",
"version": "9.0.1.0.0",
"category": "Product",
"website": "https://odoo-community.org/",
"author": "<Deysy Mascorro>, Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"external_dependencies": {
"python": [],
"bin": [],
},
"depends": [
"base",
"product",
],
"data": [
"views/product_view.xml"
],
"demo": [
],
"qweb": [
]
}
| Gebesa-Dev/Addons-gebesa | product_code_unique/__openerp__.py | Python | agpl-3.0 | 701 |
import ddt
from django.contrib.auth import login, authenticate
from importlib import import_module
from django_lti_tool_provider import AbstractApplicationHookManager
from mock import patch, Mock
from oauth2 import Request, Consumer, SignatureMethod_HMAC_SHA1
from django.contrib.auth.models import User
from django.test.utils import override_settings
from django.test import Client, TestCase, RequestFactory
from django.conf import settings
from django_lti_tool_provider.models import LtiUserData
from django_lti_tool_provider.views import LTIView
@override_settings(
LTI_CLIENT_KEY='qertyuiop1234567890!@#$%^&*()_+[];',
LTI_CLIENT_SECRET='1234567890!@#$%^&*()_+[];./,;qwertyuiop'
)
class LtiRequestsTestBase(TestCase):
_data = {
"lis_result_sourcedid": "lis_result_sourcedid",
"context_id": "LTIX/LTI-101/now",
"user_id": "1234567890",
"roles": ["Student"],
"lis_outcome_service_url": "lis_outcome_service_url",
"resource_link_id": "resource_link_id",
"lti_version": "LTI-1p0",
'lis_person_sourcedid': 'username',
'lis_person_contact_email_primary': '[email protected]'
}
_url_base = 'http://testserver'
DEFAULT_REDIRECT = '/home'
def setUp(self):
self.client = Client()
self.hook_manager = Mock(spec=AbstractApplicationHookManager)
self.hook_manager.vary_by_key = Mock(return_value=None)
self.hook_manager.optional_lti_parameters = Mock(return_value={})
LTIView.register_authentication_manager(self.hook_manager)
@property
def consumer(self):
return Consumer(settings.LTI_CLIENT_KEY, settings.LTI_CLIENT_SECRET)
def _get_signed_oauth_request(self, path, method, data=None):
data = data if data is not None else self._data
url = self._url_base + path
method = method if method else 'GET'
req = Request.from_consumer_and_token(self.consumer, {}, method, url, data)
req.sign_request(SignatureMethod_HMAC_SHA1(), self.consumer, None)
return req
def get_correct_lti_payload(self, path='/lti/', method='POST', data=None):
req = self._get_signed_oauth_request(path, method, data)
return req.to_postdata()
def get_incorrect_lti_payload(self, path='/lti/', method='POST', data=None):
req = self._get_signed_oauth_request(path, method, data)
req['oauth_signature'] += '_broken'
return req.to_postdata()
def send_lti_request(self, payload, client=None):
client = client or self.client
return client.post('/lti/', payload, content_type='application/x-www-form-urlencoded')
def _authenticate(self, username='test'):
self.client = Client()
user = User.objects.get(username=username)
logged_in = self.client.login(username=username, password='test')
self.assertTrue(logged_in)
return user
def _logout(self):
self.client.logout()
def _verify_redirected_to(self, response, expected_url):
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, expected_url)
def _verify_session_lti_contents(self, session, expected):
self.assertIn('lti_parameters', session)
self._verify_lti_data(session['lti_parameters'], expected)
def _verify_lti_data(self, actual, expected):
for key, value in expected.items():
self.assertEqual(value, actual[key])
def _verify_lti_created(self, user, expected_lti_data, custom_key=None):
key = custom_key if custom_key else ''
lti_data = LtiUserData.objects.get(user=user, custom_key=key)
self.assertIsNotNone(lti_data)
self.assertEqual(lti_data.custom_key, key)
for key, value in expected_lti_data.items():
self.assertEqual(value, lti_data.edx_lti_parameters[key])
class AnonymousLtiRequestTests(LtiRequestsTestBase):
def setUp(self):
super(AnonymousLtiRequestTests, self).setUp()
self.hook_manager.anonymous_redirect_to = Mock(return_value=self.DEFAULT_REDIRECT)
def test_given_incorrect_payload_throws_bad_request(self):
response = self.send_lti_request(self.get_incorrect_lti_payload())
self.assertEqual(response.status_code, 400)
self.assertIn("Invalid LTI Request", response.content)
def test_given_correct_requests_sets_session_variable(self):
response = self.send_lti_request(self.get_correct_lti_payload())
self._verify_redirected_to(response, self.DEFAULT_REDIRECT)
self._verify_session_lti_contents(self.client.session, self._data)
@ddt.ddt
@patch('django_lti_tool_provider.views.Signals.LTI.received.send')
class AuthenticatedLtiRequestTests(LtiRequestsTestBase):
def _authentication_hook(self, request, user_id=None, username=None, email=None, **kwargs):
user = User.objects.create_user(username or user_id, password='1234', email=email)
user.save()
authenticated_user = authenticate(request, username=user.username, password='1234')
login(request, authenticated_user)
return user
def setUp(self):
super(AuthenticatedLtiRequestTests, self).setUp()
self.hook_manager.authenticated_redirect_to = Mock(return_value=self.DEFAULT_REDIRECT)
self.hook_manager.authentication_hook = self._authentication_hook
def _verify_lti_updated_signal_is_sent(self, patched_send_lti_received, expected_user):
expected_lti_data = LtiUserData.objects.get(user=expected_user)
patched_send_lti_received.assert_called_once_with(LTIView, user=expected_user, lti_data=expected_lti_data)
def test_no_session_given_incorrect_payload_throws_bad_request(self, _):
response = self.send_lti_request(self.get_incorrect_lti_payload())
self.assertEqual(response.status_code, 400)
self.assertIn("Invalid LTI Request", response.content)
def test_no_session_correct_payload_processes_lti_request(self, patched_send_lti_received):
# Precondition check
self.assertFalse(LtiUserData.objects.all())
response = self.send_lti_request(self.get_correct_lti_payload())
# Should have been created.
user = User.objects.all()[0]
self._verify_lti_created(user, self._data)
self._verify_redirected_to(response, self.DEFAULT_REDIRECT)
self._verify_lti_updated_signal_is_sent(patched_send_lti_received, user)
def test_given_session_and_lti_uses_lti(self, patched_send_lti_received):
# Precondition check
self.assertFalse(LtiUserData.objects.all())
session = self.client.session
session['lti_parameters'] = {}
session.save()
response = self.send_lti_request(self.get_correct_lti_payload())
# Should have been created.
user = User.objects.all()[0]
self._verify_lti_created(user, self._data)
self._verify_redirected_to(response, self.DEFAULT_REDIRECT)
self._verify_lti_updated_signal_is_sent(patched_send_lti_received, user)
def test_force_login_change(self, patched_send_lti_received):
self.assertFalse(User.objects.exclude(id=1))
payload = self.get_correct_lti_payload()
request = self.send_lti_request(payload, client=RequestFactory())
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore()
request.user = None
user = self._authentication_hook(request, username='goober')
request.session.save()
self.assertEqual(request.user, user)
LTIView.as_view()(request)
# New user creation not actually available during tests.
self.assertTrue(request.user)
new_user = User.objects.exclude(username='goober')[0]
self.assertEqual(request.user, new_user)
# Verify a new user is not created with the same data if re-visiting.
request = self.send_lti_request(payload, client=RequestFactory())
request.session = engine.SessionStore()
request.user = None
authenticated_user = authenticate(request, username=new_user.username, password='1234')
self.assertTrue(authenticated_user)
login(request, authenticated_user)
LTIView.as_view()(request)
self.assertEqual(request.user, authenticated_user)
self.assertEqual(authenticated_user, new_user)
self.assertEqual(LtiUserData.objects.all().count(), 1)
@ddt.ddt
class AuthenticationManagerIntegrationTests(LtiRequestsTestBase):
TEST_URLS = "/some_url", "/some_other_url", "http://qwe.asd.zxc.com"
def setUp(self):
super(AuthenticationManagerIntegrationTests, self).setUp()
def tearDown(self):
LTIView.authentication_manager = None
self._logout()
def _authenticate_user(self, request, user_id=None, username=None, email=None, **kwargs):
if not username:
username = "test_username"
password = "test_password"
user = User.objects.create_user(username=username, email=email, password=password)
authenticated_user = authenticate(request, username=username, password=password)
login(request, authenticated_user)
self.addCleanup(lambda: user.delete())
def test_authentication_hook_executed_if_not_authenticated(self):
payload = self.get_correct_lti_payload()
self.send_lti_request(payload)
args, user_data = self.hook_manager.authentication_hook.call_args
request = args[0]
self.assertEqual(request.body, payload)
self.assertFalse(request.user.is_authenticated)
expected_user_data = {
'username': self._data['lis_person_sourcedid'],
'email': self._data['lis_person_contact_email_primary'],
'user_id': self._data['user_id'],
'extra_params': {}
}
self.assertEqual(user_data, expected_user_data)
def test_authentication_hook_passes_optional_lti_data(self):
payload = self.get_correct_lti_payload()
self.hook_manager.optional_lti_parameters.return_value = {'resource_link_id': 'link_id', 'roles': 'roles'}
self.send_lti_request(payload)
args, user_data = self.hook_manager.authentication_hook.call_args
request = args[0]
self.assertEqual(request.body, payload)
self.assertFalse(request.user.is_authenticated)
expected_user_data = {
'username': self._data['lis_person_sourcedid'],
'email': self._data['lis_person_contact_email_primary'],
'user_id': self._data['user_id'],
'extra_params': {
'roles': ['Student'],
'link_id': 'resource_link_id',
}
}
self.assertEqual(user_data, expected_user_data)
@ddt.data(*TEST_URLS)
def test_anonymous_lti_is_processed_if_hook_does_not_authenticate_user(self, expected_url):
self.hook_manager.anonymous_redirect_to.return_value = expected_url
response = self.send_lti_request(self.get_correct_lti_payload())
self._verify_redirected_to(response, expected_url)
self._verify_session_lti_contents(self.client.session, self._data)
# verifying correct parameters were passed to auth manager hook
request, lti_data = self.hook_manager.anonymous_redirect_to.call_args[0]
self._verify_session_lti_contents(request.session, self._data)
self._verify_lti_data(lti_data, self._data)
@ddt.data(*TEST_URLS)
def test_authenticated_lti_is_processed_if_hook_authenticates_user(self, expected_url):
self.hook_manager.authentication_hook.side_effect = self._authenticate_user
self.hook_manager.authenticated_redirect_to.return_value = expected_url
response = self.send_lti_request(self.get_correct_lti_payload())
self._verify_redirected_to(response, expected_url)
# verifying correct parameters were passed to auth manager hook
request, lti_data = self.hook_manager.authenticated_redirect_to.call_args[0]
user = request.user
self._verify_lti_created(user, self._data)
self._verify_lti_data(lti_data, self._data)
@ddt.data('custom', 'very custom', 'extremely custom')
def test_authenticated_lti_saves_custom_key_if_specified(self, key):
self.hook_manager.vary_by_key.return_value = key
self.hook_manager.authentication_hook.side_effect = self._authenticate_user
self.send_lti_request(self.get_correct_lti_payload())
request, lti_data = self.hook_manager.authenticated_redirect_to.call_args[0]
user = request.user
self._verify_lti_created(user, self._data, key)
| open-craft/django-lti-tool-provider | django_lti_tool_provider/tests/test_views.py | Python | agpl-3.0 | 12,681 |
import pytest
from django.urls import reverse
from adhocracy4.dashboard import components
from adhocracy4.test.helpers import assert_template_response
from adhocracy4.test.helpers import redirect_target
from adhocracy4.test.helpers import setup_phase
from meinberlin.apps.topicprio.models import Topic
from meinberlin.apps.topicprio.phases import PrioritizePhase
component = components.modules.get('topic_edit')
@pytest.mark.django_db
def test_edit_view(client, phase_factory, topic_factory):
phase, module, project, item = setup_phase(
phase_factory, topic_factory, PrioritizePhase)
initiator = module.project.organisation.initiators.first()
url = component.get_base_url(module)
client.login(username=initiator.email, password='password')
response = client.get(url)
assert_template_response(response,
'meinberlin_topicprio/topic_dashboard_list.html')
@pytest.mark.django_db
def test_topic_create_view(client, phase_factory, category_factory):
phase, module, project, item = setup_phase(
phase_factory, None, PrioritizePhase)
initiator = module.project.organisation.initiators.first()
category = category_factory(module=module)
url = reverse('a4dashboard:topic-create',
kwargs={'module_slug': module.slug})
data = {
'name': 'test',
'description': 'test',
'category': category.pk
}
client.login(username=initiator.email, password='password')
response = client.post(url, data)
assert redirect_target(response) == 'topic-list'
topic = Topic.objects.get(name=data.get('name'))
assert topic.description == data.get('description')
assert topic.category.pk == data.get('category')
@pytest.mark.django_db
def test_topic_update_view(
client, phase_factory, topic_factory, category_factory):
phase, module, project, item = setup_phase(
phase_factory, topic_factory, PrioritizePhase)
initiator = module.project.organisation.initiators.first()
category = category_factory(module=module)
url = reverse('a4dashboard:topic-update',
kwargs={'pk': item.pk, 'year': item.created.year})
data = {
'name': 'test',
'description': 'test',
'category': category.pk
}
client.login(username=initiator.email, password='password')
response = client.post(url, data)
assert redirect_target(response) == 'topic-list'
item.refresh_from_db()
assert item.description == data.get('description')
assert item.category.pk == data.get('category')
@pytest.mark.django_db
def test_topic_delete_view(client, phase_factory, topic_factory):
phase, module, project, item = setup_phase(
phase_factory, topic_factory, PrioritizePhase)
initiator = module.project.organisation.initiators.first()
url = reverse('a4dashboard:topic-delete',
kwargs={'pk': item.pk, 'year': item.created.year})
client.login(username=initiator.email, password='password')
response = client.delete(url)
assert redirect_target(response) == 'topic-list'
assert not Topic.objects.exists()
| liqd/a4-meinberlin | tests/topicprio/dashboard_components/test_views_module_topics.py | Python | agpl-3.0 | 3,141 |
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template
from flask.ext.security import current_user
mod = Blueprint('documentation', __name__)
@mod.route('/documentation')
@mod.route('/documentation/index')
def doc_index():
return render_template('documentation/index.html',
apikey='token' if current_user.is_anonymous else current_user.apikey)
| odtvince/APITaxi | APITaxi/documentation/index.py | Python | agpl-3.0 | 381 |
# Copyright (C) 2020 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import unittest
from gateway.dto import SensorDTO, SensorSourceDTO
from gateway.api.serializers import SensorSerializer
class SensorSerializerTest(unittest.TestCase):
def test_serialize(self):
# Valid room
data = SensorSerializer.serialize(SensorDTO(id=1, name='foo', room=5),
fields=['id', 'name', 'room'])
self.assertEqual({'id': 1,
'name': 'foo',
'room': 5}, data)
# Empty room
data = SensorSerializer.serialize(SensorDTO(id=1, name='foo'),
fields=['id', 'name', 'room'])
self.assertEqual({'id': 1,
'name': 'foo',
'room': 255}, data)
# No room
data = SensorSerializer.serialize(SensorDTO(id=1, name='foo', room=5),
fields=['id', 'name'])
self.assertEqual({'id': 1,
'name': 'foo'}, data)
def test_deserialize(self):
# Valid room
dto = SensorSerializer.deserialize({'id': 5,
'external_id': '0',
'source': {'type': 'master'},
'physical_quantity': 'temperature',
'unit': 'celcius',
'name': 'bar',
'room': 10})
expected_dto = SensorDTO(id=5,
external_id='0',
source=SensorSourceDTO('master', name=None),
physical_quantity='temperature',
unit='celcius',
name='bar',
room=10)
assert expected_dto == dto
self.assertEqual(expected_dto, dto)
self.assertEqual(['external_id', 'id', 'name', 'physical_quantity', 'room', 'source', 'unit'], sorted(dto.loaded_fields))
# Empty room
dto = SensorSerializer.deserialize({'id': 5,
'name': 'bar',
'room': 255})
self.assertEqual(SensorDTO(id=5, name='bar'), dto)
self.assertEqual(['id', 'name', 'room'], sorted(dto.loaded_fields))
# No room
dto = SensorSerializer.deserialize({'id': 5,
'name': 'bar'})
self.assertEqual(SensorDTO(id=5, name='bar'), dto)
self.assertEqual(['id', 'name'], sorted(dto.loaded_fields))
# Invalid physical_quantity
with self.assertRaises(ValueError):
_ = SensorSerializer.deserialize({'id': 5,
'physical_quantity': 'something',
'unit': 'celcius',
'name': 'bar'})
# Invalid unit
with self.assertRaises(ValueError):
_ = SensorSerializer.deserialize({'id': 5,
'physical_quantity': 'temperature',
'unit': 'unicorns',
'name': 'bar'})
| openmotics/gateway | testing/unittests/api_tests/serializers/sensor_test.py | Python | agpl-3.0 | 4,097 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2012 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
# Copyright (C) 2014 Didotech srl
# (<http://www.didotech.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime
from openerp import SUPERUSER_ID
from openerp.osv import orm, fields
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.DEBUG)
class stock_location(orm.Model):
_inherit = "stock.location"
_columns = {
'update_product_bylocation': fields.boolean('Show Product location quantity on db', help='If check create a columns on product_product table for get product for this location'),
'product_related_columns': fields.char('Columns Name on product_product')
}
def update_product_by_location(self, cr, uid, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
location_ids = self.search(cr, uid, [('update_product_bylocation', '=', True)], context=context)
location_vals = {}
start_time = datetime.now()
date_product_by_location_update = start_time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if location_ids:
product_obj = self.pool['product.product']
for location in self.browse(cr, uid, location_ids, context):
location_vals[location.id] = location.product_related_columns
product_ids = product_obj.search(cr, uid, [('type', '!=', 'service')], context=context)
product_context = context.copy()
product_vals = {}
for product_id in product_ids:
product_vals[product_id] = {}
for location_keys in location_vals.keys():
product_context['location'] = location_keys
for product in product_obj.browse(cr, uid, product_ids, product_context):
if location_vals[location_keys] and (product[location_vals[location_keys]] != product.qty_available):
product_vals[product.id][location_vals[location_keys]] = product.qty_available
if product_vals:
for product_id in product_vals.keys():
product_val = product_vals[product_id]
if product_val:
product_val['date_product_by_location_update'] = date_product_by_location_update
product_obj.write(cr, uid, product_id, product_val, context)
end_time = datetime.now()
duration_seconds = (end_time - start_time)
duration = '{sec}'.format(sec=duration_seconds)
_logger.info(u'update_product_by_location get in {duration}'.format(duration=duration))
return True
def create_product_by_location(self, cr, location_name, context):
model_id = self.pool['ir.model.data'].get_object_reference(cr, SUPERUSER_ID, 'product', 'model_product_product')[1]
fields_value = {
'field_description': location_name,
'groups': [[6, False, []]],
'model_id': model_id,
'name': 'x_{location_name}'.format(location_name=location_name).lower().replace(' ', '_'),
'readonly': False,
'required': False,
'select_level': '0',
'serialization_field_id': False,
'translate': False,
'ttype': 'float',
}
context_field = context.copy()
context_field.update(
{
'department_id': False,
'lang': 'it_IT',
'manual': True, # required for create columns on table
'uid': 1
}
)
fields_id = self.pool['ir.model.fields'].create(cr, SUPERUSER_ID, fields_value, context_field)
return fields_id, fields_value['name']
def write(self, cr, uid, ids, vals, context=None):
context = context or self.pool['res.users'].context_get(cr, uid)
if vals.get('update_product_bylocation', False):
for location in self.browse(cr, uid, ids, context):
field_id, field_name = self.create_product_by_location(cr, location.name, context)
vals['product_related_columns'] = field_name
return super(stock_location, self).write(cr, uid, ids, vals, context)
| iw3hxn/LibrERP | stock_picking_extended/models/inherit_stock_location.py | Python | agpl-3.0 | 5,169 |
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
from coriolis import utils
from coriolis.conductor.rpc import client as rpc_conductor_client
from coriolis.minion_manager.rpc import client as rpc_minion_manager_client
class API(object):
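    """Thin facade over the conductor and minion-manager RPC clients,
    exposing endpoint CRUD and validation operations."""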
def __init__(self):
self._rpc_conductor_client = rpc_conductor_client.ConductorClient()
self._rpc_minion_manager_client = (
rpc_minion_manager_client.MinionManagerClient())
def create(self, ctxt, name, endpoint_type, description,
connection_info, mapped_regions):
return self._rpc_conductor_client.create_endpoint(
ctxt, name, endpoint_type, description, connection_info,
mapped_regions)
def update(self, ctxt, endpoint_id, properties):
return self._rpc_conductor_client.update_endpoint(
ctxt, endpoint_id, properties)
def delete(self, ctxt, endpoint_id):
self._rpc_conductor_client.delete_endpoint(ctxt, endpoint_id)
def get_endpoints(self, ctxt):
return self._rpc_conductor_client.get_endpoints(ctxt)
def get_endpoint(self, ctxt, endpoint_id):
return self._rpc_conductor_client.get_endpoint(ctxt, endpoint_id)
def validate_connection(self, ctxt, endpoint_id):
return self._rpc_conductor_client.validate_endpoint_connection(
ctxt, endpoint_id)
@utils.bad_request_on_error("Invalid destination environment: %s")
def validate_target_environment(self, ctxt, endpoint_id, target_env):
return self._rpc_conductor_client.validate_endpoint_target_environment(
ctxt, endpoint_id, target_env)
@utils.bad_request_on_error("Invalid source environment: %s")
def validate_source_environment(self, ctxt, endpoint_id, source_env):
return self._rpc_conductor_client.validate_endpoint_source_environment(
ctxt, endpoint_id, source_env)
@utils.bad_request_on_error("Invalid source minion pool environment: %s")
def validate_endpoint_source_minion_pool_options(
self, ctxt, endpoint_id, pool_environment):
return self._rpc_minion_manager_client.validate_endpoint_source_minion_pool_options(
ctxt, endpoint_id, pool_environment)
@utils.bad_request_on_error(
"Invalid destination minion pool environment: %s")
def validate_endpoint_destination_minion_pool_options(
self, ctxt, endpoint_id, pool_environment):
return self._rpc_minion_manager_client.validate_endpoint_destination_minion_pool_options(
ctxt, endpoint_id, pool_environment)
| cloudbase/coriolis | coriolis/endpoints/api.py | Python | agpl-3.0 | 2,592 |
"""
Global settings file.
Everything in here is imported *before* everything in settings.py.
This means that this file is used for default, fixed and global variables, and
then settings.py is used to overwrite anything here as well as adding settings
particular to the install.
Note that there are no tuples here, as they are immutable. Please use lists, so
that in settings.py we can do list.append()
"""
import os
from os.path import exists, join
# This shouldn't be needed, however in some cases the buildout version of
# django (in bin/django) may not make the paths correctly
import sys
sys.path.append('web')
# Django settings for scraperwiki project.
DEBUG = True
TIME_ZONE = 'Europe/London'
LANGUAGE_CODE = 'en_GB'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
HOME_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # the parent directory of SCRAPERWIKI_DIR
SCRAPERWIKI_DIR = HOME_DIR + '/web/'
MEDIA_DIR = SCRAPERWIKI_DIR + 'media'
MEDIA_URL = 'http://media.scraperwiki.com/'
MEDIA_ADMIN_DIR = SCRAPERWIKI_DIR + '/media-admin'
LOGIN_URL = '/login/'
HOME_DIR = ""
# MySQL default overdue scraper query
OVERDUE_SQL = "(DATE_ADD(last_run, INTERVAL run_interval SECOND) < NOW() or last_run is null)"
OVERDUE_SQL_PARAMS = []
# URL that handles the media served from MEDIA_ROOT. Make sure to use a trailing slash.
URL_ROOT = ""
MEDIA_ROOT = URL_ROOT + 'media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a trailing slash.
ADMIN_MEDIA_PREFIX = URL_ROOT + '/media-admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'x*#sb54li2y_+b-ibgyl!lnd^*#=bzv7bj_ypr2jvon9mwii@z'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
MIDDLEWARE_CLASSES = [
'middleware.exception_logging.ExceptionLoggingMiddleware',
'middleware.improved_gzip.ImprovedGZipMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django_notify.middleware.NotificationsMiddleware',
'pagination.middleware.PaginationMiddleware',
'middleware.csrfcookie.CsrfAlwaysSetCookieMiddleware',
'api.middleware.CORSMiddleware'
]
AUTHENTICATION_BACKENDS = [
'frontend.email_auth.EmailOrUsernameModelBackend',
'django.contrib.auth.backends.ModelBackend'
]
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = [
join(SCRAPERWIKI_DIR, 'templates'),
]
TEMPLATE_CONTEXT_PROCESSORS = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
'django_notify.context_processors.notifications',
'frontend.context_processors.site',
'frontend.context_processors.template_settings',
'frontend.context_processors.vault_info',
# 'frontend.context_processors.site_messages', # disabled as not used since design revamp April 2011
]
SCRAPERWIKI_APPS = [
# the following are scraperwiki apps
'frontend',
'codewiki',
'api',
'cropper',
'kpi',
'documentation',
#'devserver',
]
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.comments',
'django.contrib.markup',
'registration',
'south',
'profiles',
'django.contrib.humanize',
'django.contrib.messages',
'django_notify',
'tagging',
'contact_form',
'captcha',
'pagination',
'compressor',
] + SCRAPERWIKI_APPS
TEST_RUNNER = 'scraperwiki_tests.run_tests'
ACCOUNT_ACTIVATION_DAYS = 3650 # If you haven't activated in 10 years then tough luck!
# tell Django that the frontend user_profile model is to be attached to the
# user model in the admin side.
AUTH_PROFILE_MODULE = 'frontend.UserProfile'
INTERNAL_IPS = ['127.0.0.1',]
NOTIFICATIONS_STORAGE = 'session.SessionStorage'
REGISTRATION_BACKEND = "frontend.backends.UserWithNameBackend"
#tagging
FORCE_LOWERCASE_TAGS = True
# define default directories needed for paths to run scrapers
SCRAPER_LIBS_DIR = join(HOME_DIR, "scraperlibs")
#send broken link emails
SEND_BROKEN_LINK_EMAILS = DEBUG == False
#pagination
SCRAPERS_PER_PAGE = 50
#API
MAX_API_ITEMS = 500
DEFAULT_API_ITEMS = 100
# Make "view on site" work for user models
# https://docs.djangoproject.com/en/dev/ref/settings/?#absolute-url-overrides
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda o: o.get_profile().get_absolute_url()
}
# Required for the template_settings context processor. Each variable listed
# here will be made available in all templates that are passed the
# RequestContext. Be careful of listing database and other private settings
# here
TEMPLATE_SETTINGS = [
'API_URL',
'ORBITED_URL',
'MAX_DATA_POINTS',
'MAX_MAP_POINTS',
'REVISION',
'VIEW_URL',
'CODEMIRROR_URL'
]
try:
REVISION = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'revision.txt')).read()[:-1]
except Exception:
REVISION = ""
MAX_DATA_POINTS = 500
BLOG_FEED = 'http://blog.scraperwiki.com/feed/atom'
DATA_TABLE_ROWS = 10
RSS_ITEMS = 50
VIEW_SCREENSHOT_SIZES = {'small': (110, 73), 'medium': (220, 145), 'large': (800, 600)}
SCRAPER_SCREENSHOT_SIZES = {'small': (110, 73), 'medium': (220, 145) }
CODEMIRROR_VERSION = "0.94"
CODEMIRROR_URL = "CodeMirror-%s/" % CODEMIRROR_VERSION
APPROXLENOUTPUTLIMIT = 3000
CONFIGFILE = "/var/www/scraperwiki/uml/uml.cfg"
HTTPPROXYURL = "http://localhost:9005"
DISPATCHERURL = "http://localhost:9000"
PAGINATION_DEFAULT_PAGINATION=20
# tell south to do migrations when doing tests
SOUTH_TESTS_MIGRATE = True
# To be overridden in actual settings files
SESSION_COOKIE_SECURE = False
# Enable logging of errors to text file, taken from:
# http://stackoverflow.com/questions/238081/how-do-you-log-server-errors-on-django-sites
import logging
from middleware import exception_logging
logging.custom_handlers = exception_logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'simple': {
'format' : '%(asctime)s %(name)s %(filename)s:%(lineno)s %(levelname)s: %(message)s'
}
},
'handlers': {
# Include the default Django email handler for errors
# This is what you'd get without configuring logging at all.
'mail_admins': {
'class': 'django.utils.log.AdminEmailHandler',
'level': 'ERROR',
# But the emails are plain text by default - HTML is nicer
'include_html': True,
},
# Log to a text file that can be rotated by logrotate
'logfile': {
'class': 'logging.custom_handlers.WorldWriteRotatingFileHandler',
'filename': '/var/log/scraperwiki/django-www.log',
'mode': 'a',
'maxBytes': 100000,
'backupCount': 5,
'formatter': 'simple'
},
},
'loggers': {
# Again, default Django configuration to email unhandled exceptions
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
# Might as well log any errors anywhere else in Django
# (so use empty string for name here to catch anything)
'': {
'handlers': ['logfile'],
'level': DEBUG and 'DEBUG' or 'ERROR',
'propagate': False,
},
# Your own app - this assumes all your logger names start with "myapp."
#'myapp': {
# 'handlers': ['logfile'],
# 'level': 'WARNING', # Or maybe INFO or DEBUG
# 'propagate': False
#},
},
}
# Javascript templating
INSTALLED_APPS += ['icanhaz']
ICANHAZ_DIRS = [SCRAPERWIKI_DIR + 'templates/codewiki/js/']
| lkundrak/scraperwiki | web/global_settings.py | Python | agpl-3.0 | 8,733 |
import logging, logging.handlers
import sys
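# Usage: python playback.py <logfile>
# Replays the first 10 lines of the given log file as log records to the
# local HTTP collector configured below.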
# Leftover no-op call (constructs and discards a handler); kept for reference:
# logging.handlers.HTTPHandler('', '', method='GET')
logger = logging.getLogger('simple_example')
# http_handler = logging.handlers.HTTPHandler('127.0.0.1:9022', '/event', method='GET')
http_handler = logging.handlers.HTTPHandler('127.0.0.1:9999', '/httpevent', method='GET')
logger.addHandler(http_handler)
#logger.setLevel(logging.DEBUG)
f = open(sys.argv[1])
for i in range(10):
line = f.readline()
print line
logger.critical(line)
## For reference, an excerpt of the relevant Python logger
# import errno, logging, socket, os, pickle, struct, time, re
# from codecs import BOM_UTF8
# from stat import ST_DEV, ST_INO, ST_MTIME
# import queue
# try:
# import threading
# except ImportError: #pragma: no cover
# threading = None
# import http.client, urllib.parse
# port = 9022
# method = "GET"
# host = "127.0.0.1"
# url = "/"
# h = http.client.HTTPConnection(host)
# url = url + "?%s" % (sep, data)
# for item in lines:
# data = urllib.parse.urlencode(record)
# h.putrequest(method, url)
# h.putheader("Host", host)
# if method == "POST":
# h.putheader("Content-type",
# "application/x-www-form-urlencoded")
# h.putheader("Content-length", str(len(data)))
# h.send(data.encode('utf-8'))
# h.getresponse() #can't do anything with the result
| edx/edxanalytics | src/util/playback.py | Python | agpl-3.0 | 1,385 |
# ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
# ##############################################################################
import attr
from osis_common.ddd import interface
@attr.s(frozen=True, slots=True)
class EntiteUclDTO(interface.DTO):
sigle = attr.ib(type=str)
intitule = attr.ib(type=str)
| uclouvain/osis | infrastructure/shared_kernel/entite/dtos.py | Python | agpl-3.0 | 1,467 |
# Flask modules
from flask import (
Blueprint,
render_template,
redirect,
url_for,
request,
flash,
current_app,
g,
)
# Flask-Login
from flask_login import (
current_user,
)
# WTForms
from flask_wtf import Form
from wtforms import (
SubmitField,
BooleanField,
DecimalField,
)
from wtforms.validators import DataRequired
# Mail
from flask_mail import Message
# Modules required for communication with pypayd
import requests
import json
# Other modules
from datetime import datetime
from datetime import timedelta
# Our own modules
from topitup import db
from frontend import login_required
from nav import (
nav,
top_nav
)
# Let's start!
class Payd(db.Model):
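    """One row per credit-purchase invoice; time_payment stays at the Unix
    epoch until the Bitcoin payment is confirmed (see checkitup below)."""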
__bind_key__ = "topitup"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer)
time_creation = db.Column(db.DateTime)
time_payment = db.Column(db.DateTime)
order_id = db.Column(db.String(35), unique=True)
native_price = db.Column(db.Integer)
native_currency = db.Column(db.String(3))
btc_price = db.Column(db.Integer)
address = db.Column(db.String(35))
txn = db.Column(db.Integer, default=0)
def __init__(self, id, user_id, time_creation, time_payment, order_id,
native_price, native_currency, btc_price, address, txn):
self.id = id
self.user_id = user_id
self.time_creation = time_creation
self.time_payment = time_payment
self.order_id = order_id
self.native_price = native_price
self.native_currency = native_currency
self.btc_price = btc_price
self.address = address
self.txn = txn
def __repr__(self):
return '<Payd %r>' % self.id
# create sqlite database if it does not exist
try:
db.create_all(bind='topitup')
except Exception:
pass
# Blueprint
siema = Blueprint('siema', __name__)
# Buy credits Form
class LoginForm(Form):
amount = DecimalField('Amount of Credits', validators=[DataRequired()])
confirm_me = BooleanField('Please confirm you agree to TOC',
validators=[DataRequired()])
submit = SubmitField("Buy Credits")
@siema.before_request
def before_request():
try:
g.user = current_user.username.decode('utf-8')
g.email = current_user.email.decode('utf-8')
# amount of Credits in user's account
g.credits = current_user.neuro
g.user_id = current_user.id
except:
g.user = None
g.credits = None
nav.register_element('top_nav', top_nav(g.user, g.credits))
# run every minute from cron to check for payments
@siema.route('/invoices/checkitup')
def checkitup():
# we collect all invoices which are not paid
sql_query = Payd.query.filter_by(
time_payment=datetime.fromtimestamp(0)).all()
for invoice in sql_query:
print(invoice)
howold = current_app.config['WARRANTY_TIME']
# ignore all invoices which are older than WARRANTY_TIME days
if invoice.time_creation + timedelta(days=howold) > datetime.now():
print(invoice.order_id)
# initiate conversation with pypayd
pypayd_headers = {'content-type': 'application/json'}
pypayd_payload = {
"method": "check_order_status",
"params": {"order_id": invoice.order_id},
"jsonrpc": "2.0",
"id": 0,
}
            pypayd_response = requests.post(
                current_app.config['PYPAYD_URI'],
                data=json.dumps(pypayd_payload),
                headers=pypayd_headers).json()
howmanyconfirmations = current_app.config['CONFIRMATIONS']
confirmations = pypayd_response['result']['amount']
# Huhu! We have a new payment!
if invoice.txn == 0 and confirmations > howmanyconfirmations:
                # Send an email message if payment was registered
                # From: DEFAULT_MAIL_SENDER
                # NOTE: this route is triggered by cron, so current_user is not
                # the invoice owner here, and the message is never handed to
                # Flask-Mail's send(); resolving the owner via invoice.user_id
                # would be needed for real delivery.
msg = Message()
msg.add_recipient(current_user.email)
msg.subject = "Payment confirmation"
msg.body = ""
# Register payment
invoice.time_payment = datetime.now()
# Register paid amount in the main database
balance = current_user.credits
current_user.credits = balance + pypayd_response['result']['amount']
# Housekeeping
invoice.txn = confirmations
# register all transactions in databases
db.session.commit()
flash('Thank you.', 'info')
return redirect(url_for('frontend.index'))
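# Illustrative cron entry for the route above (deployment detail, assumed):
# * * * * * curl -s http://localhost:5000/invoices/checkitup > /dev/null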
@siema.route('/invoices/id/<orderid>')
@login_required
def showinvoice(orderid):
sql_query = Payd.query.filter_by(
order_id=orderid).first()
return render_template('invoice-id.html',
invoice=sql_query,
)
@siema.route('/invoices/new', methods=('GET', 'POST'))
@login_required
def new():
form = LoginForm()
if form.validate_on_submit():
amount = request.form['amount']
confirm_me = False
if 'confirm_me' in request.form:
confirm_me = True
        # the DataRequired validator on confirm_me already guarantees consent
# get a new transaction id
        new_local_transaction_id = Payd.query.count()
# TODO: deal with an unlikely event of concurrency
# initiate conversation with pypayd
pypayd_headers = {'content-type': 'application/json'}
pypayd_payload = {
"method": "create_order",
"params": {"amount": amount, "qr_code": True},
"jsonrpc": "2.0",
"id": new_local_transaction_id,
}
pypayd_response = requests.post(
current_app.config['PYPAYD_URI'],
data=json.dumps(pypayd_payload),
headers=pypayd_headers).json()
print(pypayd_response)
# insert stuff into our transaction database
to_db = Payd(
None,
g.user_id,
datetime.utcnow(),
datetime.fromtimestamp(0), # this is not a paid invoice, yet
pypayd_response['result']['order_id'],
amount,
"EUR",
pypayd_response['result']['amount'],
pypayd_response['result']['receiving_address'],
0,
)
db.session.add(to_db)
db.session.commit()
payme = {
'credits': amount,
'btc': pypayd_response['result']['amount'],
'address': pypayd_response['result']['receiving_address'],
'image': pypayd_response['result']['qr_image'],
}
# generate approximate time to pay the invoice
pay_time = datetime.now() + timedelta(minutes=45)
# and finally show an invoice to the customer
return render_template('invoice-payme.html',
payme=payme,
pay_time=pay_time)
return render_template('invoice-new.html', form=form)
# user has access to his own invoices only
@siema.route('/invoices/', defaults={'page': 1})
@siema.route('/invoices/page/<int:page>')
@login_required
def index(page):
# downloading all records related to user
sql_query = Payd.query.filter_by(
user_id=g.user_id).paginate(page,
current_app.config['INVOICES_PER_PAGE'])
return render_template('invoices.html',
invoices=sql_query,
)
# admin has access to all invoices
@siema.route('/admin/', defaults={'page': 1})
@siema.route('/admin/page/<int:page>')
@login_required
def admin(page):
# only user with id = 666 can enter this route
if g.user_id == 666:
sql_query = Payd.query.paginate(page, 50)
return render_template('invoices.html',
invoices=sql_query,
)
else:
flash('You are not admin and you can see your own invoices only!',
'warning')
return redirect(url_for('siema.index'))
| ser/topitup | siema.py | Python | agpl-3.0 | 8,241 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.dispatch import receiver
from assessments.business import scores_encodings_deadline
from base.signals import publisher
@receiver(publisher.compute_scores_encodings_deadlines)
def compute_scores_encodings_deadlines(sender, **kwargs):
scores_encodings_deadline.compute_deadline(kwargs['offer_year_calendar'])
@receiver(publisher.compute_student_score_encoding_deadline)
def compute_student_score_encoding_deadline(sender, **kwargs):
scores_encodings_deadline.compute_deadline_by_student(kwargs['session_exam_deadline'])
@receiver(publisher.compute_all_scores_encodings_deadlines)
def compute_all_scores_encodings_deadlines(sender, **kwargs):
scores_encodings_deadline.recompute_all_deadlines(kwargs['academic_calendar'])
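# Illustrative emission of one of the signals handled above (sketch; the
# keyword argument names are the ones the receivers expect):
# publisher.compute_scores_encodings_deadlines.send(
#     sender=None, offer_year_calendar=an_offer_year_calendar)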
| uclouvain/osis_louvain | assessments/signals/subscribers.py | Python | agpl-3.0 | 2,029 |
from django.contrib import auth
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls.base import reverse
class TestAccountRegistration(TestCase):
def setUp(self):
# create one user for convenience
response = self.client.post(
reverse('account:register'),
{
'username': 'Alice',
'email': 'alice@localhost',
'password': 'supasecret',
'password2': 'supasecret',
},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
self.assertEqual(response.status_code, 200)
def test_registration(self):
self.assertEqual(len(User.objects.all()), 1)
user = User.objects.get(username='Alice')
self.assertEqual(user.email, 'alice@localhost')
response = self.client.post(
reverse('account:register'),
{
'username': 'Bob',
'email': 'bob@localhost',
'password': 'foo',
'password2': 'foo',
},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(User.objects.all()), 2)
def test_duplicate_username(self):
response = self.client.post(
reverse('account:register'),
{
'username': 'Alice',
'email': 'alice2@localhost',
'password': 'supasecret',
'password2': 'supasecret',
},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:register'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(User.objects.all()), 1)
def test_duplicate_email(self):
response = self.client.post(
reverse('account:register'),
{
'username': 'Alice2000',
'email': 'alice@localhost',
'password': 'supasecret',
'password2': 'supasecret',
},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:register'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(User.objects.all()), 1)
def test_non_matching_passwords(self):
response = self.client.post(
reverse('account:register'),
{
'username': 'Bob',
'email': 'bob@localhost',
'password': 'foo',
'password2': 'bar',
},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:register'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(User.objects.all()), 1)
def test_form_view(self):
response = self.client.get(reverse('account:register'))
self.assertEqual(response.status_code, 200)
class TestLogin(TestCase):
def setUp(self):
# create one user for convenience
response = self.client.post(
reverse('account:register'),
{
'username': 'Alice',
'email': 'alice@localhost',
'password': 'supasecret',
'password2': 'supasecret',
},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
self.assertEqual(response.status_code, 200)
def test_login(self):
response = self.client.post(
reverse('account:login'),
{'username': 'Alice', 'password': 'supasecret'},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:home'))
self.assertEqual(response.status_code, 200)
def test_disabled_login(self):
        User.objects.all().update(is_active=False)
response = self.client.post(
reverse('account:login'),
{'username': 'Alice', 'password': 'supasecret'},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
self.assertEqual(response.status_code, 200)
def test_wrong_credentials(self):
response = self.client.post(
reverse('account:login'),
{'username': 'Alice', 'password': 'wrong'},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
self.assertEqual(response.status_code, 200)
def test_wrong_user(self):
response = self.client.post(
reverse('account:login'),
{'username': 'Bob', 'password': 'supasecret'},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:login'))
self.assertEqual(response.status_code, 200)
def test_login_view(self):
response = self.client.get(reverse('account:login'))
self.assertEqual(response.status_code, 200)
def test_login_view_being_logged_in(self):
response = self.client.post(
reverse('account:login'),
{'username': 'Alice', 'password': 'supasecret'},
follow=True
)
response = self.client.get(
reverse('account:login'),
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:home'))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse('account:login'),
{'username': 'Alice', 'password': 'supasecret'},
follow=True
)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertEqual(response.redirect_chain[0][0], reverse('account:home'))
self.assertEqual(response.status_code, 200)
def test_home_view_while_not_logged_in(self):
response = self.client.get(reverse('account:home'), follow=True)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertTrue(response.redirect_chain[0][0].startswith(reverse('account:login')))
self.assertEqual(response.status_code, 200)
def test_home_view_while_logged_in(self):
response = self.client.post(
reverse('account:login'),
{'username': 'Alice', 'password': 'supasecret'},
follow=True
)
response = self.client.get(reverse('account:home'))
self.assertEqual(response.status_code, 200)
def test_register_view_while_logged_in(self):
response = self.client.post(
reverse('account:login'),
{'username': 'Alice', 'password': 'supasecret'},
follow=True
)
response = self.client.get(reverse('account:register'), follow=True)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertTrue(response.redirect_chain[0][0].startswith(reverse('account:home')))
self.assertEqual(response.status_code, 200)
def test_logout(self):
response = self.client.post(
reverse('account:login'),
{'username': 'Alice', 'password': 'supasecret'},
follow=True
)
user = auth.get_user(self.client)
self.assertTrue(user.is_authenticated)
response = self.client.get(reverse('account:logout'), follow=True)
self.assertEqual(response.redirect_chain[0][1], 302)
self.assertTrue(response.redirect_chain[0][0].startswith(reverse('base:home')))
self.assertEqual(response.status_code, 200)
user = auth.get_user(self.client)
self.assertFalse(user.is_authenticated)
| jardiacaj/finem_imperii | account/tests.py | Python | agpl-3.0 | 8,495 |
import analytics
import anyjson
from channels import Group
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from lily.accounts.api.serializers import RelatedAccountSerializer
from lily.api.fields import SanitizedHtmlCharField
from lily.api.nested.mixins import RelatedSerializerMixin
from lily.api.nested.serializers import WritableNestedSerializer
from lily.api.serializers import ContentTypeSerializer
from lily.contacts.api.serializers import RelatedContactSerializer
from lily.contacts.models import Function
from lily.users.api.serializers import RelatedLilyUserSerializer, RelatedTeamSerializer
from lily.utils.api.serializers import RelatedTagSerializer
from lily.utils.request import is_external_referer
from ..models import Case, CaseStatus, CaseType
class CaseStatusSerializer(serializers.ModelSerializer):
"""
Serializer for case status model.
"""
class Meta:
model = CaseStatus
fields = (
'id',
'name',
)
class RelatedCaseStatusSerializer(RelatedSerializerMixin, CaseStatusSerializer):
pass
class CaseTypeSerializer(serializers.ModelSerializer):
"""
Serializer for case type model.
"""
class Meta:
model = CaseType
fields = (
'id',
'is_archived',
'name',
'use_as_filter',
)
class RelatedCaseTypeSerializer(RelatedSerializerMixin, CaseTypeSerializer):
pass
class CaseSerializer(WritableNestedSerializer):
"""
Serializer for the case model.
"""
# Set non mutable fields.
created_by = RelatedLilyUserSerializer(read_only=True)
content_type = ContentTypeSerializer(
read_only=True,
help_text='This is what the object is identified as in the back-end.',
)
# Related fields.
account = RelatedAccountSerializer(
required=False,
allow_null=True,
help_text='Account for which the case is being created.',
)
contact = RelatedContactSerializer(
required=False,
allow_null=True,
help_text='Contact for which the case is being created.',
)
assigned_to = RelatedLilyUserSerializer(
required=False,
allow_null=True,
assign_only=True,
help_text='Person which the case is assigned to.',
)
assigned_to_teams = RelatedTeamSerializer(
many=True,
required=False,
assign_only=True,
help_text='List of teams the case is assigned to.',
)
type = RelatedCaseTypeSerializer(
assign_only=True,
help_text='The type of case.',
)
status = RelatedCaseStatusSerializer(
assign_only=True,
help_text='Status of the case.',
)
tags = RelatedTagSerializer(
many=True,
required=False,
create_only=True,
help_text='Any tags used to further categorize the case.',
)
description = SanitizedHtmlCharField(
help_text='Any extra text to describe the case (supports Markdown).',
)
# Show string versions of fields.
priority_display = serializers.CharField(
source='get_priority_display',
read_only=True,
help_text='Human readable value of the case\'s priority.',
)
def validate(self, data):
contact_id = data.get('contact', {})
if isinstance(contact_id, dict):
contact_id = contact_id.get('id')
account_id = data.get('account', {})
if isinstance(account_id, dict):
account_id = account_id.get('id')
if contact_id and account_id:
if not Function.objects.filter(contact_id=contact_id, account_id=account_id).exists():
raise serializers.ValidationError({'contact': _('Given contact must work at the account.')})
# Check if we are related and if we only passed in the id, which means user just wants new reference.
errors = {
'account': _('Please enter an account and/or contact.'),
'contact': _('Please enter an account and/or contact.'),
}
if not self.partial:
# For POST or PUT we always want to check if either is set.
if not (account_id or contact_id):
raise serializers.ValidationError(errors)
else:
# For PATCH only check the data if both account and contact are passed.
if ('account' in data and 'contact' in data) and not (account_id or contact_id):
raise serializers.ValidationError(errors)
return super(CaseSerializer, self).validate(data)
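    # Illustrative outcomes of the validation above (payload values invented):
    #   POST  {'account': {'id': 1}, ...}            -> accepted
    #   POST  {} (no account, no contact)            -> ValidationError
    #   PATCH {'account': None, 'contact': None}     -> ValidationError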
def create(self, validated_data):
user = self.context.get('request').user
assigned_to = validated_data.get('assigned_to')
validated_data.update({
'created_by_id': user.pk,
})
if assigned_to:
Group('tenant-%s' % user.tenant.id).send({
                'text': anyjson.serialize({
'event': 'case-assigned',
}),
})
if assigned_to.get('id') != user.pk:
validated_data.update({
'newly_assigned': True,
})
else:
Group('tenant-%s' % user.tenant.id).send({
                'text': anyjson.serialize({
'event': 'case-unassigned',
}),
})
instance = super(CaseSerializer, self).create(validated_data)
        # Track newly created cases in Segment.
if not settings.TESTING:
analytics.track(
user.id,
'case-created', {
'expires': instance.expires,
'assigned_to_id': instance.assigned_to_id if instance.assigned_to else '',
'creation_type': 'automatic' if is_external_referer(self.context.get('request')) else 'manual',
},
)
return instance
def update(self, instance, validated_data):
user = self.context.get('request').user
status_id = validated_data.get('status', instance.status_id)
assigned_to = validated_data.get('assigned_to')
if assigned_to:
assigned_to = assigned_to.get('id')
if isinstance(status_id, dict):
status_id = status_id.get('id')
status = CaseStatus.objects.get(pk=status_id)
# Automatically archive the case if the status is set to 'Closed'.
if status.name == 'Closed' and 'is_archived' not in validated_data:
validated_data.update({
'is_archived': True
})
        # Check if the case is being reassigned. If so, we want to notify that user.
if assigned_to and assigned_to != user.pk:
validated_data.update({
'newly_assigned': True,
})
elif 'assigned_to' in validated_data and not assigned_to:
# Case is unassigned, so clear newly assigned flag.
validated_data.update({
'newly_assigned': False,
})
if (('status' in validated_data and status.name == 'Open') or
('is_archived' in validated_data and not validated_data.get('is_archived'))):
# Case is reopened or unarchived, so we want to notify the user again.
validated_data.update({
'newly_assigned': True,
})
if 'assigned_to' in validated_data or instance.assigned_to_id:
Group('tenant-%s' % user.tenant.id).send({
'text': anyjson.serialize({
'event': 'case-assigned',
}),
})
if (not instance.assigned_to_id or
instance.assigned_to_id and
'assigned_to' in validated_data and
not validated_data.get('assigned_to')):
Group('tenant-%s' % user.tenant.id).send({
'text': anyjson.serialize({
'event': 'case-unassigned',
}),
})
return super(CaseSerializer, self).update(instance, validated_data)
class Meta:
model = Case
fields = (
'id',
'account',
'assigned_to',
'assigned_to_teams',
'contact',
'content_type',
'created',
'created_by',
'description',
'expires',
'is_archived',
'modified',
'newly_assigned',
'priority',
'priority_display',
'status',
'tags',
'subject',
'type',
)
extra_kwargs = {
'created': {
'help_text': 'Shows the date and time when the deal was created.',
},
'expires': {
'help_text': 'Shows the date and time for when the case should be completed.',
},
'modified': {
'help_text': 'Shows the date and time when the case was last modified.',
},
'newly_assigned': {
'help_text': 'True if the assignee was changed and that person hasn\'t accepted yet.',
},
'subject': {
'help_text': 'A short description of the case.',
},
}
class RelatedCaseSerializer(RelatedSerializerMixin, CaseSerializer):
"""
Serializer for the case model when used as a relation.
"""
class Meta:
model = Case
# Override the fields because we don't want related fields in this serializer.
fields = (
'id',
'assigned_to',
'assigned_to_teams',
'created',
'created_by',
'description',
'expires',
'is_archived',
'modified',
'priority',
'priority_display',
'subject',
)
| HelloLily/hellolily | lily/cases/api/serializers.py | Python | agpl-3.0 | 9,977 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2008-2013 AvanzOSC S.L. All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class create_extra_documentation(orm.TransientModel):
_name = 'module.doc.create'
def create_documentation(self, cr, uid, ids, context=None):
doc_obj = self.pool.get('module.doc')
mod_obj = self.pool.get('ir.module.module')
for id in ids:
search_ids = doc_obj.search(cr, uid, [('module_id', '=', id)],
context=context)
if not search_ids:
created_id = doc_obj.create(cr, uid, {'module_id': id},
context=context)
name = doc_obj.onchange_module_id(cr, uid, [created_id], id,
context=context)['value']['name']
doc_obj.write(cr, uid, created_id, {'name': name},
context=context)
mod_obj.write(cr, uid, id, {'doc_id': created_id},
context=context)
else:
for search_id in search_ids:
doc_obj.write(cr, uid, search_id, {'has_info': True},
context=context)
mod_obj.write(cr, uid, id, {'doc_id': search_id},
context=context)
return {
'name': _('Extra documentation'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'module.doc',
'type': 'ir.actions.act_window',
}
def create_documentation_all(self, cr, uid, ids, context):
mod_obj = self.pool.get('ir.module.module')
all_ids = mod_obj.search(cr, uid, [])
return self.create_documentation(cr, uid, all_ids, context)
def create_documentation_installed(self, cr, uid, ids, context):
mod_obj = self.pool.get('ir.module.module')
installed_ids = mod_obj.search(cr, uid, [('state', '=', 'installed')])
return self.create_documentation(cr, uid, installed_ids, context)
| Daniel-CA/odoo-addons | __unported__/avanzosc_module_doc/wizard/create_module_documentation.py | Python | agpl-3.0 | 3,050 |
# -*- coding: utf-8 -*-
import traceback
from ckan.lib.helpers import json
from ckanext.harvest.model import HarvestObject, HarvestObjectExtra
from ckanext.harvest.harvesters import HarvesterBase
from ckanext.geocat.utils import search_utils, csw_processor, ogdch_map_utils, csw_mapping # noqa
from ckanext.geocat.utils.vocabulary_utils import \
(VALID_TERMS_OF_USE, DEFAULT_TERMS_OF_USE)
from ckan.logic.schema import default_update_package_schema,\
default_create_package_schema
from ckan.lib.navl.validators import ignore
import ckan.plugins.toolkit as tk
from ckan import model
from ckan.model import Session
import uuid
import logging
log = logging.getLogger(__name__)
DEFAULT_PERMA_LINK_URL = 'https://www.geocat.ch/geonetwork/srv/ger/md.viewer#/full_view/' # noqa
DEFAULT_PERMA_LINK_LABEL = 'geocat.ch Permalink'
HARVEST_USER = 'harvest'
class GeocatHarvester(HarvesterBase):
'''
The harvester for geocat
'''
def info(self):
return {
'name': 'geocat_harvester',
'title': 'Geocat harvester',
'description': (
'Harvests metadata from geocat (CSW)'
),
'form_config_interface': 'Text'
}
def validate_config(self, config):
if not config:
return config
try:
config_obj = json.loads(config)
except Exception as e:
raise ValueError(
                'Configuration could not be parsed. An error {} occurred'
.format(e)
)
if 'delete_missing_datasets' in config_obj:
if not isinstance(config_obj['delete_missing_datasets'], bool):
                raise ValueError('delete_missing_datasets must be boolean')
if 'rights' in config_obj:
if not config_obj['rights'] in VALID_TERMS_OF_USE:
raise ValueError('{} is not valid as terms of use'
.format(config_obj['rights']))
return config
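    # Example source configuration accepted above (illustrative; the exact
    # terms-of-use vocabulary is whatever VALID_TERMS_OF_USE contains):
    #   {"delete_missing_datasets": true, "rights": "<a valid terms-of-use>"}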
def _set_config(self, config_str, harvest_source_id):
if config_str:
self.config = json.loads(config_str)
else:
self.config = {}
self.config['rights'] = self.config.get('rights', DEFAULT_TERMS_OF_USE)
if not self.config['rights'] in VALID_TERMS_OF_USE:
self.config['rights'] = DEFAULT_TERMS_OF_USE
self.config['delete_missing_datasets'] = \
self.config.get('delete_missing_datasets', False)
self.config['geocat_perma_link_label'] = \
tk.config.get('ckanext.geocat.permalink_title',
DEFAULT_PERMA_LINK_LABEL)
self.config['geocat_perma_link_url'] = \
self.config.get('geocat_perma_link_url',
tk.config.get('geocat_perma_link_url',
DEFAULT_PERMA_LINK_URL))
self.config['legal_basis_url'] = \
self.config.get('legal_basis_url', None)
organization_slug = \
search_utils.get_organization_slug_for_harvest_source(
harvest_source_id)
self.config['organization'] = organization_slug
log.debug('Using config: %r' % self.config)
def gather_stage(self, harvest_job):
log.debug('In GeocatHarvester gather_stage')
self._set_config(harvest_job.source.config, harvest_job.source.id)
csw_url = harvest_job.source.url
try:
csw_data = csw_processor.GeocatCatalogueServiceWeb(url=csw_url)
gathered_geocat_identifiers = csw_data.get_geocat_id_from_csw()
except Exception as e:
self._save_gather_error(
'Unable to get content for URL: %s: %s / %s'
% (csw_url, str(e), traceback.format_exc()),
harvest_job
)
return []
existing_dataset_infos = \
search_utils.get_dataset_infos_for_organization(
organization_name=self.config['organization'],
harvest_source_id=harvest_job.source_id,
)
gathered_ogdch_identifiers = \
[ogdch_map_utils.map_geocat_to_ogdch_identifier(
geocat_identifier=geocat_identifier,
organization_slug=self.config['organization'])
for geocat_identifier in gathered_geocat_identifiers]
all_ogdch_identifiers = \
set(gathered_ogdch_identifiers + existing_dataset_infos.keys())
packages_to_delete = search_utils.get_packages_to_delete(
existing_dataset_infos=existing_dataset_infos,
gathered_ogdch_identifiers=gathered_ogdch_identifiers,
)
csw_map = csw_mapping.GeoMetadataMapping(
organization_slug=self.config['organization'],
geocat_perma_link=self.config['geocat_perma_link_url'],
geocat_perma_label=self.config['geocat_perma_link_label'],
legal_basis_url=self.config['legal_basis_url'],
default_rights=self.config['rights'],
valid_identifiers=all_ogdch_identifiers,
)
harvest_obj_ids = self.map_geocat_dataset(
csw_data,
csw_map,
gathered_geocat_identifiers,
gathered_ogdch_identifiers,
harvest_job)
log.debug('IDs: %r' % harvest_obj_ids)
if self.config['delete_missing_datasets']:
delete_harvest_object_ids = \
self.delete_geocat_ids(
harvest_job,
harvest_obj_ids,
packages_to_delete
)
harvest_obj_ids.extend(delete_harvest_object_ids)
return harvest_obj_ids
def delete_geocat_ids(self,
harvest_job,
harvest_obj_ids,
packages_to_delete):
delete_harvest_obj_ids = []
for package_info in packages_to_delete:
obj = HarvestObject(
guid=package_info[1].name,
job=harvest_job,
extras=[HarvestObjectExtra(key='import_action',
value='delete')])
obj.save()
delete_harvest_obj_ids.append(obj.id)
return delete_harvest_obj_ids
def map_geocat_dataset(self,
csw_data,
csw_map,
gathered_geocat_identifiers,
gathered_ogdch_identifiers,
harvest_job):
mapped_harvest_obj_ids = []
for geocat_id in gathered_geocat_identifiers:
ogdch_identifier = ogdch_map_utils.map_geocat_to_ogdch_identifier(
geocat_identifier=geocat_id,
organization_slug=self.config['organization'])
if ogdch_identifier in gathered_ogdch_identifiers:
try:
csw_record_as_string = csw_data.get_record_by_id(geocat_id)
except Exception as e:
self._save_gather_error(
                        'Error when reading csw record from source: %s %r / %s'
% (ogdch_identifier, e, traceback.format_exc()),
harvest_job)
continue
try:
dataset_dict = csw_map.get_metadata(csw_record_as_string,
geocat_id)
except Exception as e:
self._save_gather_error(
'Error when mapping csw data to dcat: %s %r / %s'
% (ogdch_identifier, e, traceback.format_exc()),
harvest_job)
continue
try:
harvest_obj = \
HarvestObject(guid=ogdch_identifier,
job=harvest_job,
content=json.dumps(dataset_dict))
harvest_obj.save()
except Exception as e:
self._save_gather_error(
                        'Error when processing dataset: %s %r / %s'
% (ogdch_identifier, e, traceback.format_exc()),
harvest_job)
continue
else:
mapped_harvest_obj_ids.append(harvest_obj.id)
return mapped_harvest_obj_ids
def fetch_stage(self, harvest_object):
return True
def import_stage(self, harvest_object): # noqa
log.debug('In GeocatHarvester import_stage')
if not harvest_object:
log.error('No harvest object received')
self._save_object_error(
'No harvest object received',
harvest_object
)
return False
import_action = \
search_utils.get_value_from_object_extra(harvest_object.extras,
'import_action')
if import_action and import_action == 'delete':
log.debug('import action: %s' % import_action)
harvest_object.current = False
return self._delete_dataset({'id': harvest_object.guid})
if harvest_object.content is None:
self._save_object_error('Empty content for object %s' %
harvest_object.id,
harvest_object, 'Import')
return False
try:
pkg_dict = json.loads(harvest_object.content)
except ValueError:
self._save_object_error('Could not parse content for object {0}'
.format(harvest_object.id), harvest_object, 'Import') # noqa
return False
pkg_info = \
search_utils.find_package_for_identifier(harvest_object.guid)
context = {
'ignore_auth': True,
'user': HARVEST_USER,
}
try:
if pkg_info:
# Change default schema to ignore lists of dicts, which
# are stored in the '__junk' field
schema = default_update_package_schema()
context['schema'] = schema
schema['__junk'] = [ignore]
pkg_dict['name'] = pkg_info.name
pkg_dict['id'] = pkg_info.package_id
search_utils.map_resources_to_ids(pkg_dict, pkg_info)
updated_pkg = \
tk.get_action('package_update')(context, pkg_dict)
harvest_object.current = True
harvest_object.package_id = updated_pkg['id']
harvest_object.save()
log.debug("Updated PKG: %s" % updated_pkg)
else:
flat_title = _derive_flat_title(pkg_dict['title'])
if not flat_title:
self._save_object_error(
'Unable to derive name from title %s'
% pkg_dict['title'], harvest_object, 'Import')
return False
pkg_dict['name'] = self._gen_new_name(flat_title)
schema = default_create_package_schema()
context['schema'] = schema
schema['__junk'] = [ignore]
log.debug("No package found, create a new one!")
# generate an id to reference it in the harvest_object
pkg_dict['id'] = unicode(uuid.uuid4())
log.info('Package with GUID %s does not exist, '
'let\'s create it' % harvest_object.guid)
harvest_object.current = True
harvest_object.package_id = pkg_dict['id']
harvest_object.add()
model.Session.execute(
'SET CONSTRAINTS harvest_object_package_id_fkey DEFERRED')
model.Session.flush()
created_pkg = \
tk.get_action('package_create')(context, pkg_dict)
log.debug("Created PKG: %s" % created_pkg)
Session.commit()
return True
except Exception as e:
self._save_object_error(
('Exception in import stage: %r / %s'
% (e, traceback.format_exc())), harvest_object)
return False
def _create_new_context(self):
# get the site user
site_user = tk.get_action('get_site_user')(
{'model': model, 'ignore_auth': True}, {})
context = {
'model': model,
'session': Session,
'user': site_user['name'],
}
return context
def _delete_dataset(self, package_dict):
log.debug('deleting dataset %s' % package_dict['id'])
context = self._create_new_context()
tk.get_action('dataset_purge')(
context.copy(),
package_dict
)
return True
def _get_geocat_permalink_relation(self, geocat_pkg_id):
return {'url': self.config['geocat_perma_link_url'] + geocat_pkg_id,
'label': self.config['geocat_perma_link_label']}
class GeocatConfigError(Exception):
pass
def _derive_flat_title(title_dict):
"""localizes language dict if no language is specified"""
return title_dict.get('de') or title_dict.get('fr') or title_dict.get('en') or title_dict.get('it') or "" # noqa
| opendata-swiss/ckanext-geocat | ckanext/geocat/harvester.py | Python | agpl-3.0 | 13,479 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'l10n_ar_account_check_sale',
'version': '1.0',
    'summary': 'Sale of third-party checks',
    'description': """
    Checks
    ==================================
    Sale of third-party checks.
""",
'author': 'OPENPYME S.R.L.',
'website': 'http://www.openpyme.com.ar',
'category': 'Accounting',
'depends': [
'l10n_ar_account_check',
],
'data': [
'data/sold_check_data.xml',
'views/account_third_check_view.xml',
'views/account_sold_check_view.xml',
'wizard/wizard_sell_check_view.xml',
'security/ir.model.access.csv',
'data/security.xml',
],
'active': False,
'application': True,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| odoo-arg/odoo_l10n_ar | l10n_ar_account_check_sale/__manifest__.py | Python | agpl-3.0 | 1,674 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from ddd.logic.learning_unit.builder.effective_class_identity_builder import EffectiveClassIdentityBuilder
from ddd.logic.learning_unit.commands import GetEffectiveClassCommand
from ddd.logic.learning_unit.domain.model.effective_class import EffectiveClass
from ddd.logic.learning_unit.repository.i_effective_class import IEffectiveClassRepository
# FIXME :: add unit tests + return EffectiveClassFromRepositoryDTO instead of the domain object
def get_effective_class(
cmd: 'GetEffectiveClassCommand',
effective_class_repository: 'IEffectiveClassRepository'
) -> 'EffectiveClass':
effective_class_identity = EffectiveClassIdentityBuilder.build_from_code_and_learning_unit_identity_data(
class_code=cmd.class_code,
learning_unit_code=cmd.learning_unit_code,
learning_unit_year=cmd.learning_unit_year
)
return effective_class_repository.get(entity_id=effective_class_identity)
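# Minimal usage sketch (repository implementation and values are assumed):
# cmd = GetEffectiveClassCommand(class_code='X', learning_unit_code='LDROI1001',
#                                learning_unit_year=2021)
# effective_class = get_effective_class(cmd, an_effective_class_repository)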
| uclouvain/osis | ddd/logic/learning_unit/use_case/read/get_effective_class_service.py | Python | agpl-3.0 | 2,220 |
import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.raw_module import RawDescriptor
from .x_module import XModule
from xblock.core import Integer, Scope, String, List, Float, Boolean
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module, CombinedOpenEndedV1Descriptor
from collections import namedtuple
from .fields import Date, Timedelta
import textwrap
log = logging.getLogger("mitx.courseware")
V1_SETTINGS_ATTRIBUTES = [
"display_name", "max_attempts", "graded", "accept_file_upload",
"skip_spelling_checks", "due", "graceperiod", "weight", "min_to_calibrate",
"max_to_calibrate", "peer_grader_count", "required_peer_grading",
]
V1_STUDENT_ATTRIBUTES = ["current_task_number", "task_states", "state",
"student_attempts", "ready_to_reset", "old_task_states"]
V1_ATTRIBUTES = V1_SETTINGS_ATTRIBUTES + V1_STUDENT_ATTRIBUTES
VersionTuple = namedtuple('VersionTuple', ['descriptor', 'module', 'settings_attributes', 'student_attributes'])
VERSION_TUPLES = {
1: VersionTuple(CombinedOpenEndedV1Descriptor, CombinedOpenEndedV1Module, V1_SETTINGS_ATTRIBUTES,
V1_STUDENT_ATTRIBUTES),
}
DEFAULT_VERSION = 1
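# Behaviour sketch: VERSION_TUPLES[1].module is CombinedOpenEndedV1Module; a
# hypothetical version 2 would register another VersionTuple entry above and
# adjust DEFAULT_VERSION as appropriate.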
DEFAULT_DATA = textwrap.dedent("""\
<combinedopenended>
<prompt>
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
</prompt>
<rubric>
<rubric>
<category>
<description>
Ideas
</description>
<option>
Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
</option>
<option>
Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
</option>
<option>
Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
</option>
<option>
Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
</option>
</category>
<category>
<description>
Content
</description>
<option>
Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
</option>
<option>
Includes little information and few or no details. Explores only one or two facets of the topic.
</option>
<option>
Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
</option>
<option>
Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
</option>
</category>
<category>
<description>
Organization
</description>
<option>
Ideas organized illogically, transitions weak, and response difficult to follow.
</option>
<option>
Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
</option>
<option>
Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
</option>
</category>
<category>
<description>
Style
</description>
<option>
Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
</option>
<option>
Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
</option>
<option>
Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
</option>
</category>
<category>
<description>
Voice
</description>
<option>
Demonstrates language and tone that may be inappropriate to task and reader.
</option>
<option>
Demonstrates an attempt to adjust language and tone to task and reader.
</option>
<option>
Demonstrates effective adjustment of language and tone to task and reader.
</option>
</category>
</rubric>
</rubric>
<task>
<selfassessment/></task>
<task>
<openended min_score_to_attempt="4" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
<task>
<openended min_score_to_attempt="9" max_score_to_attempt="12" >
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "peer_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>
</task>
</combinedopenended>
""")
class VersionInteger(Integer):
"""
A model type that converts from strings to integers when reading from json.
Also does error checking to see if version is correct or not.
"""
def from_json(self, value):
try:
value = int(value)
if value not in VERSION_TUPLES:
version_error_string = "Could not find version {0}, using version {1} instead"
log.error(version_error_string.format(value, DEFAULT_VERSION))
value = DEFAULT_VERSION
        except (TypeError, ValueError):
            # not an integer at all; fall back to the default version
            value = DEFAULT_VERSION
return value
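    # Behaviour sketch: from_json("1") -> 1; an unknown version such as "99" is
    # logged and replaced by DEFAULT_VERSION; an unparsable value falls back to
    # DEFAULT_VERSION silently.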
class CombinedOpenEndedFields(object):
display_name = String(
display_name="Display Name",
help="This name appears in the horizontal navigation at the top of the page.",
default="Open Response Assessment",
scope=Scope.settings
)
current_task_number = Integer(
help="Current task that the student is on.",
default=0,
scope=Scope.user_state
)
old_task_states = List(
help=("A list of lists of state dictionaries for student states that are saved."
"This field is only populated if the instructor changes tasks after"
"the module is created and students have attempted it (for example changes a self assessed problem to "
"self and peer assessed."),
scope = Scope.user_state
)
task_states = List(
help="List of state dictionaries of each task within this module.",
scope=Scope.user_state
)
state = String(
help="Which step within the current task that the student is on.",
default="initial",
scope=Scope.user_state
)
graded = Boolean(
display_name="Graded",
help='Defines whether the student gets credit for grading this problem.',
default=False,
scope=Scope.settings
)
student_attempts = Integer(
help="Number of attempts taken by the student on this problem",
default=0,
scope=Scope.user_state
)
ready_to_reset = Boolean(
help="If the problem is ready to be reset or not.",
default=False,
scope=Scope.user_state
)
max_attempts = Integer(
display_name="Maximum Attempts",
help="The number of times the student can try to answer this problem.",
default=1,
scope=Scope.settings,
values={"min": 1 }
)
accept_file_upload = Boolean(
display_name="Allow File Uploads",
help="Whether or not the student can submit files as a response.",
default=False,
scope=Scope.settings
)
skip_spelling_checks = Boolean(
display_name="Disable Quality Filter",
help="If False, the Quality Filter is enabled and submissions with poor spelling, short length, or poor grammar will not be peer reviewed.",
default=False,
scope=Scope.settings
)
due = Date(
help="Date that this problem is due by",
scope=Scope.settings
)
graceperiod = Timedelta(
help="Amount of time after the due date that submissions will be accepted",
scope=Scope.settings
)
version = VersionInteger(help="Current version number", default=DEFAULT_VERSION, scope=Scope.settings)
data = String(help="XML data for the problem", scope=Scope.content,
default=DEFAULT_DATA)
weight = Float(
display_name="Problem Weight",
help="Defines the number of points each problem is worth. If the value is not set, each problem is worth one point.",
scope=Scope.settings,
values={"min": 0, "step": ".1"},
default=1
)
min_to_calibrate = Integer(
display_name="Minimum Peer Grading Calibrations",
help="The minimum number of calibration essays each student will need to complete for peer grading.",
default=3,
scope=Scope.settings,
values={"min": 1, "max": 20, "step": "1"}
)
max_to_calibrate = Integer(
display_name="Maximum Peer Grading Calibrations",
help="The maximum number of calibration essays each student will need to complete for peer grading.",
default=6,
scope=Scope.settings,
values={"min": 1, "max": 20, "step": "1"}
)
peer_grader_count = Integer(
display_name="Peer Graders per Response",
help="The number of peers who will grade each submission.",
default=3,
scope=Scope.settings,
values={"min": 1, "step": "1", "max": 5}
)
required_peer_grading = Integer(
display_name="Required Peer Grading",
help="The number of other students each student making a submission will have to grade.",
default=3,
scope=Scope.settings,
values={"min": 1, "step": "1", "max": 5}
)
markdown = String(
help="Markdown source of this module",
default=textwrap.dedent("""\
[prompt]
<h3>Censorship in the Libraries</h3>
<p>'All of us can think of a book that we hope none of our children or any other children have taken off the shelf. But if I have the right to remove that book from the shelf -- that work I abhor -- then you also have exactly the same right and so does everyone else. And then we have no books left on the shelf for any of us.' --Katherine Paterson, Author
</p>
<p>
        Write a persuasive essay to a newspaper reflecting your views on censorship in libraries. Do you believe that certain materials, such as books, music, movies, magazines, etc., should be removed from the shelves if they are found offensive? Support your position with convincing arguments from your own experience, observations, and/or reading.
</p>
[prompt]
[rubric]
+ Ideas
- Difficult for the reader to discern the main idea. Too brief or too repetitive to establish or maintain a focus.
- Attempts a main idea. Sometimes loses focus or ineffectively displays focus.
- Presents a unifying theme or main idea, but may include minor tangents. Stays somewhat focused on topic and task.
- Presents a unifying theme or main idea without going off on tangents. Stays completely focused on topic and task.
+ Content
- Includes little information with few or no details or unrelated details. Unsuccessful in attempts to explore any facets of the topic.
- Includes little information and few or no details. Explores only one or two facets of the topic.
- Includes sufficient information and supporting details. (Details may not be fully developed; ideas may be listed.) Explores some facets of the topic.
- Includes in-depth information and exceptional supporting details that are fully developed. Explores all facets of the topic.
+ Organization
- Ideas organized illogically, transitions weak, and response difficult to follow.
- Attempts to logically organize ideas. Attempts to progress in an order that enhances meaning, and demonstrates use of transitions.
- Ideas organized logically. Progresses in an order that enhances meaning. Includes smooth transitions.
+ Style
- Contains limited vocabulary, with many words used incorrectly. Demonstrates problems with sentence patterns.
- Contains basic vocabulary, with words that are predictable and common. Contains mostly simple sentences (although there may be an attempt at more varied sentence patterns).
- Includes vocabulary to make explanations detailed and precise. Includes varied sentence patterns, including complex sentences.
+ Voice
- Demonstrates language and tone that may be inappropriate to task and reader.
- Demonstrates an attempt to adjust language and tone to task and reader.
- Demonstrates effective adjustment of language and tone to task and reader.
[rubric]
[tasks]
(Self), ({4-12}AI), ({9-12}Peer)
[tasks]
"""),
scope=Scope.settings
)
class CombinedOpenEndedModule(CombinedOpenEndedFields, XModule):
"""
This is a module that encapsulates all open ended grading (self assessment, peer assessment, etc).
    It transitions between problems, and supports arbitrary ordering.
Each combined open ended module contains one or multiple "child" modules.
Child modules track their own state, and can transition between states. They also implement get_html and
handle_ajax.
    The combined open ended module transitions between child modules as appropriate, tracks its own state, and passes
ajax requests from the browser to the child module or handles them itself (in the cases of reset and next problem)
ajax actions implemented by all children are:
'save_answer' -- Saves the student answer
'save_assessment' -- Saves the student assessment (or external grader assessment)
'save_post_assessment' -- saves a post assessment (hint, feedback on feedback, etc)
ajax actions implemented by combined open ended module are:
'reset' -- resets the whole combined open ended module and returns to the first child module
'next_problem' -- moves to the next child module
'get_results' -- gets results from a given child module
Types of children. Task is synonymous with child module, so each combined open ended module
incorporates multiple children (tasks):
openendedmodule
selfassessmentmodule
CombinedOpenEndedModule.__init__ takes the same arguments as xmodule.x_module:XModule.__init__
"""
STATE_VERSION = 1
# states
INITIAL = 'initial'
ASSESSING = 'assessing'
INTERMEDIATE_DONE = 'intermediate_done'
DONE = 'done'
icon_class = 'problem'
js = {
'coffee':
[
resource_string(__name__, 'js/src/combinedopenended/display.coffee'),
resource_string(__name__, 'js/src/collapsible.coffee'),
resource_string(__name__, 'js/src/javascript_loader.coffee'),
]
}
js_module_name = "CombinedOpenEnded"
css = {'scss': [resource_string(__name__, 'css/combinedopenended/display.scss')]}
def __init__(self, *args, **kwargs):
"""
Definition file should have one or many task blocks, a rubric block, and a prompt block.
See DEFAULT_DATA for a sample.
"""
XModule.__init__(self, *args, **kwargs)
self.system.set('location', self.location)
if self.task_states is None:
self.task_states = []
if self.old_task_states is None:
self.old_task_states = []
version_tuple = VERSION_TUPLES[self.version]
self.student_attributes = version_tuple.student_attributes
self.settings_attributes = version_tuple.settings_attributes
attributes = self.student_attributes + self.settings_attributes
static_data = {}
instance_state = {k: getattr(self, k) for k in attributes}
self.child_descriptor = version_tuple.descriptor(self.system)
self.child_definition = version_tuple.descriptor.definition_from_xml(etree.fromstring(self.data), self.system)
self.child_module = version_tuple.module(self.system, self.location, self.child_definition, self.child_descriptor,
instance_state=instance_state, static_data=static_data,
attributes=attributes)
self.save_instance_data()
def get_html(self):
self.save_instance_data()
return_value = self.child_module.get_html()
return return_value
def handle_ajax(self, dispatch, data):
self.save_instance_data()
return_value = self.child_module.handle_ajax(dispatch, data)
self.save_instance_data()
return return_value
def get_instance_state(self):
return self.child_module.get_instance_state()
def get_score(self):
return self.child_module.get_score()
def max_score(self):
return self.child_module.max_score()
def get_progress(self):
return self.child_module.get_progress()
@property
def due_date(self):
return self.child_module.due_date
def save_instance_data(self):
for attribute in self.student_attributes:
setattr(self, attribute, getattr(self.child_module, attribute))
class CombinedOpenEndedDescriptor(CombinedOpenEndedFields, RawDescriptor):
"""
Module for adding combined open ended questions
"""
mako_template = "widgets/open-ended-edit.html"
module_class = CombinedOpenEndedModule
has_score = True
always_recalculate_grades = True
template_dir_name = "combinedopenended"
#Specify whether or not to pass in S3 interface
needs_s3_interface = True
#Specify whether or not to pass in open ended interface
needs_open_ended_interface = True
metadata_attributes = RawDescriptor.metadata_attributes
js = {'coffee': [resource_string(__name__, 'js/src/combinedopenended/edit.coffee')]}
js_module_name = "OpenEndedMarkdownEditingDescriptor"
css = {'scss': [resource_string(__name__, 'css/editor/edit.scss'), resource_string(__name__, 'css/combinedopenended/edit.scss')]}
metadata_translations = {
'is_graded': 'graded',
'attempts': 'max_attempts',
}
def get_context(self):
_context = RawDescriptor.get_context(self)
_context.update({'markdown': self.markdown,
'enable_markdown': self.markdown is not None})
return _context
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(CombinedOpenEndedDescriptor, self).non_editable_metadata_fields
non_editable_fields.extend([CombinedOpenEndedDescriptor.due, CombinedOpenEndedDescriptor.graceperiod,
CombinedOpenEndedDescriptor.markdown, CombinedOpenEndedDescriptor.version])
return non_editable_fields
| pdehaye/theming-edx-platform | common/lib/xmodule/xmodule/combined_open_ended_module.py | Python | agpl-3.0 | 21,389 |
# Copyright 2020 Tecnativa - Alexandre Díaz
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from copy import deepcopy
from xml.sax.saxutils import escape
from lxml import etree as ElementTree
from odoo import SUPERUSER_ID, api
def _merge_views(env, xmlids):
old_view_ids = env["ir.ui.view"].search(
[("key", "in", xmlids), ("active", "=", True)]
)
# Get only the edited version of the views (if has it)
old_view_ids_edited = old_view_ids.filtered("website_id")
old_view_ids_edited_keys = old_view_ids_edited.mapped("key")
views_to_discard = env["ir.ui.view"]
for old_view in old_view_ids:
if not old_view.website_id and old_view.key in old_view_ids_edited_keys:
views_to_discard |= old_view
old_view_ids -= views_to_discard
new_website_page = env.ref("website_legal_page.legal_page_page")
new_view_id = env.ref("website_legal_page.legal_page")
# 'Dolly' separator element
separator = ElementTree.fromstring(
"<div class='s_hr text-left pt32 pb32' data-name='Separator'>"
+ "<hr class='s_hr_1px s_hr_solid border-600 w-100 mx-auto'/></div>"
)
# Replace new content with the old one per website
website_ids = old_view_ids.mapped("website_id")
for website_id in website_ids:
new_xml = ElementTree.fromstring(new_view_id.arch)
table_content_list = new_xml.xpath("//div[@id='section_list']/ul")[0]
sections_content = new_xml.xpath("//div[@id='section_content']")[0]
has_views_edited = any(
old_view_ids_edited.filtered(lambda x: x.website_id == website_id)
)
# Remove 'IS A SAMPLE' alert
if has_views_edited:
alert = new_xml.xpath(
"//section[@data-name='Title']//div[@data-name='Alert']"
)[0]
alert.find("..").remove(alert)
# Remove unused content
for child in table_content_list.getchildren():
table_content_list.remove(child)
for child in sections_content.getchildren():
sections_content.remove(child)
views_done = env["ir.ui.view"]
for old_view_id in old_view_ids:
if old_view_id.website_id != website_id:
continue
anchor_name = old_view_id.key.split(".")[1]
# Insert item in table content list
list_item = ElementTree.fromstring(
"<li><p><a href='#{}'>{}</a></p></li>".format(
anchor_name, escape(old_view_id.name)
)
)
table_content_list.append(list_item)
# Insert section content
old_xml = ElementTree.fromstring(old_view_id.arch)
old_content = old_xml.xpath("//div[@id='wrap']")[0]
sections_content.append(deepcopy(separator))
sections_content.append(
ElementTree.fromstring(
"<a class='legal_anchor' id='%s'/>" % anchor_name
)
)
            for child in old_content.getchildren():
                sections_content.append(child)
views_done |= old_view_id
old_view_ids -= views_done
# Create a new page with the changes
view_id = env["ir.ui.view"].create(
{
"arch": ElementTree.tostring(new_xml, encoding="unicode"),
"website_id": website_id.id,
"key": new_view_id.key,
"name": new_view_id.name,
"type": "qweb",
}
)
env["website.page"].create(
{
"name": new_website_page.name,
"url": new_website_page.url,
"view_id": view_id.id,
"is_published": True,
"website_id": website_id.id,
"website_indexed": True,
"website_published": True,
}
)
def post_init_hook(cr, registry):
with api.Environment.manage():
env = api.Environment(cr, SUPERUSER_ID, {})
is_website_sale_installed = (
env["ir.module.module"].search_count(
[("name", "=", "website_sale"), ("state", "=", "installed")]
)
> 0
)
if is_website_sale_installed:
_merge_views(env, ["website_sale.terms"])
| OCA/website | website_legal_page/hooks.py | Python | agpl-3.0 | 4,340 |
#############################################################################
##
## Copyright (C) 2017 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
'''Test cases for QtMultimediaWidgets'''
import unittest
from helper import UsesQApplication
from PySide2.QtMultimediaWidgets import QGraphicsVideoItem, QVideoWidget
from PySide2.QtWidgets import QGraphicsScene, QGraphicsView, QVBoxLayout, QWidget
from PySide2.QtCore import QTimer
class MyWidget(QWidget):
def __init__(self):
QWidget.__init__(self)
layout = QVBoxLayout(self)
layout.addWidget(QVideoWidget())
graphicsScene = QGraphicsScene()
graphicsView = QGraphicsView(graphicsScene)
graphicsScene.addItem(QGraphicsVideoItem())
layout.addWidget(graphicsView)
class QMultimediaWidgetsTest(UsesQApplication):
def testMultimediaWidgets(self):
w = MyWidget()
w.show()
        # QTimer.singleShot() returns None, so there is no timer object to keep
        QTimer.singleShot(100, self.app.quit)
self.app.exec_()
if __name__ == '__main__':
unittest.main()
| qtproject/pyside-pyside | tests/QtMultimediaWidgets/qmultimediawidgets.py | Python | lgpl-2.1 | 2,146 |
#!/usr/bin/env python3
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# This file is part of libgpiod.
#
# Copyright (C) 2017-2018 Bartosz Golaszewski <[email protected]>
#
'''Simplified reimplementation of the gpioset tool in Python.'''
import gpiod
import sys
if __name__ == '__main__':
if len(sys.argv) < 3:
raise TypeError('usage: gpioset.py <gpiochip> <offset1>=<value1> ...')
with gpiod.Chip(sys.argv[1]) as chip:
offsets = []
values = []
for arg in sys.argv[2:]:
arg = arg.split('=')
offsets.append(int(arg[0]))
values.append(int(arg[1]))
lines = chip.get_lines(offsets)
lines.request(consumer=sys.argv[0], type=gpiod.LINE_REQ_DIR_OUT)
lines.set_values(values)
input()
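# Example invocation (hypothetical chip name and offsets):
#   ./gpioset.py gpiochip0 3=1 4=0
# The final input() keeps the requested line values asserted until Enter
# is pressed.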
| brgl/libgpiod | bindings/python/examples/gpioset.py | Python | lgpl-2.1 | 793 |
"""LDAP protocol proxy server"""
from twisted.internet import reactor, defer
from ldaptor.protocols.ldap import ldapserver, ldapconnector, ldapclient
from ldaptor.protocols import pureldap
class Proxy(ldapserver.BaseLDAPServer):
protocol = ldapclient.LDAPClient
client = None
waitingConnect = []
unbound = False
def __init__(self, config):
"""
Initialize the object.
@param config: The configuration.
@type config: ldaptor.interfaces.ILDAPConfig
"""
ldapserver.BaseLDAPServer.__init__(self)
self.config = config
def _whenConnected(self, fn, *a, **kw):
if self.client is None:
d = defer.Deferred()
self.waitingConnect.append((d, fn, a, kw))
return d
else:
return defer.maybeDeferred(fn, *a, **kw)
def _cbConnectionMade(self, proto):
self.client = proto
while self.waitingConnect:
d, fn, a, kw = self.waitingConnect.pop(0)
d2 = defer.maybeDeferred(fn, *a, **kw)
d2.chainDeferred(d)
def _clientQueue(self, request, controls, reply):
# TODO controls
if request.needs_answer:
d = self.client.send_multiResponse(request, self._gotResponse, reply)
# TODO handle d errbacks
else:
self.client.send_noResponse(request)
def _gotResponse(self, response, reply):
reply(response)
# TODO this is ugly
return isinstance(response, (
pureldap.LDAPSearchResultDone,
pureldap.LDAPBindResponse,
))
def _failConnection(self, reason):
#TODO self.loseConnection()
return reason # TODO
def connectionMade(self):
clientCreator = ldapconnector.LDAPClientCreator(
reactor, self.protocol)
d = clientCreator.connect(
dn='',
overrides=self.config.getServiceLocationOverrides())
d.addCallback(self._cbConnectionMade)
d.addErrback(self._failConnection)
ldapserver.BaseLDAPServer.connectionMade(self)
def connectionLost(self, reason):
assert self.client is not None
if self.client.connected:
if not self.unbound:
self.client.unbind()
self.unbound = True
else:
self.client.transport.loseConnection()
self.client = None
ldapserver.BaseLDAPServer.connectionLost(self, reason)
def _handleUnknown(self, request, controls, reply):
self._whenConnected(self._clientQueue, request, controls, reply)
return None
def handleUnknown(self, request, controls, reply):
d = defer.succeed(request)
d.addCallback(self._handleUnknown, controls, reply)
return d
def handle_LDAPUnbindRequest(self, request, controls, reply):
self.unbound = True
self.handleUnknown(request, controls, reply)
if __name__ == '__main__':
"""
Demonstration LDAP proxy; passes all requests to localhost:389.
"""
from twisted.internet import protocol
from twisted.python import log
import sys
log.startLogging(sys.stderr)
factory = protocol.ServerFactory()
    # Proxy() takes an ILDAPConfig, not an 'overrides' keyword; build a minimal
    # config object carrying the service location overrides.
    from ldaptor import config
    factory.protocol = lambda: Proxy(config.LDAPConfig(
        serviceLocationOverrides={'': ('localhost', 389)}))
reactor.listenTCP(10389, factory)
reactor.run()
| antong/ldaptor | ldaptor/protocols/ldap/proxy.py | Python | lgpl-2.1 | 3,381 |
import sys
import os
import subprocess as ssubprocess
_p = None
def start_syslog():
global _p
with open(os.devnull, 'w') as devnull:
_p = ssubprocess.Popen(
['logger', '-p', 'daemon.notice', '-t', 'sshuttle'],
stdin=ssubprocess.PIPE,
stdout=devnull,
stderr=devnull
)
def close_stdin():
sys.stdin.close()
def stdout_to_syslog():
sys.stdout.flush()
os.dup2(_p.stdin.fileno(), sys.stdout.fileno())
def stderr_to_syslog():
sys.stderr.flush()
os.dup2(_p.stdin.fileno(), sys.stderr.fileno())
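# Typical sequence (a sketch, not part of the original module):
#   start_syslog()
#   stdout_to_syslog()
#   stderr_to_syslog()
# After this, anything written to stdout/stderr is piped into the spawned
# 'logger' process and ends up in syslog tagged 'sshuttle'.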
| sshuttle/sshuttle | sshuttle/ssyslog.py | Python | lgpl-2.1 | 588 |
import urllib2
import re
JIRA_URL='https://bugreports.qt-project.org/browse'
class JIRA:
__instance__ = None
# Helper class
class Bug:
CREATOR = 'QTCREATORBUG'
SIMULATOR = 'QTSIM'
SDK = 'QTSDK'
QT = 'QTBUG'
QT_QUICKCOMPONENTS = 'QTCOMPONENTS'
# constructor of JIRA
def __init__(self, number, bugType=Bug.CREATOR):
if JIRA.__instance__ == None:
JIRA.__instance__ = JIRA.__impl(number, bugType)
JIRA.__dict__['_JIRA__instance__'] = JIRA.__instance__
else:
JIRA.__instance__._bugType = bugType
JIRA.__instance__._number = number
JIRA.__instance__.__fetchStatusAndResolutionFromJira__()
    # overridden to make it possible to use JIRA just like the
# underlying implementation (__impl)
def __getattr__(self, attr):
return getattr(self.__instance__, attr)
    # overridden to make it possible to use JIRA just like the
# underlying implementation (__impl)
def __setattr__(self, attr, value):
return setattr(self.__instance__, attr, value)
# function to get an instance of the singleton
@staticmethod
def getInstance():
if '_JIRA__instance__' in JIRA.__dict__:
return JIRA.__instance__
else:
return JIRA.__impl(0, Bug.CREATOR)
# function to check if the given bug is open or not
@staticmethod
def isBugStillOpen(number, bugType=Bug.CREATOR):
tmpJIRA = JIRA(number, bugType)
return tmpJIRA.isOpen()
# function similar to performWorkaroundForBug - but it will execute the
# workaround (function) only if the bug is still open
# returns True if the workaround function has been executed, False otherwise
@staticmethod
def performWorkaroundIfStillOpen(number, bugType=Bug.CREATOR, *args):
if JIRA.isBugStillOpen(number, bugType):
return JIRA.performWorkaroundForBug(number, bugType, *args)
else:
test.warning("Bug is closed... skipping workaround!",
"You should remove potential code inside performWorkaroundForBug()")
return False
# function that performs the workaround (function) for the given bug
# if the function needs additional arguments pass them as 3rd parameter
@staticmethod
def performWorkaroundForBug(number, bugType=Bug.CREATOR, *args):
functionToCall = JIRA.getInstance().__bugs__.get("%s-%d" % (bugType, number), None)
if functionToCall:
test.warning("Using workaround for %s-%d" % (bugType, number))
functionToCall(*args)
return True
else:
JIRA.getInstance()._exitFatal_(bugType, number)
return False
# implementation of JIRA singleton
class __impl:
# constructor of __impl
def __init__(self, number, bugType):
self._number = number
self._bugType = bugType
self._localOnly = os.getenv("SYSTEST_JIRA_NO_LOOKUP")=="1"
self.__initBugDict__()
self.__fetchStatusAndResolutionFromJira__()
# function to retrieve the status of the current bug
def getStatus(self):
return self._status
# function to retrieve the resolution of the current bug
def getResolution(self):
return self._resolution
# this function checks the resolution of the given bug
# and returns True if the bug can still be assumed as 'Open' and False otherwise
def isOpen(self):
# handle special cases
if self._resolution == None:
return True
if self._resolution in ('Duplicate', 'Moved', 'Incomplete', 'Cannot Reproduce', 'Invalid'):
test.warning("Resolution of bug is '%s' - assuming 'Open' for now." % self._resolution,
"Please check the bugreport manually and update this test.")
return True
return self._resolution != 'Done'
# this function tries to fetch the status and resolution from JIRA for the given bug
# if this isn't possible or the lookup is disabled it does only check the internal
# dict whether a function for the given bug is deposited or not
def __fetchStatusAndResolutionFromJira__(self):
global JIRA_URL
data = None
if not self._localOnly:
try:
bugReport = urllib2.urlopen('%s/%s-%d' % (JIRA_URL, self._bugType, self._number))
data = bugReport.read()
except:
data = self.__tryExternalTools__()
if data == None:
test.warning("Sorry, ssl module missing - cannot fetch data via HTTPS",
"Try to install the ssl module by yourself, or set the python "
"path inside SQUISHDIR/etc/paths.ini to use a python version with "
"ssl support OR install wget or curl to get rid of this warning!")
self._localOnly = True
if data == None:
if '%s-%d' % (self._bugType, self._number) in self.__bugs__:
test.warning("Using internal dict - bug status could have changed already",
"Please check manually!")
self._status = None
self._resolution = None
return
else:
test.fatal("No workaround function deposited for %s-%d" % (self._bugType, self._number))
self._resolution = 'Done'
return
else:
data = data.replace("\r", "").replace("\n", "")
resPattern = re.compile('<span\s+id="resolution-val".*?>(?P<resolution>.*?)</span>')
statPattern = re.compile('<span\s+id="status-val".*?>(.*?<img.*?>)?(?P<status>.*?)</span>')
status = statPattern.search(data)
resolution = resPattern.search(data)
if status:
self._status = status.group("status").strip()
else:
test.fatal("FATAL: Cannot get status of bugreport %s-%d" % (self._bugType, self._number),
"Looks like JIRA has changed.... Please verify!")
self._status = None
if resolution:
self._resolution = resolution.group("resolution").strip()
else:
test.fatal("FATAL: Cannot get resolution of bugreport %s-%d" % (self._bugType, self._number),
"Looks like JIRA has changed.... Please verify!")
self._resolution = None
# simple helper function - used as fallback if python has no ssl support
# tries to find curl or wget in PATH and fetches data with it instead of
# using urllib2
def __tryExternalTools__(self):
global JIRA_URL
cmdAndArgs = { 'curl':'-k', 'wget':'-qO-' }
for call in cmdAndArgs:
prog = which(call)
if prog:
return getOutputFromCmdline('"%s" %s %s/%s-%d' % (prog, cmdAndArgs[call], JIRA_URL, self._bugType, self._number))
return None
# this function initializes the bug dict for localOnly usage and
# for later lookup which function to call for which bug
# ALWAYS update this dict when adding a new function for a workaround!
def __initBugDict__(self):
self.__bugs__= {
'QTCREATORBUG-6853':self._workaroundCreator6853_,
'QTCREATORBUG-6918':self._workaroundCreator_MacEditorFocus_,
'QTCREATORBUG-6953':self._workaroundCreator_MacEditorFocus_,
'QTCREATORBUG-6994':self._workaroundCreator6994_,
'QTCREATORBUG-7002':self._workaroundCreator7002_
}
# helper function - will be called if no workaround for the requested bug is deposited
def _exitFatal_(self, bugType, number):
test.fatal("No workaround found for bug %s-%d" % (bugType, number))
############### functions that hold workarounds #################################
def _workaroundCreator6994_(self, *args):
if args[0] in ('Mobile Qt Application', 'Qt Gui Application', 'Qt Custom Designer Widget'):
args[1].remove('Harmattan')
test.xverify(False, "Removed Harmattan from expected targets.")
def _workaroundCreator6853_(self, *args):
if "Release" in args[0] and platform.system() == "Linux":
snooze(1)
def _workaroundCreator_MacEditorFocus_(self, *args):
editor = args[0]
nativeMouseClick(editor.mapToGlobal(QPoint(50, 50)).x, editor.mapToGlobal(QPoint(50, 50)).y, Qt.LeftButton)
def _workaroundCreator7002_(self, *args):
if platform.system() in ("Linux", "Darwin"):
result = args[0]
result.append(QtQuickConstants.Targets.EMBEDDED_LINUX)
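# Typical use inside a Squish-based test (sketch; the bug number and extra
# argument are illustrative only):
#   JIRA.performWorkaroundIfStillOpen(6853, JIRA.Bug.CREATOR, "Release Build")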
| hdweiss/qt-creator-visualizer | tests/system/shared/workarounds.py | Python | lgpl-2.1 | 9,260 |
#!/bin/env python
# This is the building script for Python maxent extension module.
# Simply type "python setup.py build" at command line to build the extension.
# After that you can type "python setup.py install" to install the extension
# module.
#
# The script assume you use gcc on unix and msvc on win32 platform.
from sys import platform, exec_prefix
from distutils.core import setup, Extension
# change the lines below according to your boost location
if platform == "win32":
libmaxent_name = 'libmaxent'
extra_compile_args = [
"-DWIN32",
"-DPYTHON_MODULE",
"-DHAVE_FORTRAN=1",
"-DBOOST_DISABLE_THREADS",
"-DBOOST_DISABLE_ASSERTS",
"/GR",
]
data_files = [('Lib/site-packages/maxent' ,
['stlport_vc7146.dll',
'libifcoremd.dll',
'libmmd.dll']),
]
opt_lib = []
else: # unix
libmaxent_name = 'maxent'
extra_compile_args = [
"-DNDEBUG",
"-DPYTHON_MODULE",
"-DBOOST_DISABLE_THREADS",
]
data_files = []
# various options detected from running ../configure
opt_lib = []
opt_lib_path = []
ac_cv_lib_z_main = "@ac_cv_lib_z_main@"
if ac_cv_lib_z_main == 'yes':
opt_lib.append('z')
fclibs = "/usr/lib/x86_64-linux-gnu/libboost_chrono.a"
opt_lib_path.append("/usr/lib/x86_64-linux-gnu/")
opt_lib.append('boost_chrono')
opt_lib.append('boost_timer')
# if fclibs != '':
# for s in fclibs.split():
# if s[:2] == '-L':
# opt_lib_path.append(s[2:])
# elif s[:2] == '-l':
# opt_lib.append(s[2:])
# else:
#            raise 'unknown FCLIBS item: %s' % s
setup(name = "maxent",
version = "version-devel",
author = "Le Zhang",
author_email = "[email protected]",
url = "http://homepages.inf.ed.ac.uk/lzhang10/maxent_toolkit.html",
description = "A Maximum Entropy Modeling toolkit in python",
long_description = """Maxent is a powerful, flexible, and easy-to-use
Maximum Entropy Modeling library for Python. The core engine is written in C++
with speed and portability in mind.
The win32 version of this module was compiled with MSVC7.1, Intel Fortran 8.0,
STLPort 4.6.
""",
license = "LGPL",
packages = ['maxent'],
ext_modules=[
Extension("maxent._cmaxent",
["maxent_wrap.cxx"],
include_dirs=[
"../src",
],
library_dirs=[
"../build/src",
] + opt_lib_path,
libraries = [libmaxent_name] + opt_lib,
extra_compile_args = extra_compile_args,
)
],
data_files = data_files,
)
| lzhang10/maxent | python/setup.py | Python | lgpl-2.1 | 2,832 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# pyliblo - Python bindings for the liblo OSC library
#
# Copyright (C) 2007-2011 Dominic Sacré <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
import unittest
import re
import time
import sys
import liblo
def approx(a, b, e = 0.0002):
return abs(a - b) < e
def matchHost(host, regex):
r = re.compile(regex)
return r.match(host) != None
class Arguments:
def __init__(self, path, args, types, src, data):
self.path = path
self.args = args
self.types = types
self.src = src
self.data = data
class ServerTestCaseBase(unittest.TestCase):
def setUp(self):
self.cb = None
def callback(self, path, args, types, src, data):
self.cb = Arguments(path, args, types, src, data)
def callback_dict(self, path, args, types, src, data):
if self.cb == None:
self.cb = { }
self.cb[path] = Arguments(path, args, types, src, data)
class ServerTestCase(ServerTestCaseBase):
def setUp(self):
ServerTestCaseBase.setUp(self)
self.server = liblo.Server('1234')
def tearDown(self):
del self.server
def testPort(self):
assert self.server.get_port() == 1234
def testURL(self):
assert matchHost(self.server.get_url(), 'osc\.udp://.*:1234/')
def testSendInt(self):
self.server.add_method('/foo', 'i', self.callback, "data")
self.server.send('1234', '/foo', 123)
assert self.server.recv() == True
assert self.cb.path == '/foo'
assert self.cb.args[0] == 123
assert self.cb.types == 'i'
assert self.cb.data == "data"
assert matchHost(self.cb.src.get_url(), 'osc\.udp://.*:1234/')
def testSendBlob(self):
self.server.add_method('/blob', 'b', self.callback)
self.server.send('1234', '/blob', [4, 8, 15, 16, 23, 42])
assert self.server.recv() == True
if sys.hexversion < 0x03000000:
assert list(self.cb.args[0]) == [4, 8, 15, 16, 23, 42]
else:
assert self.cb.args[0] == b'\x04\x08\x0f\x10\x17\x2a'
def testSendVarious(self):
self.server.add_method('/blah', 'ihfdscb', self.callback)
if sys.hexversion < 0x03000000:
self.server.send(1234, '/blah', 123, 2**42, 123.456, 666.666, "hello", ('c', 'x'), (12, 34, 56))
else:
self.server.send(1234, '/blah', 123, ('h', 2**42), 123.456, 666.666, "hello", ('c', 'x'), (12, 34, 56))
assert self.server.recv() == True
assert self.cb.types == 'ihfdscb'
assert len(self.cb.args) == len(self.cb.types)
assert self.cb.args[0] == 123
assert self.cb.args[1] == 2**42
assert approx(self.cb.args[2], 123.456)
assert approx(self.cb.args[3], 666.666)
assert self.cb.args[4] == "hello"
assert self.cb.args[5] == 'x'
if sys.hexversion < 0x03000000:
assert list(self.cb.args[6]) == [12, 34, 56]
else:
assert self.cb.args[6] == b'\x0c\x22\x38'
def testSendOthers(self):
self.server.add_method('/blubb', 'tmSTFNI', self.callback)
self.server.send(1234, '/blubb', ('t', 666666.666), ('m', (1, 2, 3, 4)), ('S', 'foo'), True, ('F',), None, ('I',))
assert self.server.recv() == True
assert self.cb.types == 'tmSTFNI'
assert approx(self.cb.args[0], 666666.666)
assert self.cb.args[1] == (1, 2, 3, 4)
assert self.cb.args[2] == 'foo'
assert self.cb.args[3] == True
assert self.cb.args[4] == False
assert self.cb.args[5] == None
assert self.cb.args[6] == float('inf')
def testSendMessage(self):
self.server.add_method('/blah', 'is', self.callback)
m = liblo.Message('/blah', 42, 'foo')
self.server.send(1234, m)
assert self.server.recv() == True
assert self.cb.types == 'is'
assert self.cb.args[0] == 42
assert self.cb.args[1] == 'foo'
def testSendBundle(self):
self.server.add_method('/foo', 'i', self.callback_dict)
self.server.add_method('/bar', 's', self.callback_dict)
self.server.send(1234, liblo.Bundle(
liblo.Message('/foo', 123),
liblo.Message('/bar', "blubb")
))
assert self.server.recv(100) == True
assert self.cb['/foo'].args[0] == 123
assert self.cb['/bar'].args[0] == "blubb"
def testSendTimestamped(self):
self.server.add_method('/blubb', 'i', self.callback)
d = 1.23
t1 = time.time()
b = liblo.Bundle(liblo.time() + d)
b.add('/blubb', 42)
self.server.send(1234, b)
while not self.cb:
self.server.recv(1)
t2 = time.time()
assert approx(t2 - t1, d, 0.01)
def testSendInvalid(self):
try:
self.server.send(1234, '/blubb', ('x', 'y'))
except TypeError as e:
pass
else:
assert False
def testRecvTimeout(self):
t1 = time.time()
assert self.server.recv(500) == False
t2 = time.time()
assert t2 - t1 < 0.666
def testRecvImmediate(self):
t1 = time.time()
assert self.server.recv(0) == False
t2 = time.time()
assert t2 - t1 < 0.01
class ServerCreationTestCase(unittest.TestCase):
def testNoPermission(self):
try:
s = liblo.Server('22')
except liblo.ServerError as e:
pass
else:
assert False
def testRandomPort(self):
s = liblo.Server()
assert 1024 <= s.get_port() <= 65535
def testPort(self):
s = liblo.Server(1234)
t = liblo.Server('5678')
assert s.port == 1234
assert t.port == 5678
assert matchHost(s.url, 'osc\.udp://.*:1234/')
def testPortProto(self):
s = liblo.Server(1234, liblo.TCP)
assert matchHost(s.url, 'osc\.tcp://.*:1234/')
class ServerTCPTestCase(ServerTestCaseBase):
def setUp(self):
ServerTestCaseBase.setUp(self)
self.server = liblo.Server('1234', liblo.TCP)
def tearDown(self):
del self.server
def testSendReceive(self):
self.server.add_method('/foo', 'i', self.callback)
liblo.send(self.server.url, '/foo', 123)
assert self.server.recv() == True
assert self.cb.path == '/foo'
assert self.cb.args[0] == 123
assert self.cb.types == 'i'
def testNotReachable(self):
try:
self.server.send('osc.tcp://192.168.23.42:4711', '/foo', 23, 42)
except IOError:
pass
else:
assert False
class ServerThreadTestCase(ServerTestCaseBase):
def setUp(self):
ServerTestCaseBase.setUp(self)
self.server = liblo.ServerThread('1234')
def tearDown(self):
del self.server
def testSendAndReceive(self):
self.server.add_method('/foo', 'i', self.callback)
self.server.send('1234', '/foo', 42)
self.server.start()
time.sleep(0.2)
self.server.stop()
assert self.cb.args[0] == 42
class DecoratorTestCase(unittest.TestCase):
class TestServer(liblo.Server):
def __init__(self):
liblo.Server.__init__(self, 1234)
@liblo.make_method('/foo', 'ibm')
def foo_cb(self, path, args, types, src, data):
self.cb = Arguments(path, args, types, src, data)
def setUp(self):
self.server = self.TestServer()
def tearDown(self):
del self.server
def testSendReceive(self):
liblo.send(1234, '/foo', 42, ('b', [4, 8, 15, 16, 23, 42]), ('m', (6, 6, 6, 0)))
assert self.server.recv() == True
assert self.server.cb.path == '/foo'
assert len(self.server.cb.args) == 3
class AddressTestCase(unittest.TestCase):
def testPort(self):
a = liblo.Address(1234)
b = liblo.Address('5678')
assert a.port == 1234
assert b.port == 5678
assert a.url == 'osc.udp://localhost:1234/'
def testUrl(self):
a = liblo.Address('osc.udp://foo:1234/')
assert a.url == 'osc.udp://foo:1234/'
assert a.hostname == 'foo'
assert a.port == 1234
assert a.protocol == liblo.UDP
def testHostPort(self):
a = liblo.Address('foo', 1234)
assert a.url == 'osc.udp://foo:1234/'
def testHostPortProto(self):
a = liblo.Address('foo', 1234, liblo.TCP)
assert a.url == 'osc.tcp://foo:1234/'
if __name__ == "__main__":
unittest.main()
| gesellkammer/pyliblo | test/unit.py | Python | lgpl-2.1 | 8,818 |
'''
Often used utility functions
Copyright 2020 by Massimo Del Fedele
'''
import sys
import uno
from com.sun.star.beans import PropertyValue
from datetime import date
import calendar
import PyPDF2
'''
SOME USEFUL NOTES
The window that contains the current document (or component):
desktop.CurrentFrame.ContainerWindow
Nothing changes if a non-modal dialog is open:
it ALWAYS returns the document's frame.
desktop.ContainerWindow returns None -- not sure what it is for
To get the top windows there is the toolkit...
tk = ctx.ServiceManager.createInstanceWithContext("com.sun.star.awt.Toolkit", ctx)
tk.getTopWindowCount() returns the number of top windows
tk.getTopWindow(i) returns one top window from the list
tk.getActiveTopWindow() returns the active top window
For the active top window to be active it must, indeed, be active, i.e. have the focus.
When debugging, for instance, the active window is likely to be None.
So the problem ALWAYS remains of how to center a dialog on the current component.
If no dialog is running, a newly created dialog takes the ContainerWindow as its parent (presumably...)
and is therefore positioned relative to it.
If a dialog is open and another one is opened from its event handler, the latter takes the former
as its parent, and is therefore positioned relative to it and not to the main screen.
We therefore need a way to find the size OF THE PARENT WINDOW of a dialog, in order to position it.
The UnoControlDialog object lets you reach the XWindowPeer (which is of no use at all), the XView
(which gives the dialog's size but NOT its parent...), and the UnoControlDialogModel, which provides
the 'DesktopAsParent' property that ONLY tells whether the dialog is modal (False) or non-modal (True).
The only solution that comes to mind is to try tk.ActiveTopWindow and, if it is None, take the desktop's one.
'''
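# A minimal sketch of the fallback described above (added for illustration,
# not part of the original module): try the toolkit's active top window and,
# when it is None (e.g. while debugging), fall back to the desktop frame's
# container window.
def getActiveTopWindow():
    tk = createUnoService('com.sun.star.awt.Toolkit')
    win = tk.getActiveTopWindow()
    if win is None:
        win = getDesktop().CurrentFrame.ContainerWindow
    return win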
def getComponentContext():
'''
Get current application's component context
'''
try:
if __global_context__ is not None:
return __global_context__
return uno.getComponentContext()
except Exception:
return uno.getComponentContext()
def getDesktop():
'''
Get current application's LibreOffice desktop
'''
ctx = getComponentContext()
return ctx.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", ctx)
def getDocument():
'''
Get active document
'''
desktop = getDesktop()
# try to activate current frame
# needed sometimes because UNO doesnt' find the correct window
# when debugging.
try:
desktop.getCurrentFrame().activate()
except Exception:
pass
return desktop.getCurrentComponent()
def getServiceManager():
'''
Gets the service manager
'''
return getComponentContext().ServiceManager
def createUnoService(serv):
'''
create an UNO service
'''
return getComponentContext().getServiceManager().createInstance(serv)
def MRI(target):
ctx = getComponentContext()
mri = ctx.ServiceManager.createInstanceWithContext("mytools.Mri", ctx)
mri.inspect(target)
def isLeenoDocument():
'''
check if current document is a LeenO document
'''
try:
return getDocument().getSheets().hasByName('S2')
except Exception:
return False
def DisableDocumentRefresh(oDoc):
'''
    Disable refresh to speed up procedures
'''
oDoc.lockControllers()
oDoc.addActionLock()
def EnableDocumentRefresh(oDoc):
'''
Riabilita il refresh
'''
oDoc.removeActionLock()
oDoc.unlockControllers()
def getGlobalVar(name):
if type(__builtins__) == type(sys):
bDict = __builtins__.__dict__
else:
bDict = __builtins__
return bDict.get('LEENO_GLOBAL_' + name)
def setGlobalVar(name, value):
if type(__builtins__) == type(sys):
bDict = __builtins__.__dict__
else:
bDict = __builtins__
bDict['LEENO_GLOBAL_' + name] = value
def initGlobalVars(dict):
if type(__builtins__) == type(sys):
bDict = __builtins__.__dict__
else:
bDict = __builtins__
for key, value in dict.items():
bDict['LEENO_GLOBAL_' + key] = value
def dictToProperties(values, unoAny=False):
'''
convert a dictionary in a tuple of UNO properties
if unoAny is True, return the result in an UNO Any variable
otherwise use a python tuple
'''
ps = tuple([PropertyValue(Name=n, Value=v) for n, v in values.items()])
if unoAny:
ps = uno.Any('[]com.sun.star.beans.PropertyValue', ps)
return ps
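# e.g. (illustrative values, not from the original source):
#   dictToProperties({'Hidden': True, 'ReadOnly': False})
#   -> (PropertyValue(Name='Hidden', Value=True), PropertyValue(Name='ReadOnly', Value=False))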
def daysInMonth(dat):
    '''
    returns days in month of date dat
    '''
    # calendar.monthrange avoids constructing invalid dates (the previous
    # next-month arithmetic raised ValueError for e.g. January 31st)
    return calendar.monthrange(dat.year, dat.month)[1]
def firstWeekDay(dat):
'''
returns first week day in month from dat
monday is 0
'''
return calendar.weekday(dat.year, dat.month, 1)
DAYNAMES = ['Lun', 'Mar', 'Mer', 'Gio', 'Ven', 'Sab', 'Dom']
MONTHNAMES = [
'Gennaio', 'Febbraio', 'Marzo', 'Aprile',
'Maggio', 'Giugno', 'Luglio', 'Agosto',
'Settembre', 'Ottobre', 'Novembre', 'Dicembre'
]
def date2String(dat, fmt = 0):
'''
    convert a date to a string
fmt = 0 25 Febbraio 2020
fmt = 1 25/2/2020
fmt = 2 25-02-2020
fmt = 3 25.02.2020
'''
d = dat.day
m = dat.month
if m < 10:
ms = '0' + str(m)
else:
ms = str(m)
y = dat.year
if fmt == 1:
return str(d) + '/' + ms + '/' + str(y)
elif fmt == 2:
return str(d) + '-' + ms + '-' + str(y)
elif fmt == 3:
return str(d) + '.' + ms + '.' + str(y)
else:
return str(d) + ' ' + MONTHNAMES[m - 1] + ' ' + str(y)
def string2Date(s):
if '.' in s:
sp = s.split('.')
elif '/' in s:
sp = s.split('/')
elif '-' in s:
sp = s.split('-')
else:
return date.today()
    if len(sp) != 3:
        raise ValueError('unrecognized date string: %s' % s)
day = int(sp[0])
month = int(sp[1])
year = int(sp[2])
return date(day=day, month=month, year=year)
def countPdfPages(path):
'''
Returns the number of pages in a PDF document
using external PyPDF2 module
'''
with open(path, 'rb') as f:
pdf = PyPDF2.PdfFileReader(f)
return pdf.getNumPages()
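# e.g. (hypothetical path): countPdfPages('/tmp/report.pdf') -> 12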
def replacePatternWithField(oTxt, pattern, oField):
'''
Replaces a string pattern in a Text object
(for example '[PATTERN]') with the given field
'''
# pattern may be there many times...
repl = False
pos = oTxt.String.find(pattern)
while pos >= 0:
#create a cursor
cursor = oTxt.createTextCursor()
# use it to select the pattern
cursor.collapseToStart()
cursor.goRight(pos, False)
cursor.goRight(len(pattern), True)
# remove the pattern from text
cursor.String = ''
# insert the field at cursor's position
cursor.collapseToStart()
oTxt.insertTextContent(cursor, oField, False)
# next occurrence of pattern
pos = oTxt.String.find(pattern)
repl = True
return repl
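# Sketch of a typical call (pattern and document names are hypothetical;
# 'com.sun.star.text.TextField.DateTime' is the standard UNO date/time field):
#   oField = oDoc.createInstance('com.sun.star.text.TextField.DateTime')
#   replacePatternWithField(oDoc.getText(), '[DATE]', oField)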
| giuserpe/leeno | src/Ultimus.oxt/python/pythonpath/LeenoUtils.py | Python | lgpl-2.1 | 7,316 |
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, sys
def open_in_browser(link):
browser = os.environ.get('BROWSER', 'firefox')
child = os.fork()
if child == 0:
# We are the child
try:
os.spawnlp(os.P_NOWAIT, browser, browser, link)
os._exit(0)
except Exception, ex:
print >>sys.stderr, "Error", ex
os._exit(1)
os.waitpid(child, 0)
| pombredanne/zero-install | zeroinstall/0launch-gui/browser.py | Python | lgpl-2.1 | 419 |
# cell definition
# name = 'Epos_AD'
# libname = 'can'
inp = 0
outp = 1
parameters = dict() # parameterizable cell
properties = {'Device ID': ' 0x01', 'Channel [0/1]': ' 0', 'name': 'epos_areadBlk'} # for netlisting
#view variables:
iconSource = 'AD'
views = {'icon':iconSource}
| imec-myhdl/pycontrol-gui | BlockEditor/libraries/library_can/Epos_AD.py | Python | lgpl-2.1 | 283 |
import json
import maps
import traceback
from requests import get
from requests import post
from requests import put
from tendrl.commons.utils import log_utils as logger
from tendrl.monitoring_integration.grafana import constants
from tendrl.monitoring_integration.grafana import exceptions
from tendrl.monitoring_integration.grafana import utils
def _post_datasource(datasource_json):
config = maps.NamedDict(NS.config.data)
if utils.port_open(config.grafana_port, config.grafana_host):
resp = post(
"http://{}:{}/api/datasources".format(
config.grafana_host,
config.grafana_port
),
headers=constants.HEADERS,
auth=config.credentials,
data=datasource_json
)
else:
raise exceptions.ConnectionFailedException
return resp
def form_datasource_json():
config = maps.NamedDict(NS.config.data)
url = "http://" + str(config.datasource_host) + ":" \
+ str(config.datasource_port)
datasource_json = (
{'name': config.datasource_name,
'type': config.datasource_type,
'url': url,
'access': config.access,
'basicAuth': config.basicAuth,
'isDefault': config.isDefault
}
)
return datasource_json
def create_datasource():
try:
datasource_json = form_datasource_json()
response = _post_datasource(json.dumps(datasource_json))
return response
except exceptions.ConnectionFailedException:
logger.log("error", NS.get("publisher_id", None),
{'message': str(traceback.print_stack())})
raise exceptions.ConnectionFailedException
def get_data_source():
config = maps.NamedDict(NS.config.data)
if utils.port_open(config.grafana_port, config.grafana_host):
resp = get(
"http://{}:{}/api/datasources/id/{}".format(
config.grafana_host,
config.grafana_port,
config.datasource_name
),
auth=config.credentials
)
else:
raise exceptions.ConnectionFailedException
return resp
def update_datasource(datasource_id):
try:
config = maps.NamedDict(NS.config.data)
datasource_json = form_datasource_json()
datasource_str = json.dumps(datasource_json)
if utils.port_open(config.grafana_port, config.grafana_host):
response = put(
"http://{}:{}/api/datasources/{}".format(
config.grafana_host,
config.grafana_port,
datasource_id
),
headers=constants.HEADERS,
auth=config.credentials,
data=datasource_str
)
else:
raise exceptions.ConnectionFailedException
return response
except exceptions.ConnectionFailedException as ex:
logger.log("error", NS.get("publisher_id", None),
{'message': str(ex)})
raise ex
| Tendrl/monitoring-integration | tendrl/monitoring_integration/grafana/datasource_utils.py | Python | lgpl-2.1 | 3,062 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
import os.path
class Xbraid(MakefilePackage):
"""XBraid: Parallel time integration with Multigrid"""
homepage = "https://computing.llnl.gov/projects/parallel-time-integration-multigrid/software"
url = "https://github.com/XBraid/xbraid/archive/v2.2.0.tar.gz"
version('2.2.0', sha256='082623b2ddcd2150b3ace65b96c1e00be637876ec6c94dc8fefda88743b35ba3')
depends_on('mpi')
def build(self, spec, prefix):
make('libbraid.a')
# XBraid doesn't have a real install target, so it has to be done
# manually
def install(self, spec, prefix):
# Install headers
mkdirp(prefix.include)
headers = glob.glob('*.h')
for f in headers:
install(f, join_path(prefix.include, os.path.basename(f)))
# Install library
mkdirp(prefix.lib)
library = 'libbraid.a'
install(library, join_path(prefix.lib, library))
# Install other material (e.g., examples, tests, docs)
mkdirp(prefix.share)
install('makefile.inc', prefix.share)
install_tree('examples', prefix.share.examples)
install_tree('drivers', prefix.share.drivers)
# TODO: Some of the scripts in 'test' are useful, even for
# users; some could be deleted from an installation because
# they're not useful to users
install_tree('test', prefix.share.test)
install_tree('user_utils', prefix.share.user_utils)
install_tree('docs', prefix.share.docs)
@property
def libs(self):
return find_libraries('libbraid', root=self.prefix,
shared=False, recursive=True)
| rspavel/spack | var/spack/repos/builtin/packages/xbraid/package.py | Python | lgpl-2.1 | 1,879 |
# devices/md.py
#
# Copyright (C) 2009-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <[email protected]>
#
import os
import six
from gi.repository import BlockDev as blockdev
from ..devicelibs import mdraid, raid
from .. import errors
from .. import util
from ..flags import flags
from ..storage_log import log_method_call
from .. import udev
from ..size import Size
import logging
log = logging.getLogger("blivet")
from .storage import StorageDevice
from .container import ContainerDevice
from .raid import RaidDevice
class MDRaidArrayDevice(ContainerDevice, RaidDevice):
""" An mdraid (Linux RAID) device. """
_type = "mdarray"
_packages = ["mdadm"]
_devDir = "/dev/md"
_formatClassName = property(lambda s: "mdmember")
_formatUUIDAttr = property(lambda s: "mdUuid")
def __init__(self, name, level=None, major=None, minor=None, size=None,
memberDevices=None, totalDevices=None,
uuid=None, fmt=None, exists=False, metadataVersion=None,
parents=None, sysfsPath=''):
"""
:param name: the device name (generally a device node's basename)
:type name: str
:keyword exists: does this device exist?
:type exists: bool
:keyword size: the device's size
:type size: :class:`~.size.Size`
:keyword parents: a list of parent devices
:type parents: list of :class:`StorageDevice`
:keyword fmt: this device's formatting
:type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
:keyword sysfsPath: sysfs device path
:type sysfsPath: str
:keyword uuid: the device UUID
:type uuid: str
:keyword level: the device's RAID level
:type level: any valid RAID level descriptor
:keyword int memberDevices: the number of active member devices
:keyword int totalDevices: the total number of member devices
:keyword metadataVersion: the version of the device's md metadata
:type metadataVersion: str (eg: "0.90")
:keyword minor: the device minor (obsolete?)
:type minor: int
"""
# pylint: disable=unused-argument
# These attributes are used by _addParent, so they must be initialized
# prior to instantiating the superclass.
self._memberDevices = 0 # the number of active (non-spare) members
self._totalDevices = 0 # the total number of members
# avoid attribute-defined-outside-init pylint warning
self._level = None
super(MDRaidArrayDevice, self).__init__(name, fmt=fmt, uuid=uuid,
exists=exists, size=size,
parents=parents,
sysfsPath=sysfsPath)
try:
self.level = level
except errors.DeviceError as e:
# Could not set the level, so set loose the parents that were
# added in superclass constructor.
for dev in self.parents:
dev.removeChild()
raise e
self.uuid = uuid
self._totalDevices = util.numeric_type(totalDevices)
self.memberDevices = util.numeric_type(memberDevices)
self.chunkSize = mdraid.MD_CHUNK_SIZE
if not self.exists and not isinstance(metadataVersion, str):
self.metadataVersion = "default"
else:
self.metadataVersion = metadataVersion
if self.parents and self.parents[0].type == "mdcontainer" and self.type != "mdbiosraidarray":
raise errors.DeviceError("A device with mdcontainer member must be mdbiosraidarray.")
if self.exists and self.mdadmFormatUUID and not flags.testing:
# this is a hack to work around mdadm's insistence on giving
# really high minors to arrays it has no config entry for
with open("/etc/mdadm.conf", "a") as c:
c.write("ARRAY %s UUID=%s\n" % (self.path, self.mdadmFormatUUID))
@property
def mdadmFormatUUID(self):
""" This array's UUID, formatted for external use.
:returns: the array's UUID in mdadm format, if available
:rtype: str or NoneType
"""
formatted_uuid = None
if self.uuid is not None:
try:
formatted_uuid = blockdev.md.get_md_uuid(self.uuid)
except blockdev.MDRaidError:
pass
return formatted_uuid
@property
def level(self):
""" Return the raid level
:returns: raid level value
:rtype: an object that represents a RAID level
"""
return self._level
@property
def _levels(self):
""" Allowed RAID level for this type of device."""
return mdraid.RAID_levels
@level.setter
def level(self, value):
""" Set the RAID level and enforce restrictions based on it.
:param value: new raid level
:param type: object
:raises :class:`~.errors.DeviceError`: if value does not describe
a valid RAID level
:returns: None
"""
try:
level = self._getLevel(value, self._levels)
except ValueError as e:
raise errors.DeviceError(e)
self._level = level
@property
def createBitmap(self):
""" Whether or not a bitmap should be created on the array.
If the the array is sufficiently small, a bitmap yields no benefit.
If the array has no redundancy, a bitmap is just pointless.
"""
try:
return self.level.has_redundancy() and self.size >= Size(1000) and self.format.type != "swap"
except errors.RaidError:
# If has_redundancy() raises an exception then this device has
# a level for which the redundancy question is meaningless. In
# that case, creating a write-intent bitmap would be a meaningless
# action.
return False
def getSuperBlockSize(self, raw_array_size):
"""Estimate the superblock size for a member of an array,
given the total available memory for this array and raid level.
:param raw_array_size: total available for this array and level
:type raw_array_size: :class:`~.size.Size`
:returns: estimated superblock size
:rtype: :class:`~.size.Size`
"""
return blockdev.md.get_superblock_size(raw_array_size,
version=self.metadataVersion)
@property
def size(self):
"""Returns the actual or estimated size depending on whether or
not the array exists.
"""
if not self.exists or not self.mediaPresent:
try:
size = self.level.get_size([d.size for d in self.devices],
self.memberDevices,
self.chunkSize,
self.getSuperBlockSize)
except (blockdev.MDRaidError, errors.RaidError) as e:
log.info("could not calculate size of device %s for raid level %s: %s", self.name, self.level, e)
size = Size(0)
log.debug("non-existent RAID %s size == %s", self.level, size)
else:
size = self.currentSize
log.debug("existing RAID %s size == %s", self.level, size)
return size
def updateSize(self):
# pylint: disable=bad-super-call
super(ContainerDevice, self).updateSize()
@property
def description(self):
levelstr = self.level.nick if self.level.nick else self.level.name
return "MDRAID set (%s)" % levelstr
def __repr__(self):
s = StorageDevice.__repr__(self)
s += (" level = %(level)s spares = %(spares)s\n"
" members = %(memberDevices)s\n"
" total devices = %(totalDevices)s"
" metadata version = %(metadataVersion)s" %
{"level": self.level, "spares": self.spares,
"memberDevices": self.memberDevices,
"totalDevices": self.totalDevices,
"metadataVersion": self.metadataVersion})
return s
@property
def dict(self):
d = super(MDRaidArrayDevice, self).dict
d.update({"level": str(self.level),
"spares": self.spares, "memberDevices": self.memberDevices,
"totalDevices": self.totalDevices,
"metadataVersion": self.metadataVersion})
return d
@property
def mdadmConfEntry(self):
""" This array's mdadm.conf entry. """
uuid = self.mdadmFormatUUID
if self.memberDevices is None or not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
fmt = "ARRAY %s level=%s num-devices=%d UUID=%s\n"
return fmt % (self.path, self.level, self.memberDevices, uuid)
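        # e.g. (hypothetical values):
        #   ARRAY /dev/md/root level=raid1 num-devices=2 UUID=c84a0f7a:...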
@property
def totalDevices(self):
""" Total number of devices in the array, including spares. """
if not self.exists:
return self._totalDevices
else:
return len(self.parents)
def _getMemberDevices(self):
return self._memberDevices
def _setMemberDevices(self, number):
if not isinstance(number, six.integer_types):
raise ValueError("memberDevices must be an integer")
if not self.exists and number > self.totalDevices:
raise ValueError("memberDevices cannot be greater than totalDevices")
self._memberDevices = number
memberDevices = property(_getMemberDevices, _setMemberDevices,
doc="number of member devices")
def _getSpares(self):
spares = 0
if self.memberDevices is not None:
if self.totalDevices is not None and \
self.totalDevices > self.memberDevices:
spares = self.totalDevices - self.memberDevices
elif self.totalDevices is None:
spares = self.memberDevices
self._totalDevices = self.memberDevices
return spares
def _setSpares(self, spares):
max_spares = self.level.get_max_spares(len(self.parents))
if spares > max_spares:
log.debug("failed to set new spares value %d (max is %d)",
spares, max_spares)
raise errors.DeviceError("new spares value is too large")
if self.totalDevices > spares:
self.memberDevices = self.totalDevices - spares
spares = property(_getSpares, _setSpares)
def _addParent(self, member):
super(MDRaidArrayDevice, self)._addParent(member)
if self.status and member.format.exists:
# we always probe since the device may not be set up when we want
# information about it
self._size = self.currentSize
# These should be incremented when adding new member devices except
# during devicetree.populate. When detecting existing arrays we will
# have gotten these values from udev and will use them to determine
# whether we found all of the members, so we shouldn't change them in
# that case.
if not member.format.exists:
self._totalDevices += 1
self.memberDevices += 1
def _removeParent(self, member):
error_msg = self._validateParentRemoval(self.level, member)
if error_msg:
raise errors.DeviceError(error_msg)
super(MDRaidArrayDevice, self)._removeParent(member)
self.memberDevices -= 1
@property
def _trueStatusStrings(self):
""" Strings in state file for which status() should return True."""
return ("clean", "active", "active-idle", "readonly", "read-auto")
@property
def status(self):
""" This device's status.
For now, this should return a boolean:
True the device is open and ready for use
False the device is not open
"""
# check the status in sysfs
status = False
if not self.exists:
return status
if os.path.exists(self.path) and not self.sysfsPath:
# the array has been activated from outside of blivet
self.updateSysfsPath()
# make sure the active array is the one we expect
info = udev.get_device(self.sysfsPath)
uuid = udev.device_get_md_uuid(info)
if uuid and uuid != self.uuid:
log.warning("md array %s is active, but has UUID %s -- not %s",
self.path, uuid, self.uuid)
self.sysfsPath = ""
return status
state_file = "%s/md/array_state" % self.sysfsPath
try:
state = open(state_file).read().strip()
if state in self._trueStatusStrings:
status = True
except IOError:
status = False
return status
def memberStatus(self, member):
if not (self.status and member.status):
return
member_name = os.path.basename(member.sysfsPath)
path = "/sys/%s/md/dev-%s/state" % (self.sysfsPath, member_name)
try:
state = open(path).read().strip()
except IOError:
state = None
return state
@property
def degraded(self):
""" Return True if the array is running in degraded mode. """
rc = False
degraded_file = "%s/md/degraded" % self.sysfsPath
if os.access(degraded_file, os.R_OK):
val = open(degraded_file).read().strip()
if val == "1":
rc = True
return rc
@property
def members(self):
""" Returns this array's members.
:rtype: list of :class:`StorageDevice`
"""
return list(self.parents)
@property
def complete(self):
""" An MDRaidArrayDevice is complete if it has at least as many
component devices as its count of active devices.
"""
return (self.memberDevices <= len(self.members)) or not self.exists
@property
def devices(self):
""" Return a list of this array's member device instances. """
return self.parents
def _postSetup(self):
super(MDRaidArrayDevice, self)._postSetup()
self.updateSysfsPath()
def _setup(self, orig=False):
""" Open, or set up, a device. """
log_method_call(self, self.name, orig=orig, status=self.status,
controllable=self.controllable)
disks = []
for member in self.devices:
member.setup(orig=orig)
disks.append(member.path)
blockdev.md.activate(self.path, members=disks, uuid=self.mdadmFormatUUID)
def _postTeardown(self, recursive=False):
super(MDRaidArrayDevice, self)._postTeardown(recursive=recursive)
        # mdadm reuses minors indiscriminately when there is no mdadm.conf, so
# we need to clear the sysfs path now so our status method continues to
# give valid results
self.sysfsPath = ''
def teardown(self, recursive=None):
""" Close, or tear down, a device. """
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# We don't really care what the array's state is. If the device
# file exists, we want to deactivate it. mdraid has too many
# states.
if self.exists and os.path.exists(self.path):
blockdev.md.deactivate(self.path)
self._postTeardown(recursive=recursive)
def _postCreate(self):
# this is critical since our status method requires a valid sysfs path
self.exists = True # this is needed to run updateSysfsPath
self.updateSysfsPath()
StorageDevice._postCreate(self)
# update our uuid attribute with the new array's UUID
# XXX this won't work for containers since no UUID is reported for them
info = blockdev.md.detail(self.path)
self.uuid = info.uuid
for member in self.devices:
member.format.mdUuid = self.uuid
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
disks = [disk.path for disk in self.devices]
spares = len(self.devices) - self.memberDevices
level = None
if self.level:
level = str(self.level)
blockdev.md.create(self.path, level, disks, spares,
version=self.metadataVersion,
bitmap=self.createBitmap)
udev.settle()
def _remove(self, member):
self.setup()
# see if the device must be marked as failed before it can be removed
fail = (self.memberStatus(member) == "in_sync")
blockdev.md.remove(self.path, member.path, fail)
def _add(self, member):
""" Add a member device to an array.
:param str member: the member's path
:raises: blockdev.MDRaidError
"""
self.setup()
raid_devices = None
try:
if not self.level.has_redundancy():
if self.level is not raid.Linear:
raid_devices = int(blockdev.md.detail(self.name).raid_devices) + 1
except errors.RaidError:
pass
blockdev.md.add(self.path, member.path, raid_devs=raid_devices)
@property
def formatArgs(self):
formatArgs = []
if self.format.type == "ext2":
recommended_stride = self.level.get_recommended_stride(self.memberDevices)
if recommended_stride:
formatArgs = ['-R', 'stride=%d' % recommended_stride ]
return formatArgs
@property
def model(self):
return self.description
def dracutSetupArgs(self):
return set(["rd.md.uuid=%s" % self.mdadmFormatUUID])
def populateKSData(self, data):
if self.isDisk:
return
super(MDRaidArrayDevice, self).populateKSData(data)
data.level = self.level.name
data.spares = self.spares
data.members = ["raid.%d" % p.id for p in self.parents]
data.preexist = self.exists
data.device = self.name
class MDContainerDevice(MDRaidArrayDevice):
_type = "mdcontainer"
def __init__(self, name, **kwargs):
kwargs['level'] = raid.Container
super(MDContainerDevice, self).__init__(name, **kwargs)
@property
def _levels(self):
return mdraid.MDRaidLevels(["container"])
@property
def description(self):
return "BIOS RAID container"
@property
def mdadmConfEntry(self):
uuid = self.mdadmFormatUUID
if not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
return "ARRAY %s UUID=%s\n" % (self.path, uuid)
@property
def _trueStatusStrings(self):
return ("clean", "active", "active-idle", "readonly", "read-auto", "inactive")
def teardown(self, recursive=None):
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# Since BIOS RAID sets (containers in mdraid terminology) never change
# there is no need to stop them and later restart them. Not stopping
# (and thus also not starting) them also works around bug 523334
return
@property
def mediaPresent(self):
# Containers should not get any format handling done
# (the device node does not allow read / write calls)
return False
class MDBiosRaidArrayDevice(MDRaidArrayDevice):
_type = "mdbiosraidarray"
_formatClassName = property(lambda s: None)
_isDisk = True
_partitionable = True
def __init__(self, name, **kwargs):
super(MDBiosRaidArrayDevice, self).__init__(name, **kwargs)
# For container members probe size now, as we cannot determine it
        # when torn down.
self._size = self.currentSize
@property
def size(self):
# For container members return probed size, as we cannot determine it
        # when torn down.
return self._size
@property
def description(self):
levelstr = self.level.nick if self.level.nick else self.level.name
return "BIOS RAID set (%s)" % levelstr
@property
def mdadmConfEntry(self):
uuid = self.mdadmFormatUUID
if not uuid:
raise errors.DeviceError("array is not fully defined", self.name)
return "ARRAY %s UUID=%s\n" % (self.path, uuid)
@property
def members(self):
# If the array is a BIOS RAID array then its unique parent
# is a container and its actual member devices are the
# container's parents.
return list(self.parents[0].parents)
def teardown(self, recursive=None):
log_method_call(self, self.name, status=self.status,
controllable=self.controllable)
# we don't really care about the return value of _preTeardown here.
# see comment just above md_deactivate call
self._preTeardown(recursive=recursive)
# Since BIOS RAID sets (containers in mdraid terminology) never change
# there is no need to stop them and later restart them. Not stopping
# (and thus also not starting) them also works around bug 523334
return
| dwlehman/blivet | blivet/devices/md.py | Python | lgpl-2.1 | 22,939 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Data filter converting CSTBox v2 event logs to v3 format.
Usage: ./cbx-2to3.py < /path/to/input/file > /path/to/output/file
"""
__author__ = 'Eric Pascual - CSTB ([email protected])'
import fileinput
import json
for line in fileinput.input():
ts, var_type, var_name, value, data = line.split('\t')
# next 3 lines are specific to Actility box at home files conversion
if var_name.startswith('home.'):
var_name = var_name[5:]
var_name = '.'.join((var_type, var_name))
data = data.strip().strip('{}')
if data:
pairs = data.split(',')
data = json.dumps(dict([(k.lower(), v) for k, v in (pair.split('=') for pair in pairs)]))
else:
data = "{}"
print('\t'.join((ts, var_type, var_name, value, data)))
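# Hypothetical sample (format inferred from the handling above, not from real
# data; tabs shown as <TAB>):
#   in:  1388534400<TAB>temperature<TAB>home.kitchen<TAB>21.5<TAB>{Unit=C}
#   out: 1388534400<TAB>temperature<TAB>temperature.kitchen<TAB>21.5<TAB>{"unit": "C"}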
| cstbox/devel | bin/cbx-2to3.py | Python | lgpl-3.0 | 821 |
from typing import Callable, Any
from ..model import MetaEvent, Event
from ..exceptions import PropertyStatechartError
__all__ = ['InternalEventListener', 'PropertyStatechartListener']
class InternalEventListener:
"""
Listener that filters and propagates internal events as external events.
"""
def __init__(self, callable: Callable[[Event], Any]) -> None:
self._callable = callable
def __call__(self, event: MetaEvent) -> None:
if event.name == 'event sent':
self._callable(Event(event.event.name, **event.event.data))
class PropertyStatechartListener:
"""
Listener that propagates meta-events to given property statechart, executes
the property statechart, and checks it.
"""
def __init__(self, interpreter) -> None:
self._interpreter = interpreter
def __call__(self, event: MetaEvent) -> None:
self._interpreter.queue(event)
self._interpreter.execute()
if self._interpreter.final:
raise PropertyStatechartError(self._interpreter)
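# Hypothetical wiring (not part of this module); an interpreter's attach()
# registers listeners, so internal events can be forwarded and a property
# statechart can be checked on every meta-event:
#   interpreter.attach(InternalEventListener(other_interpreter.queue))
#   interpreter.attach(PropertyStatechartListener(property_interpreter))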
| AlexandreDecan/sismic | sismic/interpreter/listener.py | Python | lgpl-3.0 | 1,061 |
#!/usr/bin/env python
'''
Created on Jan 6, 2018
@author: consultit
'''
from panda3d.core import Filename
import sys, os
from subprocess import call
### NOTE: currently this script works only on GNU/Linux
currdir = os.path.abspath(sys.path[0])
builddir = Filename.from_os_specific(os.path.join(currdir, '/ely/')).get_fullpath()
elydir = Filename.fromOsSpecific(os.path.join(currdir, '/ely/')).getFullpath()
lpref = ''
mpref = ''
lsuff = '.so'
###
tools = 'libtools'
modules = ['ai', 'audio', 'control', 'physics']
if __name__ == '__main__':
# cwd
os.chdir(currdir + builddir)
# build 'tools'
libtools = lpref + tools + lsuff
print('building "' + libtools + '" ...')
toolsdir = '..' + elydir + tools
args = ['build.py', '--dir', toolsdir, '--clean']
call(['/usr/bin/python'] + args)
#print('installing "' + libtools + '" ...')
#args = [libtools, toolsdir]
#call(['/usr/bin/install'] + args)
# build modules
for module in modules:
modulelib = mpref + module + lsuff
print('building "' + modulelib + '" ...')
moduledir = '..' + elydir + module
args = ['build.py', '--dir', moduledir, '--libs', libtools, '--libs_src',
toolsdir, '--clean']
call(['/usr/bin/python'] + args)
#print('installing "' + modulelib + '" ...')
#args = [modulelib, moduledir]
#call(['/usr/bin/install'] + args)
| consultit/Ely | setup.py | Python | lgpl-3.0 | 1,423 |
""" Barcode Creation (PDF417)
"""
import os
basedir = os.path.split(__file__)[0]
bcdelib = os.path.join(basedir, 'psbcdelib.ps')
class Barcode(object):
__lib__ = open(bcdelib, 'r').read()
@property
def ps(self):
raise NotImplementedError
@property
def eps(self):
raise NotImplementedError
| voipir/python-sii | src/sii/lib/printing/barcode/Barcode.py | Python | lgpl-3.0 | 331 |
import pytest
from forte.solvers import solver_factory, HF
def test_df_rhf():
"""Test DF-RHF on HF."""
ref_energy = -100.04775218911111
# define a molecule
xyz = """
H 0.0 0.0 0.0
F 0.0 0.0 1.0
"""
# create a molecular model
input = solver_factory(molecule=xyz, basis='cc-pVTZ', int_type='df')
# specify the electronic state
state = input.state(charge=0, multiplicity=1, sym='a1')
# create a HF object and run
hf = HF(input, state=state)
hf.run()
assert hf.value('hf energy') == pytest.approx(ref_energy, 1.0e-10)
def test_df_rhf_select_aux():
"""Test DF-RHF on HF."""
ref_energy = -100.04775602524956
# define a molecule
xyz = """
H 0.0 0.0 0.0
F 0.0 0.0 1.0
"""
# create a molecular model
input = solver_factory(molecule=xyz, int_type='df', basis='cc-pVTZ', scf_aux_basis='cc-pVQZ-JKFIT')
# specify the electronic state
state = input.state(charge=0, multiplicity=1, sym='a1')
# create a HF object and run
hf = HF(input, state=state)
hf.run()
assert hf.value('hf energy') == pytest.approx(ref_energy, 1.0e-10)
if __name__ == "__main__":
test_df_rhf()
test_df_rhf_select_aux()
| evangelistalab/forte | tests/pytest/hf/test_df_hf.py | Python | lgpl-3.0 | 1,224 |
import time
import sys
def sizeof_fmt(num, unit='B'):
# source: http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    for uprefix in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            return "{:3.2f} {}{}".format(num, uprefix, unit)
num /= 1024.0
return "{:3.2f} Yi{}".format(num, unit)
output = sys.stderr
progress_format = '{n} [{b}] {p:3.1f}% ({d}/{a}) {s}'
class FileTransferProgressBar(object):
# inspired by clint.textui.progress.Bar
def __init__(self, filesize, name='', width=32, empty_char=' ', filled_char='#', hide=None, speed_update=0.2,
bar_update=0.05, progress_format=progress_format):
self.name, self.filesize, self.width, self.ec, self.fc = name, filesize, width, empty_char, filled_char
self.speed_update, self.bar_update, self.progress_format = speed_update, bar_update, progress_format
if hide is None:
try:
self.hide = not output.isatty()
except AttributeError:
self.hide = True
else:
self.hide = hide
self.last_progress = 0
self.last_time = time.time()
self.last_speed_update = self.last_time
self.start_time = self.last_time
self.last_speed_progress = 0
self.last_speed = 0
self.max_bar_size = 0
def show(self, progress):
if time.time() - self.last_time > self.bar_update:
self.last_time = time.time()
self.last_progress = progress
if self.last_time - self.last_speed_update > self.speed_update:
self.last_speed = (self.last_speed_progress - progress) / float(self.last_speed_update - self.last_time)
self.last_speed_update = self.last_time
self.last_speed_progress = progress
status = self.width * progress // self.filesize
percent = float(progress * 100) / self.filesize
bar = self.progress_format.format(n=self.name, b=self.fc * status + self.ec * (self.width - status),
p=percent, d=sizeof_fmt(progress), a=sizeof_fmt(self.filesize),
s=sizeof_fmt(self.last_speed) + '/s')
max_bar = self.max_bar_size
self.max_bar_size = max(len(bar), self.max_bar_size)
bar = bar + (' ' * (max_bar - len(bar))) + '\r' # workaround for ghosts
output.write(bar)
output.flush()
def done(self):
speed = self.filesize / float(time.time() - self.start_time)
bar = self.progress_format.format(n=self.name, b=self.fc * self.width, p=100, d=sizeof_fmt(self.filesize),
a=sizeof_fmt(self.filesize), s=sizeof_fmt(speed) + '/s')
max_bar = self.max_bar_size
self.max_bar_size = max(len(bar), self.max_bar_size)
bar = bar + (' ' * (max_bar - len(bar))) + '\r'
output.write(bar)
output.write('\n')
output.flush()
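# Hedged demo (added for illustration; the file name and sizes are made up).
# Streams a fake in-memory payload through the bar in 16 KiB chunks.
if __name__ == '__main__':
    import io as _io
    _total = 256 * 1024
    _payload = _io.BytesIO(b'x' * _total)
    _bar = FileTransferProgressBar(_total, name='demo.bin')
    _done = 0
    while True:
        _chunk = _payload.read(16 * 1024)
        if not _chunk:
            break
        _done += len(_chunk)
        _bar.show(_done)
    _bar.done()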
| JuniorJPDJ/pyChomikBox | ChomikBox/utils/FileTransferProgressBar.py | Python | lgpl-3.0 | 3,099 |
from ctypes import *
import math
lib = cdll.LoadLibrary("Z:\\Documents\\Projects\\SWMMOpenMIComponent\\Source\\SWMMOpenMIComponent\\bin\\Debug\\SWMMComponent.dll")
print(lib)
print("\n")
finp = b"Z:\\Documents\\Projects\\SWMMOpenMIComponent\\Source\\SWMMOpenMINoGlobalsPythonTest\\test.inp"
frpt = b"Z:\\Documents\\Projects\\SWMMOpenMIComponent\\Source\\SWMMOpenMINoGlobalsPythonTest\\test.rpt"
fout = b"Z:\\Documents\\Projects\\SWMMOpenMIComponent\\Source\\SWMMOpenMINoGlobalsPythonTest\\test.out"
project = lib.swmm_open(finp , frpt , fout)
print(project)
print("\n")
newHour = 0
oldHour = 0
theDay = 0
theHour = 0
elapsedTime = c_double()
if(lib.swmm_getErrorCode(project) == 0):
lib.swmm_start(project, 1)
if(lib.swmm_getErrorCode(project) == 0):
print("Simulating day: 0 Hour: 0")
print("\n")
while True:
lib.swmm_step(project, byref(elapsedTime))
newHour = elapsedTime.value * 24
if(newHour > oldHour):
theDay = int(elapsedTime.value)
temp = math.floor(elapsedTime.value)
temp = (elapsedTime.value - temp) * 24.0
theHour = int(temp)
#print("\b\b\b\b\b\b\b\b\b\b\b\b\b\b")
#print("\n")
print "Hour " , str(theHour) , " Day " , str(theDay) , ' \r',
#print("\n")
oldHour = newHour
            if(elapsedTime.value <= 0 or lib.swmm_getErrorCode(project) != 0):
break
lib.swmm_end(project)
lib.swmm_report(project)
lib.swmm_close(project)
| cbuahin/SWMMOpenMIComponent | Source/SWMMOpenMINoGlobalsPythonTest/SWMMOpenMINoGlobalsPythonTest.py | Python | lgpl-3.0 | 1,641 |
import pytest
import importlib
from mpi4py import MPI
from spectralDNS import config, get_solver, solve
from TGMHD import initialize, regression_test, pi
comm = MPI.COMM_WORLD
if comm.Get_size() >= 4:
params = ('uniform_slab', 'nonuniform_slab',
'uniform_pencil', 'nonuniform_pencil')
else:
params = ('uniform', 'nonuniform')
@pytest.fixture(params=params)
def sol(request):
"""Check for uniform and non-uniform cube"""
pars = request.param.split('_')
mesh = pars[0]
mpi = 'slab'
if len(pars) == 2:
mpi = pars[1]
_args = ['--decomposition', mpi]
if mesh == 'uniform':
_args += ['--M', '4', '4', '4', '--L', '2*pi', '2*pi', '2*pi']
else:
_args += ['--M', '6', '5', '4', '--L', '6*pi', '4*pi', '2*pi']
_args += ['MHD']
return _args
def test_MHD(sol):
config.update(
{
'nu': 0.000625, # Viscosity
'dt': 0.01, # Time step
'T': 0.1, # End time
'eta': 0.01,
'L': [2*pi, 4*pi, 6*pi],
'M': [4, 5, 6],
'convection': 'Divergence'
}
)
solver = get_solver(regression_test=regression_test,
parse_args=sol)
context = solver.get_context()
initialize(**context)
solve(solver, context)
config.params.dealias = '3/2-rule'
initialize(**context)
solve(solver, context)
config.params.dealias = '2/3-rule'
config.params.optimization = 'cython'
importlib.reload(solver)
initialize(**context)
solve(solver, context)
config.params.write_result = 1
config.params.checkpoint = 1
    config.params.dt = 0.01
    config.params.t = 0.0
    config.params.tstep = 0
    config.params.T = 0.04
solver.regression_test = lambda c: None
solve(solver, context)
| spectralDNS/spectralDNS | tests/test_MHD.py | Python | lgpl-3.0 | 1,847 |
"""
Sample a specific geometry or set of geometries.
"""
import numpy as np
import nomad.core.glbl as glbl
import nomad.core.trajectory as trajectory
import nomad.core.log as log
def set_initial_coords(wfn):
"""Takes initial position and momentum from geometry specified in input"""
coords = glbl.properties['init_coords']
ndim = coords.shape[-1]
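    # Layout assumption (inferred from the loop below, not documented here):
    # coords has shape (n_geometries, 2, ndim), where coord[0] holds the
    # position vector and coord[1] the momentum vector.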
log.print_message('string',[' Initial coordinates taken from input file(s).\n'])
for coord in coords:
itraj = trajectory.Trajectory(glbl.properties['n_states'], ndim,
width=glbl.properties['crd_widths'],
mass=glbl.properties['crd_masses'],
parent=0, kecoef=glbl.modules['integrals'].kecoef)
# set position and momentum
itraj.update_x(np.array(coord[0]))
itraj.update_p(np.array(coord[1]))
# add a single trajectory specified by geometry.dat
wfn.add_trajectory(itraj)
| mschuurman/FMSpy | nomad/initconds/explicit.py | Python | lgpl-3.0 | 1,004 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright:
Wenjie Lei ([email protected]), 2016
:license:
GNU General Public License, Version 3
(http://www.gnu.org/copyleft/gpl.html)
"""
from __future__ import (absolute_import, division, print_function) # NOQA
from .adjoint_source import calculate_adjsrc_on_stream # NOQA
from .adjoint_source import calculate_and_process_adjsrc_on_stream # NOQA
from .adjoint_source import calculate_adjsrc_on_trace # NOQA
from .adjoint_source import measure_adjoint_on_stream # NOQA
| wjlei1990/pytomo3d | pytomo3d/adjoint/__init__.py | Python | lgpl-3.0 | 573 |
# -*- coding: utf-8 -*-
from common.db_sum import _metric_meta_db
'''get the data from table by name'''
def get_data_by_name(name, status=[1], other=0):
result = []
where = ''
if status:
status = ",".join([str(x) for x in status])
where += ' and status in ({}) '.format(status)
if other:
where += ' and id not in ({}) '.format(other)
sql = """
select * from t_chart_reports where name="{}" {};
""".format(name, where)
try:
result = _metric_meta_db.query(sql)
if result:
result = result[0]
except Exception, e:
from traceback import print_exc
print_exc()
return result
'''get chart from table by ids'''
def get_data_by_ids(sids):
result = []
sids = [str(x) for x in sids]
sql = """
select * from t_chart_reports where id in ({});
""".format(",".join(sids))
try:
result = _metric_meta_db.query(sql)
except Exception, e:
from traceback import print_exc
print_exc()
return result
'''get the data from table by id'''
def get_data_by_id(sid):
result = []
sql = """
select * from t_chart_reports where id={} and status=1;
""".format(int(sid))
try:
result = _metric_meta_db.query(sql)
if result:
result = result[0]
except Exception, e:
from traceback import print_exc
print_exc()
return result
'''save data to chart table'''
def save(form):
hid = _metric_meta_db.insert('t_chart_reports', **form)
return hid
'''update chart table's data by id '''
def update(form):
_metric_meta_db.update('t_chart_reports', where="id={}".format(form['id']), **form)
return form['id']
'''get highchart_edit json'''
def get_chart(chart, data):
result = {}
if chart and data:
if chart.get('series', False):
first = data[0]
data = get_column_combine(data)
lens = len(first)
series = chart['series']
tmp_series = []
if series:
now_key = -1
for key, item in enumerate(series):
if key < lens - 1:
now_key = key
item['name'] = first[key + 1]
item['data'] = data[key]
tmp_series.append(item)
else:
break
template_series = series[-1]
for key, item in enumerate(first):
if key == 0:
continue
elif now_key < key - 1:
tmp = dict(template_series)
tmp['name'] = item
tmp['data'] = data[key - 1]
tmp['_colorIndex'] = key - 1
tmp['_symbolIndex'] = key - 1
tmp_series.append(tmp)
else:
tmp_series = series
chart['series'] = tmp_series
result = chart
return result
'''parse new data to highchart_edit json data'''
def get_column_combine(data):
result = []
if data:
lens = len(data[0])
if lens > 0:
result = [[] for i in xrange(lens)]
for key, item in enumerate(data):
if key > 0:
for k, it in enumerate(item):
if k > 0:
if it == '':
result[k - 1].append([item[0], None])
else:
if type(it) == str or type(it) == unicode:
try:
if r"." in it:
if r"," in it:
tmp = it.replace(",", "")
it = float(tmp)
else:
it = float(it)
elif r"," in it:
tmp = it.replace(",", "")
it = int(tmp)
else:
it = int(it)
except Exception, e:
from traceback import print_exc
print_exc()
result[k - 1].append([item[0], it])
return result
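# Worked example for get_column_combine (illustrative input, traced by hand):
#   data = [['date', 'A', 'B'],
#           ['2020-01-01', '1,000', '2.5']]
#   get_column_combine(data)
#   -> [[['2020-01-01', 1000]], [['2020-01-01', 2.5]], []]
# One series per column after the first; the trailing empty list comes from
# result being sized to len(data[0]) while only len-1 columns are filled.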
'''get the chart list'''
def get_chart_list(sid="", name="", fields=[], iscount=False, current=1, rowCount=20):
where = []
limit = ''
if sid:
if type(sid) != list:
sid = [sid]
where.append("""and id in ({})""".format(",".join(map(str, sid))))
if name:
where.append("""and name like "%{}%" """.format(name))
if rowCount:
stc = (int(current) - 1) * int(rowCount)
if not stc:
stc = 0
limit = "limit {},{}".format(int(current) - 1, rowCount)
content = "*"
orders = "order by id desc"
if iscount:
limit = ""
content = "count(*) as c"
orders = ""
elif fields:
content = ",".join(fields)
sql = """
select {} from t_chart_reports where status=1 {} {} {};
""".format(content, " ".join(where), orders, limit)
result = _metric_meta_db.query(sql)
if iscount:
if result:
return result[0]['c']
else:
return 0
else:
if result:
return result
else:
return []
| popoyz/charts | base/chart_b.py | Python | lgpl-3.0 | 5,726 |
from rapidsms.tests.scripted import TestScript
from apps.form.models import *
from apps.reporters.models import *
import apps.reporters.app as reporter_app
import apps.supply.app as supply_app
import apps.form.app as form_app
import apps.default.app as default_app
from app import App
from django.core.management.commands.dumpdata import Command
import time
import random
import os
from datetime import datetime
class TestApp (TestScript):
#apps = (reporter_app.App, App,form_app.App, supply_app.App, default_app.App )
apps = (reporter_app.App, App,form_app.App, supply_app.App )
# the test_backend script does the loading of the dummy backend that allows reporters
# to work properly in tests
fixtures = ['nigeria_llin', 'test_kano_locations', 'test_backend']
def setUp(self):
TestScript.setUp(self)
def testFixtures(self):
self._testKanoLocations()
self._testForms()
self._testRoles()
def testScript(self):
a = """
8005551219 > llin register 20 dl crummy user
8005551219 < Hello crummy! You are now registered as Distribution point team leader at KANO State.
"""
self.runScript(a)
# this should succeed because we just created him
reporters = Reporter.objects.all()
Reporter.objects.get(alias="cuser")
dict = {"alias":"fail"}
# make sure checking a non-existant user fails
self.assertRaises(Reporter.DoesNotExist, Reporter.objects.get, **dict)
testRegistration = """
8005551212 > llin my status
8005551212 < Please register your phone with RapidSMS.
8005551212 > llin register 20 dl dummy user
8005551212 < Hello dummy! You are now registered as Distribution point team leader at KANO State.
8005551212 > llin my status
8005551212 < I think you are dummy user.
#duplicate submission
test_reg_dup > llin register 20 dl duplicate user
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
# this one should be a duplicate
test_reg_dup > llin register 20 dl duplicate user
test_reg_dup < Hello again duplicate! You are already registered as a Distribution point team leader at KANO State.
# but all of these should create a new registration
test_reg_dup > llin register 20 dl duplicate user withanothername
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 dl duplicate userlonger
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 dl duplicated user
test_reg_dup < Hello duplicated! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 sm duplicate user
test_reg_dup < Hello duplicate! You are now registered as Stock manager at KANO State.
test_reg_dup > llin register 2001 dl duplicate user
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at AJINGI LGA.
# case sensitivity
test_reg_2 > llin REGISTER 20 dl another user
test_reg_2 < Hello another! You are now registered as Distribution point team leader at KANO State.
# different name formats
test_reg_3 > llin register 20 dl onename
test_reg_3 < Hello onename! You are now registered as Distribution point team leader at KANO State.
# these fail
test_reg_4 > llin register 20 dl mister three names
test_reg_4 < Hello mister! You are now registered as Distribution point team leader at KANO State.
test_reg_5 > llin register 20 dl mister four name guy
test_reg_5 < Hello mister! You are now registered as Distribution point team leader at KANO State.
# some other spellings
test_reg_short > llin regstr 20 dl short user
test_reg_short < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_short_2 > llin regs 20 dl short user
test_reg_short_2 < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_short_3 > llin reg 20 dl short user
test_reg_short_3 < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_long > llin registered 20 dl long user
test_reg_long < Hello long! You are now registered as Distribution point team leader at KANO State.
# extra spaces
test_reg_8 > llin register 20 dl space guy
test_reg_8 < Hello space! You are now registered as Distribution point team leader at KANO State.
# new tests for more flexible roles
test_reg_dl > llin register 20 dl distribution leader
test_reg_dl < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_2 > llin register 20 ds distribution leader
test_reg_dl_2 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_3 > llin register 20 dm distribution leader
test_reg_dl_3 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_4 > llin register 20 dp distribution leader
test_reg_dl_4 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_lf > llin register 20 lf lga focal person
test_reg_lf < Hello lga! You are now registered as LGA focal person at KANO State.
test_reg_lf > llin register 20 lp lga focal person
test_reg_lf < Hello again lga! You are already registered as a LGA focal person at KANO State.
# alas, we're not perfect
test_reg_fail > llin rgstr 20 dl sorry guy
test_reg_fail < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testRegistrationErrors = """
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 45 DL hello world
12345 < Invalid form. 45 not in list of location codes
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 20 pp hello world
12345 < Invalid form. Unknown role code: pp
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 6803 AL hello world
12345 < Invalid form. 6803 not in list of location codes. Unknown role code: AL
12345 > llin my status
12345 < Please register your phone with RapidSMS.
"""
testKeyword= """
tkw_1 > llin register 20 dl keyword tester
tkw_1 < Hello keyword! You are now registered as Distribution point team leader at KANO State.
# base case
tkw_1 > llin nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# capitalize the domain
tkw_1 > LLIN nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# drop an L
tkw_1 > lin nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# mix the order - this is no longer supported
#tkw_1 > ILLn nets 2001 123 456 78 90
#tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
#tkw_1 > ilin nets 2001 123 456 78 90
#tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# ll anything works?
tkw_1 > ll nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
tkw_1 > llan nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# don't support w/o keyword
tkw_1 > nets 2001 123 456 78 90
# the default app to the rescue!
tkw_1 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testNets= """
8005551213 > llin register 2001 lf net guy
8005551213 < Hello net! You are now registered as LGA focal person at AJINGI LGA.
8005551213 > llin nets 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
8005551213 > llin nets 2001 123 456 78
8005551213 < Invalid form. The following fields are required: discrepancy
# test some of the different form prefix options
# case sensitivity
8005551213 > llin NETS 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# no s
8005551213 > llin net 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# really? this works?
8005551213 > llin Nt 2001 123 456 78 90
8005551213 < Thank you net. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# something's gotta fail
8005551213 > llin n 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
8005551213 > llin bednets 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
8005551213 > llin ents 2001 123 456 78 90
8005551213 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testNetCards= """
8005551214 > llin register 200201 lf card guy
8005551214 < Hello card! You are now registered as LGA focal person at ALBASU CENTRAL Ward.
8005551214 > llin net cards 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin net cards 200201 123 456
8005551214 < Invalid form. The following fields are required: issued
# test some of the different form prefix options
# case sensitivity
8005551214 > llin NET CARDS 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# no s
8005551214 > llin net card 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# one word
8005551214 > llin netcards 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin netcard 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# he he
8005551214 > llin nt cd 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
8005551214 > llin ntcrds 200201 123 456 78
8005551214 < Thank you card. Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78
# something's gotta fail
8005551214 > llin cards 200201 123 456 78
8005551214 < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testUnregisteredSubmissions = """
tus_1 > llin net cards 200201 123 456 78
tus_1 < Received report for LLIN NET CARDS: location=ALBASU CENTRAL, settlements=123, people=456, distributed=78. Please register your phone
tus_1 > llin my status
tus_1 < Please register your phone with RapidSMS.
tus_2 > llin nets 2001 123 456 78 90
tus_2 < Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90. Please register your phone
tus_2 > llin my status
tus_2 < Please register your phone with RapidSMS.
"""
def testGenerateNetFixtures(self):
""" This isn't actually a test. It just takes advantage
of the test harness to spam a bunch of messages to the
nigeria app and spit out the data in a format that can
be sucked into a fixture. It should be moved to some
data generator at some point, but is being left here
        for laziness' sake """
# this is the number of net reports that will be generated
count = 0
# the sender will always be the same, for now
phone = "55555"
expected_actual_match_percent = .8
# allow specifying the minimum and maximum dates for message generation
min_date = datetime(2009,4,1)
max_date = datetime(2009,4,30)
min_time = time.mktime(min_date.timetuple())
max_time = time.mktime(max_date.timetuple())
# these are the locations that will be chosen. The actual
# location will be a distribution point under one of these
# wards
wards = [200101, 200102, 200103, 200104, 200105, 200106, 200107, 200108, 200109, 200110, 200201]
all_net_strings = []
for i in range(count):
# this first part generates a net form at a random DP
date = datetime.fromtimestamp(random.randint(min_time, max_time))
ward = Location.objects.get(code=random.choice(wards))
dp = random.choice(ward.children.all())
distributed = random.randint(50,500)
expected = random.randint(0,2000)
# create an actual amount based on the likelihood of match
if random.random() < expected_actual_match_percent:
actual = expected
else:
actual = random.randint(0,2000)
discrepancy = random.randint(0,distributed/5)
net_string = "%s@%s > llin nets %s %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), dp.code, distributed, expected, actual, discrepancy)
all_net_strings.append(net_string)
# the second part generates a net card form at a random MT
date = datetime.fromtimestamp(random.randint(min_time, max_time))
ward = Location.objects.get(code=random.choice(wards))
dp = random.choice(ward.children.all())
mt = random.choice(dp.children.all())
settlements = random.randint(3, 50)
people = random.randint(50, 600)
coupons = random.randint(50, 600)
net_card_string = "%s@%s > llin net cards %s %s %s %s" % (phone, date.strftime("%Y%m%d%H%M"), mt.code, settlements, people, coupons )
all_net_strings.append(net_card_string)
script = "\n".join(all_net_strings)
self.runScript(script)
dumpdata = Command()
filename = os.path.abspath(os.path.join(os.path.dirname(__file__),"fixtures/test_net_data.json"))
options = { "indent" : 2 }
datadump = dumpdata.handle("bednets", **options)
# uncomment these lines to save the fixture
# file = open(filename, "w")
# file.write(datadump)
# file.close()
# print "=== Successfully wrote fixtures to %s ===" % filename
#
def _testKanoLocations(self):
#TODO test for DPs and MTs
loc_types = LocationType.objects.all()
self.assertEqual(6, len(loc_types))
state = LocationType.objects.get(name="State")
lga = LocationType.objects.get(name="LGA")
ward = LocationType.objects.get(name="Ward")
locations = Location.objects.all()
# 1 state
self.assertEqual(1, len(locations.filter(type=state)))
# 44 lgas
self.assertEqual(44, len(locations.filter(type=lga)))
# 484 wards
self.assertEqual(484, len(locations.filter(type=ward)))
kano = locations.get(type=state)
self.assertEqual("KANO", kano.name)
self.assertEqual(44, len(kano.children.all()))
for lga in locations.filter(type=lga):
self.assertEqual(kano, lga.parent)
def _testForms(self):
forms = Form.objects.all()
self.assertEqual(5, len(forms))
for form_name in ["register", "issue", "receive", "nets", "netcards"]:
# this will throw an error if it doesn't exist
Form.objects.get(code__abbreviation=form_name)
def _testRoles(self):
# add this when we have a fixture for roles
roles = Role.objects.all()
self.assertEqual(4, len(roles))
for role_name in ["LGA focal person", "Ward supervisor", "Stock manager", "Distribution point team leader"]:
# this will throw an error if it doesn't exist
Role.objects.get(name=role_name)
| takinbo/rapidsms-borno | apps/bednets/tests.py | Python | lgpl-3.0 | 18,579 |
'''
This file is part of GEAR_mc.
GEAR_mc is a fork of Jeremie Passerin's GEAR project.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin [email protected] www.jeremiepasserin.com
Fork Author: Miquel Campos [email protected] www.miqueltd.com
Date: 2013 / 08 / 16
'''
## @package gear.xsi.curve
# @author Jeremie Passerin
#
# @brief create, merge, split curves...
##########################################################
# GLOBAL
##########################################################
# gear
from gear.xsi import xsi, c, XSIMath, XSIFactory
import gear.xsi.utils as uti
import gear.xsi.transform as tra
##########################################################
# DRAW
##########################################################
# ========================================================
## Create a curve attached to given centers. One point per center.\n
# To do so we use a cluster center operator per point. We could use an envelope (the method to do so is kept in the code below), but there was a reason, which I can't remember, why cluster centers were the better choice.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param centers List of X3DObject or Collection - Object that will drive the curve.
# @param close Boolean - True to close the fcurve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @return NurbCurve - The newly created curve.
def addCnsCurve(parent, name, centers, close=False, degree=1):
# convert collections to list
centers = [center for center in centers]
if degree == 3:
if len(centers) == 2:
centers.insert(0, centers[0])
centers.append(centers[-1])
elif len(centers) == 3:
centers.append(centers[-1])
points = []
for center in centers:
points.append(center.Kinematics.Global.Transform.PosX)
points.append(center.Kinematics.Global.Transform.PosY)
points.append(center.Kinematics.Global.Transform.PosZ)
points.append(1)
curve = parent.AddNurbsCurve(points, None, close, degree, c.siNonUniformParameterization, c.siSINurbs, name)
crv_geo = curve.ActivePrimitive.Geometry
for i, center in enumerate(centers):
cluster = crv_geo.AddCluster( c.siVertexCluster, "center_%s"%i, [i] )
xsi.ApplyOp( "ClusterCenter", cluster.FullName+";"+center.FullName, 0, 0, None, 2)
# Here is a method to replace the cluster centers with an envelope
# envelopeop = curve.ApplyEnvelope(cCenters)
#
# aWeights = []
# for i in range(cCenters.Count):
# for j in range(cCenters.Count):
# if i == j:
# aWeights.append(100)
# else:
# aWeights.append(0)
#
# envelopeop.Weights.Array = aWeights
return curve
# ========================================================
## Create a NurbsCurve with a single subcurve.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param points List of Double - positions of the curve in a one dimension array [point0X, point0Y, point0Z, 1, point1X, point1Y, point1Z, 1, ...].
# @param close Boolean - True to close the curve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @param t SITransformation - Global transform.
# @param color List of Double - The RGB color of the curve (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurve(parent, name, points, close=False, degree=1, t=XSIMath.CreateTransform(), color=[0,0,0]):
curve = parent.AddNurbsCurve(points, None, close, degree, c.siNonUniformParameterization, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
# ========================================================
## Create a NurbsCurve with multiple subcurve.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param points List of Double - positions of the curve in a one dimension array [point0X, point0Y, point0Z, 1, point1X, point1Y, point1Z, 1, ...].
# @param ncp List of Double - See XSI SDK Doc for AddNurbsCurveList2.
# @param kn List of Double - See XSI SDK Doc for AddNurbsCurveList2.
# @param nkn List of Double - See XSI SDK Doc for AddNurbsCurveList2.
# @param close List of Boolean - True to close the curve.
# @param degree List of Integer - 1 for linear curve, 3 for Cubic.
# @param t SITransformation - Global transform.
# @param color List of Double - The RGB color of the curve (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurve2(parent, name, points, ncp=[], kn=[], nkn=[], close=[], degree=[], t=XSIMath.CreateTransform(), color=[0,0,0]):
pointCount = len(ncp)
aPar = [c.siNonUniformParameterization for i in range(pointCount)]
curve = parent.AddNurbsCurveList2(pointCount, points, ncp, kn, nkn, close, degree, aPar, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
# ========================================================
## Create a NurbsCurve with a single subcurve from a list of position.
# @param parent X3DObject - Parent object.
# @param name String - Name.
# @param positions List of SIVector3 - positions of the curve points.
# @param close Boolean - True to close the curve.
# @param degree Integer - 1 for linear curve, 3 for Cubic.
# @param knotsPara - knots parametrization in the curve
# @param t SITransformation - Global transform.
# @param color List of Double - The RGB color of the object (ie. [1,0,0] for red).
# @return NurbCurve - The newly created curve.
def addCurveFromPos(parent, name, positions, close=False, degree=1, knotsPara=c.siNonUniformParameterization, t=XSIMath.CreateTransform(), color=[0,0,0]):
points = []
for v in positions:
points.append(v.X)
points.append(v.Y)
points.append(v.Z)
points.append(1)
curve = parent.AddNurbsCurve(points, None, close, degree, knotsPara, c.siSINurbs, name)
uti.setColor(curve, color)
curve.Kinematics.Global.Transform = t
return curve
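# Hedged usage sketch (illustration only; requires a running Softimage
# session, and the object name below is made up):
#   from gear.xsi import xsi, XSIMath
#   import gear.xsi.curve as cur
#   positions = [XSIMath.CreateVector3(x, 0, 0) for x in range(4)]
#   crv = cur.addCurveFromPos(xsi.ActiveSceneRoot, "demo_crv", positions,
#                             close=False, degree=3)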
##########################################################
# SUBCURVES
##########################################################
# Merge Curves ===========================================
## Merge given curve in one unique curve.
# @param curve List of NurbsCurve - The curves to merge.
# @return NurbsCurve.
def mergeCurves(curves):
points = []
ncp = []
kn = []
nkn = []
closed = []
degree = []
for curve in curves:
curve_matrix = curve.Kinematics.Global.Transform.Matrix4
for nurbscrv in curve.ActivePrimitive.Geometry.Curves:
ncp.append(nurbscrv.ControlPoints.Count)
kn.extend(nurbscrv.Knots.Array)
nkn.append(len(nurbscrv.Knots.Array))
closed.append(isClosed(nurbscrv))
degree.append(nurbscrv.Degree)
for point in nurbscrv.ControlPoints:
point_pos = point.Position
point_pos.MulByMatrix4InPlace(curve_matrix)
points.extend([point_pos.X, point_pos.Y,point_pos.Z, 1])
if len(ncp) > 1:
curve = addCurve2(xsi.ActiveSceneRoot, "curve", points, ncp, kn, nkn, closed, degree)
else:
curve = addCurve(xsi.ActiveSceneRoot, "curve", points, closed[0], degree[0])
return curve
# Split Curves ===========================================
## Split the sub curve of given curve.
# @param curve NurbsCurve - The curves to split.
# @return List of NurbsCurve.
def splitCurve(curve):
t = curve.Kinematics.Global.Transform
curves = [addCurve(curve.Parent, curve.Name+str(i), nurbscrv.ControlPoints.Array, isClosed(nurbscrv), nurbscrv.Degree, t) for i, nurbscrv in enumerate(curve.ActivePrimitive.Geometry.Curves)]
return curves
# Is Closed ==============================================
## Return true if the given nurbscurve is closed.
# @param nurbscrv NurbsCurve - The nurbs curves to check.
# @return Boolean.
def isClosed(nurbscrv):
    if nurbscrv.Degree == 3:
        return nurbscrv.ControlPoints.Count != (len(nurbscrv.Knots.Array) - 2)
    else:
        return nurbscrv.ControlPoints.Count != len(nurbscrv.Knots.Array)
##########################################################
# OPERATOR
##########################################################
# Apply Curve Resampler Op ===============================
## Resample the curve on itself, code of the operator is in the plugin sn_CurveTools
# @param curve NurbsCurve - The curve to resample.
# @return Operator
def applyCurveResamplerOp(curve):
op = XSIFactory.CreateObject("gear_CurveResamplerOp")
op.AddIOPort(curve.ActivePrimitive)
op.Connect()
return op
##########################################################
# EVAL CURVE
##########################################################
# ========================================================
def getGlobalPositionFromPercentage(percentage, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
position = crv_sub.EvaluatePositionFromPercentage(percentage)[0]
position = XSIMath.MapObjectPositionToWorldSpace(crv_tra, position)
return position
# ========================================================
# @param position SIVector3 - The global position
# @param crv NurbsCurve - The curve to eval
# @return Double
def getClosestU(position, crv, normalized=False):
crv_geo = crv.ActivePrimitive.Geometry
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
rtn = crv_geo.GetClosestCurvePosition2(pos)
crv_sub = crv_geo.Curves(rtn[0])
u = rtn[2]
if normalized:
u = crv_sub.GetNormalizedUFromU(u)
return u
# ========================================================
# @param position SIVector3 - The global position
# @param crv NurbsCurve - The curve to eval
# @return Double
def getClosestPercentage(position, crv):
crv_geo = crv.ActivePrimitive.Geometry
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
rtn = crv_geo.GetClosestCurvePosition2(pos)
crv_sub = crv_geo.Curves(rtn[0])
perc = crv_sub.GetPercentageFromU(rtn[2])
return perc
# ========================================================
# @param position SIVector3 - The global position
# @param crv NurbsCurve - The curve to eval
# @param subcurve int - The index of subcurve to eval
# @return SIVector3 - The closest Global position
def getClosestGlobalTransform(position, crv, subcurve=0, tan_axis="x", upv_axis="y", normal=XSIMath.CreateVector3(0,1,0)):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
rtn = crv_geo.GetClosestCurvePosition2(pos)
u = rtn[2]
pos = rtn[3]
pos = XSIMath.MapObjectPositionToWorldSpace(crv_tra, pos)
tan = crv_sub.EvaluatePosition(u)[1]
r = crv_tra.Rotation
r.InvertInPlace()
tan.MulByRotationInPlace(r)
tan.AddInPlace(pos)
t = tra.getTransformLookingAt(pos, tan, normal, tan_axis+upv_axis, False)
return t
# ========================================================
# @param position SIVector3 - The global position
# @param crv NurbsCurve - The curve to eval
# @param subcurve int - The index of subcurve to eval
# @return SIVector3 - The closest Global position
def getClosestGlobalPosition(position, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
pos = crv_geo.GetClosestCurvePosition2(pos)[3]
pos = XSIMath.MapObjectPositionToWorldSpace(crv_tra, pos)
return pos
# ========================================================
# @param position SIVector3 - The global position
# @param crv NurbsCurve - The curve to eval
# @param subcurve int - The index of subcurve to eval
# @return SIVector3 - The closest tangent
def getClosestGlobalTangent(position, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
u = crv_geo.GetClosestCurvePosition2(pos)[2]
tan = crv_sub.EvaluatePosition(u)[1]
tan.MulByRotationInPlace(crv_tra.Rotation)
return tan
# ========================================================
# @param position SIVector3 - The global position
# @param crv NurbsCurve - The curve to eval
# @param subcurve int - The index of subcurve to eval
# @return SIVector3 - The closest tangent
def getClosestGlobalNormal(position, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
u = crv_geo.GetClosestCurvePosition2(pos)[2]
nor = crv_sub.EvaluatePosition(u)[2]
nor.MulByRotationInPlace(crv_tra.Rotation)
return nor
# ========================================================
# @param position SIVector3 - The global position
# @param crv NurbsCurve - The curve to eval
# @param subcurve int - The index of subcurve to eval
# @return SIVector3 - The closest tangent
def getClosestGlobalBiNormal(position, crv, subcurve=0):
crv_geo = crv.ActivePrimitive.Geometry
crv_sub = crv_geo.Curves(subcurve)
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapWorldPositionToObjectSpace(crv_tra, position)
u = crv_geo.GetClosestCurvePosition2(pos)[2]
    binormal = crv_sub.EvaluatePosition(u)[3]
    binormal.MulByRotationInPlace(crv_tra.Rotation)
    return binormal
# ========================================================
def getGlobalPointPosition(index, crv):
crv_geo = crv.ActivePrimitive.Geometry
crv_tra = crv.Kinematics.Global.Transform
pos = XSIMath.MapObjectPositionToWorldSpace(crv_tra, crv_geo.Points(index).Position)
return pos | miquelcampos/GEAR_mc | gear/xsi/curve.py | Python | lgpl-3.0 | 14,878 |
from test_methods import TestBaseFeedlyClass | pedroma/python-feedly | tests/__init__.py | Python | lgpl-3.0 | 44 |
"""
Created on 2013-12-16
@author: readon
@copyright: reserved
@note: CustomWidget example for mvp
"""
from gi.repository import Gtk
from gi.repository import GObject
class CustomEntry(Gtk.Entry):
"""
custom widget inherit from gtkentry.
"""
def __init__(self):
Gtk.Entry.__init__(self)
print "this is a custom widget loading"
GObject.type_register(CustomEntry)
| Readon/mvpsample | src/gtkcustom.py | Python | lgpl-3.0 | 414 |
# -*- coding: utf-8 -*-
import hashlib
import io
import struct
# default from KeePass2 source
BLOCK_LENGTH = 1024 * 1024
try:
file_types = (file, io.IOBase)
except NameError:
file_types = (io.IOBase,)
# HEADER_LENGTH = 4+32+4
def read_int(stream, length):
try:
return struct.unpack('<I', stream.read(length))[0]
except Exception:
return None
class HashedBlockIO(io.BytesIO):
"""
The data is stored in hashed blocks. Each block consists of a block index (4
bytes), the hash (32 bytes) and the block length (4 bytes), followed by the
block data. The block index starts counting at 0. The block hash is a
SHA-256 hash of the block data. A block has a maximum length of
BLOCK_LENGTH, but can be shorter.
Provide a I/O stream containing the hashed block data as the `block_stream`
argument when creating a HashedBlockReader. Alternatively the `bytes`
argument can be used to hand over data as a string/bytearray/etc. The data
is verified upon initialization and an IOError is raised when a hash does
not match.
HashedBlockReader is a subclass of io.BytesIO. The inherited read, seek, ...
functions shall be used to access the verified data.
"""
def __init__(self, block_stream=None, bytes=None):
io.BytesIO.__init__(self)
input_stream = None
if block_stream is not None:
if not (isinstance(block_stream, io.IOBase) or isinstance(block_stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
input_stream = block_stream
elif bytes is not None:
input_stream = io.BytesIO(bytes)
if input_stream is not None:
self.read_block_stream(input_stream)
def read_block_stream(self, block_stream):
"""
Read the whole block stream into the self-BytesIO.
"""
if not (isinstance(block_stream, io.IOBase) or isinstance(block_stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
while True:
data = self._next_block(block_stream)
if not self.write(data):
break
self.seek(0)
def _next_block(self, block_stream):
"""
Read the next block and verify the data.
Raises an IOError if the hash does not match.
"""
index = read_int(block_stream, 4)
bhash = block_stream.read(32)
length = read_int(block_stream, 4)
if length > 0:
data = block_stream.read(length)
if hashlib.sha256(data).digest() == bhash:
return data
else:
raise IOError('Block hash mismatch error.')
return bytes()
def write_block_stream(self, stream, block_length=BLOCK_LENGTH):
"""
Write all data in this buffer, starting at stream position 0, formatted
in hashed blocks to the given `stream`.
For example, writing data from one file into another as hashed blocks::
# create new hashed block io without input stream or data
hb = HashedBlockIO()
# read from a file, write into the empty hb
with open('sample.dat', 'rb') as infile:
hb.write(infile.read())
# write from the hb into a new file
            with open('hb_sample.dat', 'wb') as outfile:
hb.write_block_stream(outfile)
"""
if not (isinstance(stream, io.IOBase) or isinstance(stream, file_types)):
raise TypeError('Stream does not have the buffer interface.')
index = 0
self.seek(0)
while True:
data = self.read(block_length)
if data:
stream.write(struct.pack('<I', index))
stream.write(hashlib.sha256(data).digest())
stream.write(struct.pack('<I', len(data)))
stream.write(data)
index += 1
else:
stream.write(struct.pack('<I', index))
stream.write('\x00' * 32)
stream.write(struct.pack('<I', 0))
break
| AlessandroZ/LaZagne | Windows/lazagne/softwares/memory/libkeepass/hbio.py | Python | lgpl-3.0 | 4,214 |
###################################################################################
#
# Copyright (c) 2017-2019 MuK IT GmbH.
#
# This file is part of MuK Documents Access
# (see https://mukit.at).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
from odoo import models, fields, api
class AccessGroups(models.Model):
_inherit = 'muk_security.access_groups'
#----------------------------------------------------------
# Database
#----------------------------------------------------------
directories = fields.Many2many(
comodel_name='muk_dms.directory',
relation='muk_dms_directory_groups_rel',
string="Directories",
column1='gid',
column2='aid',
readonly=True)
count_directories = fields.Integer(
compute='_compute_count_directories',
string="Count Directories")
#----------------------------------------------------------
# Read, View
#----------------------------------------------------------
@api.depends('directories')
def _compute_count_directories(self):
for record in self:
record.count_directories = len(record.directories) | muk-it/muk_dms | muk_dms_access/models/access_groups.py | Python | lgpl-3.0 | 1,927 |
"""
-----------------------------------------------------------------------------
This source file is part of OSTIS (Open Semantic Technology for Intelligent Systems)
For the latest info, see http://www.ostis.net
Copyright (c) 2010 OSTIS
OSTIS is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OSTIS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with OSTIS. If not, see <http://www.gnu.org/licenses/>.
-----------------------------------------------------------------------------
"""
'''
Created on Oct 2, 2009
@author: Denis Koronchik
'''
import pm
import time
import thread
import threading
import sys, traceback
class Processor(threading.Thread):
def __init__(self, params = {}):
"""Constuructor
@param params: dictionary with parameters to start processor module
Available parameters:
repo_path - path to repository folder
@type params: dict
"""
threading.Thread.__init__(self)
self.stoped = False
self.finished = False
self.started = False
self.__repo_path = '.'
if params.has_key('repo_path'):
self.__repo_path = params['repo_path']
self.start()
def run(self):
try:
pm.do_init(False, True, self.__repo_path, False)
pm.do_dedicated(False)
except:
print "Error:", sys.exc_info()[0]
traceback.print_exc(file=sys.stdout)
return
self.started = True
while not self.stoped:
pm.do_step()
time.sleep(0.01)
pm.libsc_deinit()
self.finished = True
def stop(self):
self.stoped = True
class Callback(pm.sc_event_multi):
def __init__(self):
pm.sc_event_multi.__init__(self)
self.__disown__()
def activate(self, wait_type, params, len):
print str(params)
class TestOp(pm.ScOperationActSetMember):
def __init__(self, aset):
pm.ScOperationActSetMember.__init__(self, "Test", aset)
def activateImpl(self, arc, el):
print "Hello"
Processor({'repo_path': '../repo/fs_repo'})
#call = Callback()
#time.sleep(5)
#print "Open segment"
#seg = pm.get_session().open_segment("/proc/keynode")
#print seg
#
#print "Create element"
#print pm.get_session()
#node = pm.get_session().create_el(seg, pm.SC_N_CONST)
#print node
#
#print "Attach event"
##call.attach_to(pm.get_session(), pm.SC_WAIT_HACK_SET_MEMBER, pm.ADDR_AS_PAR(node), 1)
#
#oper = TestOp(node)
#oper.registerOperation()
#
#node1 = pm.get_session().create_el(seg, pm.SC_N_CONST)
#line = pm.get_session().create_el(seg, pm.SC_A_CONST)
#
#pm.get_session().set_beg(line, node)
#pm.get_session().set_end(line, node1)
#line = pm.get_session().gen3_f_a_f(node, line, seg, pm.SC_A_CONST, node1)
| laz2/sc-core | bindings/python/sc_core/pm_test.py | Python | lgpl-3.0 | 3,358 |
# Copyright 2019 Rafis Bikbov <https://it-projects.info/team/RafiZz>
# Copyright 2019 Alexandr Kolushov <https://it-projects.info/team/KolushovAlexandr>
# Copyright 2019 Eugene Molotov <https://it-projects.info/team/em230418>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
import logging
from odoo import api, conf
from odoo.tests.common import HttpCase, tagged
_logger = logging.getLogger(__name__)
@tagged("post_install", "-at_install")
class TestProductTmplImage(HttpCase):
def _get_original_image_url(self, px=1024):
return "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1e/Gullfoss%2C_an_iconic_waterfall_of_Iceland.jpg/{}px-Gullfoss%2C_an_iconic_waterfall_of_Iceland.jpg".format(
px
)
def _get_odoo_image_url(self, model, record_id, field):
return "/web/image?model={}&id={}&field={}".format(model, record_id, field)
def test_getting_product_variant_image_fields_urls(self):
assert (
"ir_attachment_url" in conf.server_wide_modules
), "ir_attachment_url is not in server_wide_modules. Please add it via --load parameter"
env = api.Environment(self.registry.test_cr, self.uid, {})
env["ir.config_parameter"].set_param("ir_attachment_url.storage", "url")
product_tmpl = env["product.template"].create(
{
"name": "Test template",
"image": self._get_original_image_url(1024),
"image_medium": self._get_original_image_url(128),
"image_small": self._get_original_image_url(64),
}
)
product_product = env["product.product"].create(
{
"name": "Test product",
"image": False,
"image_medium": False,
"image_small": False,
"product_tmpl_id": product_tmpl.id,
}
)
odoo_image_url = self._get_odoo_image_url(
"product.product", product_product.id, "image"
)
odoo_image_medium_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_medium"
)
odoo_image_small_url = self._get_odoo_image_url(
"product.product", product_product.id, "image_small"
)
product_tmpl_image_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image", product_tmpl
)
product_tmpl_image_medium_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image_medium", product_tmpl
)
product_tmpl_image_small_attachment = env["ir.http"].find_field_attachment(
env, "product.template", "image_small", product_tmpl
)
self.assertTrue(product_tmpl_image_attachment)
self.assertTrue(product_tmpl_image_medium_attachment)
self.assertTrue(product_tmpl_image_small_attachment)
self.authenticate("demo", "demo")
self.assertEqual(
self.url_open(odoo_image_url).url, product_tmpl_image_attachment.url
)
self.assertEqual(
self.url_open(odoo_image_medium_url).url,
product_tmpl_image_medium_attachment.url,
)
self.assertEqual(
self.url_open(odoo_image_small_url).url,
product_tmpl_image_small_attachment.url,
)
| yelizariev/addons-yelizariev | ir_attachment_url/tests/test_product_tmpl_image.py | Python | lgpl-3.0 | 3,397 |
# coding: utf-8
# <pycompressor - compress and merge static files (css,js) in html files>
# Copyright (C) <2012> Marcel Nicolay <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import sys
class CLI(object):
color = {
"PINK": "",
"BLUE": "",
"CYAN": "",
"GREEN": "",
"YELLOW": "",
"RED": "",
"END": "",
}
@staticmethod
def show_colors():
CLI.color = {
"PINK": "\033[35m",
"BLUE": "\033[34m",
"CYAN": "\033[36m",
"GREEN": "\033[32m",
"YELLOW": "\033[33m",
"RED": "\033[31m",
"END": "\033[0m",
}
def __init__(self):
self.__config_parser()
def __config_parser(self):
self.__parser = OptionParser(usage="usage: %prog [options] start")
self.__parser.add_option("-c", "--config",
dest="config_file",
default="compressor.yaml",
help="Use a specific config file. If not provided, will search for 'compressor.yaml' in the current directory.")
self.__parser.add_option("-s", "--sync",
dest="sync",
action="store_true",
default=False,
help="Sync files with S3")
self.__parser.add_option("-v", "--version",
action="store_true",
dest="compressor_version",
default=False,
help="Displays compressor version and exit.")
self.__parser.add_option("--color",
action="store_true",
dest="show_colors",
default=False,
help="Output with beautiful colors.")
self.__parser.add_option("--prefix",
dest="prefix",
default="min",
help="Use prefix in output js and css.")
def get_parser(self):
return self.__parser
def parse(self):
return self.__parser.parse_args()
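    # Hedged usage sketch (illustration only):
    #     cli = CLI()
    #     options, args = cli.parse()
    #     if options.show_colors:
    #         CLI.show_colors()
    #     cli.msg("starting compressor", "GREEN")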
def error_and_exit(self, msg):
self.msg("[ERROR] %s\n" % msg, "RED")
sys.exit(1)
def info_and_exit(self, msg):
self.msg("%s\n" % msg, "BLUE")
sys.exit(0)
def msg(self, msg, color="CYAN"):
print "%s%s%s" % (self.color[color], msg, self.color["END"]) | marcelnicolay/pycompressor | compressor/cli.py | Python | lgpl-3.0 | 2,967 |
from django.db import models
class Channel(models.Model):
channel_id = models.CharField(max_length=50, unique=True)
channel_name = models.CharField(max_length=50, null=True, blank=True)
rtmp_url = models.CharField(max_length=100, null=True, blank=True)
active = models.IntegerField(null=True, blank=True)
start = models.IntegerField(null=True, blank=True)
PID = models.IntegerField(null=True, blank=True)
PGID = models.IntegerField(null=True, blank=True)
client_ip = models.CharField(max_length=50, null=True, blank=True)
sort = models.IntegerField(null=False, blank=True, default=0)
class Meta:
managed = False
db_table = 'channel'
        verbose_name = '频道'  # "Channel"
        verbose_name_plural = '频道管理'  # "Channel management"
def __str__(self):
return self.channel_name + '(' + self.channel_id + ')'
class Program(models.Model):
channel = models.ForeignKey(Channel, to_field='channel_id', null=True)
start_time = models.DateTimeField(auto_now_add=False, null=True, blank=True)
end_time = models.DateTimeField(auto_now_add=False, null=True, blank=True)
url = models.CharField(max_length=50, null=True, blank=True)
title = models.CharField(max_length=50, null=True, blank=True)
finished = models.IntegerField(null=True, blank=True, default=0)
event_id = models.IntegerField(null=True, blank=True)
class Meta:
managed = False
db_table = 'program'
        verbose_name = '节目'  # "Program"
        verbose_name_plural = '节目管理'  # "Program management"
def __str__(self):
return str(self.channel) + ':' + self.title
| xahhy/Django-vod | epg/models.py | Python | lgpl-3.0 | 1,604 |
from __future__ import annotations
from decimal import Decimal
from typing import (
Any,
Mapping,
Sequence,
)
import uuid
from pprint import pprint
import pytest
from ai.backend.common.docker import ImageRef
from ai.backend.common.types import (
AccessKey, AgentId, KernelId,
ResourceSlot, SessionTypes,
)
from ai.backend.manager.scheduler import PendingSession, ExistingSession, AgentContext
from ai.backend.manager.scheduler.dispatcher import load_scheduler
from ai.backend.manager.scheduler.fifo import FIFOSlotScheduler, LIFOSlotScheduler
from ai.backend.manager.scheduler.drf import DRFScheduler
from ai.backend.manager.scheduler.mof import MOFScheduler
def test_load_intrinsic():
assert isinstance(load_scheduler('fifo', {}), FIFOSlotScheduler)
assert isinstance(load_scheduler('lifo', {}), LIFOSlotScheduler)
assert isinstance(load_scheduler('drf', {}), DRFScheduler)
assert isinstance(load_scheduler('mof', {}), MOFScheduler)
example_group_id = uuid.uuid4()
example_total_capacity = ResourceSlot({'cpu': '4.0', 'mem': '4096'})
@pytest.fixture
def example_agents():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
'rocm.devices': Decimal('2'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
]
@pytest.fixture
def example_mixed_agents():
return [
AgentContext(
agent_id=AgentId('i-gpu'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
}),
),
AgentContext(
agent_id=AgentId('i-cpu'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
}),
),
]
@pytest.fixture
def example_agents_first_one_assigned():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('2.0'),
'rocm.devices': Decimal('1'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('2.0'),
'rocm.devices': Decimal('1'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
),
]
@pytest.fixture
def example_agents_no_valid():
return [
AgentContext(
agent_id=AgentId('i-001'),
agent_addr='10.0.1.1:6001',
scaling_group='sg01',
available_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('4.0'),
'rocm.devices': Decimal('2'),
}),
),
AgentContext(
agent_id=AgentId('i-101'),
agent_addr='10.0.2.1:6001',
scaling_group='sg02',
available_slots=ResourceSlot({
'cpu': Decimal('0'),
'mem': Decimal('0'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
occupied_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('2560'),
'cuda.shares': Decimal('1.0'),
'rocm.devices': Decimal('8'),
}),
),
]
pending_kernel_ids: Sequence[KernelId] = [
KernelId(uuid.uuid4()) for _ in range(3)
]
existing_kernel_ids: Sequence[KernelId] = [
KernelId(uuid.uuid4()) for _ in range(3)
]
_common_dummy_for_pending_session: Mapping[str, Any] = dict(
    image_ref=ImageRef('lablup/python:3.6-ubuntu18.04'),
domain_name='default',
group_id=example_group_id,
resource_policy={},
resource_opts={},
mounts=[],
mount_map={},
environ={},
bootstrap_script=None,
startup_command=None,
internal_data=None,
preopen_ports=[],
)
_common_dummy_for_existing_session: Mapping[str, Any] = dict(
    image_ref=ImageRef('lablup/python:3.6-ubuntu18.04'),
domain_name='default',
group_id=example_group_id,
)
@pytest.fixture
def example_pending_sessions():
    # lower indices are enqueued first.
return [
PendingSession( # rocm
kernel_id=pending_kernel_ids[0],
access_key=AccessKey('user01'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('2.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('1'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
PendingSession( # cuda
kernel_id=pending_kernel_ids[1],
access_key=AccessKey('user02'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('0.5'),
'rocm.devices': Decimal('0'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
PendingSession( # cpu-only
kernel_id=pending_kernel_ids[2],
access_key=AccessKey('user03'),
session_name='es01',
session_type=SessionTypes.BATCH,
scaling_group='sg01',
requested_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
target_sgroup_names=[],
**_common_dummy_for_pending_session,
),
]
@pytest.fixture
def example_existing_sessions():
return [
ExistingSession(
kernel_id=existing_kernel_ids[0],
access_key=AccessKey('user01'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('3.0'),
'mem': Decimal('1024'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('1'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
ExistingSession(
kernel_id=existing_kernel_ids[1],
access_key=AccessKey('user02'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('1.0'),
'mem': Decimal('2048'),
'cuda.shares': Decimal('0.5'),
'rocm.devices': Decimal('0'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
ExistingSession(
kernel_id=existing_kernel_ids[2],
access_key=AccessKey('user03'),
session_name='es01',
session_type=SessionTypes.BATCH,
occupying_slots=ResourceSlot({
'cpu': Decimal('4.0'),
'mem': Decimal('4096'),
'cuda.shares': Decimal('0'),
'rocm.devices': Decimal('0'),
}),
scaling_group='sg01',
**_common_dummy_for_existing_session,
),
]
def _find_and_pop_picked_session(pending_sessions, picked_session_id):
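    # Python's for-else: the else branch runs only if the loop finishes
    # without `break`, i.e. when no pending session matched the picked id.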
for picked_idx, pending_sess in enumerate(pending_sessions):
if pending_sess.kernel_id == picked_session_id:
break
else:
# no matching entry for picked session?
raise RuntimeError('should not reach here')
return pending_sessions.pop(picked_idx)
def test_fifo_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = FIFOSlotScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
assert agent_id == AgentId('i-001')
def test_lifo_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = LIFOSlotScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[2].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
    assert agent_id == AgentId('i-001')
def test_fifo_scheduler_favor_cpu_for_requests_without_accelerators(
example_mixed_agents,
example_pending_sessions,
):
scheduler = FIFOSlotScheduler({})
for idx in range(3):
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
[])
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_mixed_agents, picked_session)
if idx == 0:
# example_mixed_agents do not have any agent with ROCM accelerators.
assert agent_id is None
elif idx == 1:
assert agent_id == AgentId('i-gpu')
elif idx == 2:
# It should favor the CPU-only agent if the requested slots
# do not include accelerators.
assert agent_id == AgentId('i-cpu')
def test_lifo_scheduler_favor_cpu_for_requests_without_accelerators(
example_mixed_agents,
example_pending_sessions,
):
# Check the reverse with the LIFO scheduler.
    # The result must be the same.
scheduler = LIFOSlotScheduler({})
for idx in range(3):
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
[])
assert picked_session_id == example_pending_sessions[-1].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_mixed_agents, picked_session)
if idx == 2:
# example_mixed_agents do not have any agent with ROCM accelerators.
assert agent_id is None
elif idx == 1:
assert agent_id == AgentId('i-gpu')
elif idx == 0:
# It should favor the CPU-only agent if the requested slots
# do not include accelerators.
assert agent_id == AgentId('i-cpu')
def test_drf_scheduler(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = DRFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
pprint(example_pending_sessions)
assert picked_session_id == example_pending_sessions[1].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
    assert agent_id == AgentId('i-001')
def test_mof_scheduler_first_assign(example_agents, example_pending_sessions, example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents, picked_session)
    assert agent_id == AgentId('i-001')
def test_mof_scheduler_second_assign(example_agents_first_one_assigned, example_pending_sessions,
example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(
example_agents_first_one_assigned, picked_session)
    assert agent_id == AgentId('i-101')
def test_mof_scheduler_no_valid_agent(example_agents_no_valid, example_pending_sessions,
example_existing_sessions):
scheduler = MOFScheduler({})
picked_session_id = scheduler.pick_session(
example_total_capacity,
example_pending_sessions,
example_existing_sessions)
assert picked_session_id == example_pending_sessions[0].kernel_id
picked_session = _find_and_pop_picked_session(
example_pending_sessions, picked_session_id)
agent_id = scheduler.assign_agent(example_agents_no_valid, picked_session)
assert agent_id is None
# TODO: write tests for multiple agents and scaling groups
| lablup/sorna-manager | tests/manager/test_scheduler.py | Python | lgpl-3.0 | 16,173 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: stats.py
"""Statistics analyzer for HotShot."""
import profile
import pstats
import hotshot.log
from hotshot.log import ENTER, EXIT
def load(filename):
return StatsLoader(filename).load()
class StatsLoader:
def __init__(self, logfn):
self._logfn = logfn
self._code = {}
self._stack = []
self.pop_frame = self._stack.pop
def load(self):
p = Profile()
p.get_time = _brokentimer
log = hotshot.log.LogReader(self._logfn)
taccum = 0
for event in log:
what, (filename, lineno, funcname), tdelta = event
if tdelta > 0:
taccum += tdelta
if what == ENTER:
frame = self.new_frame(filename, lineno, funcname)
p.trace_dispatch_call(frame, taccum * 1e-06)
taccum = 0
elif what == EXIT:
frame = self.pop_frame()
p.trace_dispatch_return(frame, taccum * 1e-06)
taccum = 0
return pstats.Stats(p)
def new_frame(self, *args):
try:
code = self._code[args]
except KeyError:
code = FakeCode(*args)
self._code[args] = code
if self._stack:
back = self._stack[-1]
else:
back = None
frame = FakeFrame(code, back)
self._stack.append(frame)
return frame
class Profile(profile.Profile):
def simulate_cmd_complete(self):
pass
class FakeCode:
def __init__(self, filename, firstlineno, funcname):
self.co_filename = filename
self.co_firstlineno = firstlineno
self.co_name = self.__name__ = funcname
class FakeFrame:
def __init__(self, code, back):
self.f_back = back
self.f_code = code
def _brokentimer():
    raise RuntimeError('this timer should not be called') | DarthMaulware/EquationGroupLeaks/Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/hotshot/stats.py | Python | unlicense | 2,053 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: charset.py
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec']
import codecs
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
QP = 1
BASE64 = 2
SHORTEST = 3
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
CHARSETS = {'iso-8859-1': (
QP, QP, None),
'iso-8859-2': (
QP, QP, None),
'iso-8859-3': (
QP, QP, None),
'iso-8859-4': (
QP, QP, None),
'iso-8859-9': (
QP, QP, None),
'iso-8859-10': (
QP, QP, None),
'iso-8859-13': (
QP, QP, None),
'iso-8859-14': (
QP, QP, None),
'iso-8859-15': (
QP, QP, None),
'iso-8859-16': (
QP, QP, None),
'windows-1252': (
QP, QP, None),
'viscii': (
QP, QP, None),
'us-ascii': (None, None, None),
'big5': (
BASE64, BASE64, None),
'gb2312': (
BASE64, BASE64, None),
'euc-jp': (
BASE64, None, 'iso-2022-jp'),
'shift_jis': (
BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (
BASE64, None, None),
'koi8-r': (
BASE64, BASE64, None),
'utf-8': (
SHORTEST, BASE64, 'utf-8'),
'8bit': (
None, BASE64, 'utf-8')
}
ALIASES = {'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'latin_10': 'iso-8859-16',
'latin-10': 'iso-8859-16',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii'
}
CODEC_MAP = {'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
'us-ascii': None
}
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc is either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (
header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
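# A minimal sketch of extending the registry (the charset and codec names
# below are illustrative assumptions, not entries shipped with this module):
#
#   add_charset('x-custom', QP, BASE64, 'utf-8')
#   add_alias('x-custom-alias', 'x-custom')
#   add_codec('x-custom', 'utf_8')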
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
module expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
    output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower().encode('ascii')
if not (input_charset in ALIASES or input_charset in CHARSETS):
try:
input_charset = codecs.lookup(input_charset).name
except LookupError:
pass
self.input_charset = ALIASES.get(input_charset, input_charset)
henc, benc, conv = CHARSETS.get(self.input_charset, (
SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
self.input_codec = CODEC_MAP.get(self.input_charset, self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset, self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
        if self.body_encoding == QP:
            return 'quoted-printable'
        elif self.body_encoding == BASE64:
            return 'base64'
        else:
            return encode_7or8bit
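    # For illustration, per the CHARSETS table above: Charset('iso-8859-1')
    # has body_encoding QP, so this returns 'quoted-printable', while
    # Charset('us-ascii') has no body encoding and yields the encode_7or8bit
    # function instead.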
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec != self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
        if isinstance(s, unicode) or self.input_codec is None:
            return s
        try:
            return unicode(s, self.input_codec, 'replace')
        except LookupError:
            return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
if to_output:
codec = self.output_codec
else:
codec = self.input_codec
        if not isinstance(ustr, unicode) or codec is None:
            return ustr
        try:
            return ustr.encode(codec, 'replace')
        except LookupError:
            return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
        if self.header_encoding == BASE64:
            return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == QP:
            return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
        elif self.header_encoding == SHORTEST:
            lenb64 = email.base64mime.base64_len(s)
            lenqp = email.quoprimime.header_quopri_len(s)
            return min(lenb64, lenqp) + len(cset) + MISC_LEN
        else:
            return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
        if self.header_encoding == BASE64:
            return email.base64mime.header_encode(s, cset)
        elif self.header_encoding == QP:
            return email.quoprimime.header_encode(s, cset, maxlinelen=None)
        elif self.header_encoding == SHORTEST:
            lenb64 = email.base64mime.base64_len(s)
            lenqp = email.quoprimime.header_quopri_len(s)
            if lenb64 < lenqp:
                return email.base64mime.header_encode(s, cset)
            else:
                return email.quoprimime.header_encode(s, cset, maxlinelen=None)
        else:
            return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
        if self.body_encoding is BASE64:
            return email.base64mime.body_encode(s)
        elif self.body_encoding is QP:
            return email.quoprimime.body_encode(s)
        else:
            return s | DarthMaulware/EquationGroupLeaks/Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/email/charset.py | Python | unlicense | 14,131 |
# ------------------------------------------------------------------------------
# This extension adds support for Jinja templates.
# ------------------------------------------------------------------------------
import sys
from ivy import hooks, site, templates
try:
import jinja2
except ImportError:
jinja2 = None
# Stores an initialized Jinja environment instance.
env = None
# The jinja2 package is an optional dependency.
if jinja2:
# Initialize our Jinja environment on the 'init' event hook.
@hooks.register('init')
def init():
# Initialize a template loader.
settings = {
'loader': jinja2.FileSystemLoader(site.theme('templates'))
}
# Check the site's config file for any custom settings.
settings.update(site.config.get('jinja', {}))
# Initialize an Environment instance.
global env
env = jinja2.Environment(**settings)
# Register our template engine callback for files with a .jinja extension.
@templates.register('jinja')
def callback(page, filename):
try:
template = env.get_template(filename)
return template.render(page)
except jinja2.TemplateError as err:
msg = "------------------------\n"
msg += " Jinja Template Error \n"
msg += "------------------------\n\n"
msg += " Template: %s\n" % filename
msg += " Page: %s\n\n" % page['filepath']
msg += " %s: %s" % (err.__class__.__name__, err)
if err.__context__:
cause = err.__context__
msg += "\n\n The following cause was reported:\n\n"
msg += " %s: %s" % (cause.__class__.__name__, cause)
sys.exit(msg)
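# Usage sketch (an assumption about typical setups, not part of this file):
# with the extension active, a theme template such as 'page.jinja' is rendered
# with the page data, and custom Jinja settings can be supplied from the site
# config under the 'jinja' key, e.g. {'trim_blocks': True}.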
| dmulholland/ivy | ivy/ext/ivy_jinja.py | Python | unlicense | 1,779 |
import random
# CoRe
def turn(board, symbol):
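    # Keeps sampling random coordinates on the 8x8 board until it finds an
    # empty cell (marked '#'); getboard() is assumed to be provided by the
    # surrounding game engine rather than defined here.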
while 1:
x = random.choice(range(8))
y = random.choice(range(8))
if getboard(board,x,y) == '#': return (x,y)
| ac1235/core | ai_templates/crazy.py | Python | unlicense | 164 |
# encoding: utf-8
import re
import json
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
ExtractorError,
find_xpath_attr,
unified_strdate,
determine_ext,
get_element_by_id,
compat_str,
)
# There are different sources of video in arte.tv, the extraction process
# is different for each one. The videos usually expire in 7 days, so we can't
# add tests.
class ArteTvIE(InfoExtractor):
_VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html'
_LIVEWEB_URL = r'(?:http://)?liveweb.arte.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
_LIVE_URL = r'index-[0-9]+\.html$'
IE_NAME = u'arte.tv'
@classmethod
def suitable(cls, url):
return any(re.match(regex, url) for regex in (cls._VIDEOS_URL, cls._LIVEWEB_URL))
# TODO implement Live Stream
# from ..utils import compat_urllib_parse
# def extractLiveStream(self, url):
# video_lang = url.split('/')[-4]
# info = self.grep_webpage(
# url,
# r'src="(.*?/videothek_js.*?\.js)',
# 0,
# [
# (1, 'url', u'Invalid URL: %s' % url)
# ]
# )
# http_host = url.split('/')[2]
# next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
# info = self.grep_webpage(
# next_url,
# r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
# '(http://.*?\.swf).*?' +
# '(rtmp://.*?)\'',
# re.DOTALL,
# [
# (1, 'path', u'could not extract video path: %s' % url),
# (2, 'player', u'could not extract video player: %s' % url),
# (3, 'url', u'could not extract video url: %s' % url)
# ]
# )
# video_url = u'%s/%s' % (info.get('url'), info.get('path'))
def _real_extract(self, url):
mobj = re.match(self._VIDEOS_URL, url)
if mobj is not None:
id = mobj.group('id')
lang = mobj.group('lang')
return self._extract_video(url, id, lang)
mobj = re.match(self._LIVEWEB_URL, url)
if mobj is not None:
name = mobj.group('name')
lang = mobj.group('lang')
return self._extract_liveweb(url, name, lang)
if re.search(self._LIVE_URL, url) is not None:
raise ExtractorError(u'Arte live streams are not yet supported, sorry')
# self.extractLiveStream(url)
# return
def _extract_video(self, url, video_id, lang):
"""Extract from videos.arte.tv"""
ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
config_xml_url = config_node.attrib['ref']
config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')
video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
def _key(m):
quality = m.group('quality')
if quality == 'hd':
return 2
else:
return 1
# We pick the best quality
video_urls = sorted(video_urls, key=_key)
video_url = list(video_urls)[-1].group('url')
title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
config_xml, 'thumbnail')
return {'id': video_id,
'title': title,
'thumbnail': thumbnail,
'url': video_url,
'ext': 'flv',
}
def _extract_liveweb(self, url, name, lang):
"""Extract form http://liveweb.arte.tv/"""
webpage = self._download_webpage(url, name)
video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, u'event id')
config_xml = self._download_webpage('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
video_id, u'Downloading information')
config_doc = xml.etree.ElementTree.fromstring(config_xml.encode('utf-8'))
event_doc = config_doc.find('event')
url_node = event_doc.find('video').find('urlHd')
if url_node is None:
url_node = event_doc.find('urlSd')
return {'id': video_id,
'title': event_doc.find('name%s' % lang.capitalize()).text,
'url': url_node.text.replace('MP4', 'mp4'),
'ext': 'flv',
'thumbnail': self._og_search_thumbnail(webpage),
}
class ArteTVPlus7IE(InfoExtractor):
IE_NAME = u'arte.tv:+7'
_VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
@classmethod
def _extract_url_info(cls, url):
mobj = re.match(cls._VALID_URL, url)
lang = mobj.group('lang')
# This is not a real id, it can be for example AJT for the news
# http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
video_id = mobj.group('id')
return video_id, lang
def _real_extract(self, url):
video_id, lang = self._extract_url_info(url)
webpage = self._download_webpage(url, video_id)
return self._extract_from_webpage(webpage, video_id, lang)
def _extract_from_webpage(self, webpage, video_id, lang):
json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')
json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
self.report_extraction(video_id)
info = json.loads(json_info)
player_info = info['videoJsonPlayer']
info_dict = {
'id': player_info['VID'],
'title': player_info['VTI'],
'description': player_info.get('VDE'),
'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}
all_formats = player_info['VSR'].values()
# Some formats use the m3u8 protocol
all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
def _match_lang(f):
if f.get('versionCode') is None:
return True
# Return true if that format is in the language of the url
if lang == 'fr':
l = 'F'
elif lang == 'de':
l = 'A'
regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
return any(re.match(r, f['versionCode']) for r in regexes)
# Some formats may not be in the same language as the url
formats = filter(_match_lang, all_formats)
formats = list(formats) # in python3 filter returns an iterator
if not formats:
# Some videos are only available in the 'Originalversion'
# they aren't tagged as being in French or German
if all(f['versionCode'] == 'VO' for f in all_formats):
formats = all_formats
else:
raise ExtractorError(u'The formats list is empty')
if re.match(r'[A-Z]Q', formats[0]['quality']) is not None:
def sort_key(f):
return ['HQ', 'MQ', 'EQ', 'SQ'].index(f['quality'])
else:
def sort_key(f):
return (
# Sort first by quality
int(f.get('height',-1)),
int(f.get('bitrate',-1)),
# The original version with subtitles has lower relevance
re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
                # The version with subtitles for the deaf/hard-of-hearing (sourds/malentendants) has even lower relevance
re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
)
formats = sorted(formats, key=sort_key)
def _format(format_info):
quality = ''
height = format_info.get('height')
if height is not None:
quality = compat_str(height)
bitrate = format_info.get('bitrate')
if bitrate is not None:
quality += '-%d' % bitrate
if format_info.get('versionCode') is not None:
format_id = u'%s-%s' % (quality, format_info['versionCode'])
else:
format_id = quality
info = {
'format_id': format_id,
'format_note': format_info.get('versionLibelle'),
'width': format_info.get('width'),
'height': height,
}
if format_info['mediaType'] == u'rtmp':
info['url'] = format_info['streamer']
info['play_path'] = 'mp4:' + format_info['url']
info['ext'] = 'flv'
else:
info['url'] = format_info['url']
info['ext'] = determine_ext(info['url'])
return info
info_dict['formats'] = [_format(f) for f in formats]
return info_dict
# It also uses the arte_vp_url url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
IE_NAME = u'arte.tv:creative'
_VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/magazine?/(?P<id>.+)'
_TEST = {
u'url': u'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
u'file': u'050489-002.mp4',
u'info_dict': {
u'title': u'Agentur Amateur / Agence Amateur #2 : Corporate Design',
},
}
class ArteTVFutureIE(ArteTVPlus7IE):
IE_NAME = u'arte.tv:future'
_VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(thema|sujet)/.*?#article-anchor-(?P<id>\d+)'
_TEST = {
u'url': u'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
u'file': u'050940-003.mp4',
u'info_dict': {
u'title': u'Les champignons au secours de la planète',
},
}
def _real_extract(self, url):
anchor_id, lang = self._extract_url_info(url)
webpage = self._download_webpage(url, anchor_id)
row = get_element_by_id(anchor_id, webpage)
return self._extract_from_webpage(row, anchor_id, lang)
| ashutosh-mishra/youtube-dl | youtube_dl/extractor/arte.py | Python | unlicense | 10,732 |
#!/usr/bin/env python
import sys
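# Reads whitespace-separated (key, value) pairs from stdin, skipping the first
# (header) line, collects the set of values seen per key, then prints a matrix
# of pairwise intersection sizes -- the raw numbers behind a heatmap.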
line = sys.stdin.readline() # skip the header
line = sys.stdin.readline()
groups = {}
while line:
    v = line.split()
    if v[0] not in groups:
        groups[v[0]] = set()
    groups[v[0]].add(v[1])
    line = sys.stdin.readline()
s = [k for (_, k) in sorted([(len(v), k) for (k, v) in groups.items()])]
print ' '.join(reversed(s))
for i in s:
    print i,
    for j in reversed(s):
        print len(groups[i].intersection(groups[j])),
    print
| razvanm/fs-expedition | heatmap.py | Python | unlicense | 464 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: suite.py
"""TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda : None)
func()
class BaseTestSuite(object):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return '<%s tests=%s>' % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
__hash__ = None
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self:
cases += test.countTestCases()
return cases
def addTest(self, test):
if not hasattr(test, '__call__'):
raise TypeError('{} is not callable'.format(repr(test)))
        if isinstance(test, type) and issubclass(test, (case.TestCase, TestSuite)):
raise TypeError('TestCases and TestSuites must be instantiated before passing them to addTest()')
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, basestring):
raise TypeError('tests must be an iterable of tests, not a string')
for test in tests:
self.addTest(test)
def run(self, result):
for test in self:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for test in self:
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if getattr(test.__class__, '_classSetupFailed', False) or getattr(result, '_moduleSetUpFailed', False):
continue
if not debug:
test(result)
else:
test.debug()
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self.run(debug, True)
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
else:
if result._moduleSetUpFailed:
return
if getattr(currentClass, '__unittest_skip__', False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
setUpClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
else:
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
else:
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
else:
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, '__unittest_skip__', False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
_call_if_exists(result, '_setupStdout')
try:
try:
tearDownClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return '<ErrorHolder description=%r>' % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"""A crude way to tell apart testcases and suites with duck-typing"""
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"""Used by the TestSuite to hold previous class when running in debug."""
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/unittest/suite.py | Python | unlicense | 10,084 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for BatchRunPivotReports
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-analytics-data
# [START analyticsdata_v1beta_generated_BetaAnalyticsData_BatchRunPivotReports_async]
from google.analytics import data_v1beta
async def sample_batch_run_pivot_reports():
# Create a client
client = data_v1beta.BetaAnalyticsDataAsyncClient()
# Initialize request argument(s)
request = data_v1beta.BatchRunPivotReportsRequest(
)
# Make the request
response = await client.batch_run_pivot_reports(request=request)
# Handle the response
print(response)
# [END analyticsdata_v1beta_generated_BetaAnalyticsData_BatchRunPivotReports_async]
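# A sketch of driving the coroutine outside an existing event loop (this part
# is not generated code; it is only an assumption about typical usage):
#
#   import asyncio
#   asyncio.run(sample_batch_run_pivot_reports())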
| googleapis/python-analytics-data | samples/generated_samples/analyticsdata_v1beta_generated_beta_analytics_data_batch_run_pivot_reports_async.py | Python | apache-2.0 | 1,518 |
#!/usr/bin/env python3
# coding=utf-8
"""
This module, debugging.py, will contain code related to debugging (such as printing error messages).
"""
#import sys
#sys.path.insert(0, '/home/dev_usr/urbtek')
#from universal_code import system_operations as so
class MyException(Exception):
"""
Just something useful to have to throw some of my own custom exception.
"""
pass
class ParameterException(Exception):
"""
A custom exception for when a function receives bad parameter data.
"""
def __init__(self, message):
super(ParameterException, self).__init__(message)
class AbstractMethodNotImplementedException(Exception):
"""
A custom exception for when a function gets called that hasn't been set in a child class.
"""
    def __init__(self, message):
super(AbstractMethodNotImplementedException, self).__init__(message)
def raise_exception(exception, message):
raise exception(message)
TCP_LOCAL_HOST = 'tcp://127.0.0.1:'
LOCAL_HOST = '127.0.0.1'
NEXUS_DEV_RECEIVE_PORT = 40000
NEXUS_DEV_MANUAL_COMMUNICATION_PORT = 40001
NEXUS_DEV_AUTOMATED_COMMUNICATION_PORT = 40002
starting_port = NEXUS_DEV_AUTOMATED_COMMUNICATION_PORT + 1
def get_a_free_port():
global starting_port
# We can assume ports are free because ports above 30000 have been sealed off.
# TODO: THIS WILL BREAK WHEN MORE THAN DEV EXISTS.
starting_port += 1
return starting_port - 1
# Terminal font coloring and styling.
class TextColors:
HEADER = '\033[95m'
OK_BLUE = '\033[94m'
OK_GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def print_text_with_color(text, color, end=None):
if end is None:
print(color + text + TextColors.ENDC + '\n')
else:
        print(color + text + TextColors.ENDC, end=end)
def terminate(termination_message=''):
    if termination_message == '':
print_text_with_color('Program termination has been initiated, good bye!', TextColors.FAIL)
else:
print_text_with_color(termination_message, TextColors.WARNING, '')
if not termination_message.endswith('.'):
print_text_with_color('. The program will now terminate.', TextColors.FAIL)
else:
print_text_with_color(' The program will now terminate.', TextColors.FAIL)
exit()
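# Minimal usage sketch (assumes a terminal that honors ANSI escape codes):
#
#   print_text_with_color('All systems nominal.', TextColors.OK_GREEN)
#   terminate('A fatal configuration error occurred')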
| utarsuno/urbtek | universal_code/debugging.py | Python | apache-2.0 | 2,265 |
"""Tests for the kraken sensor platform."""
from datetime import timedelta
from unittest.mock import patch
from pykrakenapi.pykrakenapi import KrakenAPIError
from homeassistant.components.kraken.const import (
CONF_TRACKED_ASSET_PAIRS,
DEFAULT_SCAN_INTERVAL,
DEFAULT_TRACKED_ASSET_PAIR,
DOMAIN,
)
from homeassistant.const import CONF_SCAN_INTERVAL, EVENT_HOMEASSISTANT_START
import homeassistant.util.dt as dt_util
from .const import (
MISSING_PAIR_TICKER_INFORMATION_RESPONSE,
MISSING_PAIR_TRADEABLE_ASSET_PAIR_RESPONSE,
TICKER_INFORMATION_RESPONSE,
TRADEABLE_ASSET_PAIR_RESPONSE,
)
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_sensor(hass):
"""Test that sensor has a value."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"pykrakenapi.KrakenAPI.get_tradable_asset_pairs",
return_value=TRADEABLE_ASSET_PAIR_RESPONSE,
), patch(
"pykrakenapi.KrakenAPI.get_ticker_information",
return_value=TICKER_INFORMATION_RESPONSE,
):
entry = MockConfigEntry(
domain=DOMAIN,
unique_id="0123456789",
options={
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_TRACKED_ASSET_PAIRS: [
"ADA/XBT",
"ADA/ETH",
"XBT/EUR",
"XBT/GBP",
"XBT/USD",
"XBT/JPY",
],
},
)
entry.add_to_hass(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
# Pre-create registry entries for disabled by default sensors
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_ask_volume",
suggested_object_id="xbt_usd_ask_volume",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_last_trade_closed",
suggested_object_id="xbt_usd_last_trade_closed",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_bid_volume",
suggested_object_id="xbt_usd_bid_volume",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_volume_today",
suggested_object_id="xbt_usd_volume_today",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_volume_last_24h",
suggested_object_id="xbt_usd_volume_last_24h",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_volume_weighted_average_today",
suggested_object_id="xbt_usd_volume_weighted_average_today",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_volume_weighted_average_last_24h",
suggested_object_id="xbt_usd_volume_weighted_average_last_24h",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_number_of_trades_today",
suggested_object_id="xbt_usd_number_of_trades_today",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_number_of_trades_last_24h",
suggested_object_id="xbt_usd_number_of_trades_last_24h",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_low_last_24h",
suggested_object_id="xbt_usd_low_last_24h",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_high_last_24h",
suggested_object_id="xbt_usd_high_last_24h",
disabled_by=None,
)
registry.async_get_or_create(
"sensor",
DOMAIN,
"xbt_usd_opening_price_today",
suggested_object_id="xbt_usd_opening_price_today",
disabled_by=None,
)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
xbt_usd_sensor = hass.states.get("sensor.xbt_usd_ask")
assert xbt_usd_sensor.state == "0.0003494"
assert xbt_usd_sensor.attributes["icon"] == "mdi:currency-usd"
xbt_eur_sensor = hass.states.get("sensor.xbt_eur_ask")
assert xbt_eur_sensor.state == "0.0003494"
assert xbt_eur_sensor.attributes["icon"] == "mdi:currency-eur"
ada_xbt_sensor = hass.states.get("sensor.ada_xbt_ask")
assert ada_xbt_sensor.state == "0.0003494"
assert ada_xbt_sensor.attributes["icon"] == "mdi:currency-btc"
xbt_jpy_sensor = hass.states.get("sensor.xbt_jpy_ask")
assert xbt_jpy_sensor.state == "0.0003494"
assert xbt_jpy_sensor.attributes["icon"] == "mdi:currency-jpy"
xbt_gbp_sensor = hass.states.get("sensor.xbt_gbp_ask")
assert xbt_gbp_sensor.state == "0.0003494"
assert xbt_gbp_sensor.attributes["icon"] == "mdi:currency-gbp"
ada_eth_sensor = hass.states.get("sensor.ada_eth_ask")
assert ada_eth_sensor.state == "0.0003494"
assert ada_eth_sensor.attributes["icon"] == "mdi:cash"
xbt_usd_ask_volume = hass.states.get("sensor.xbt_usd_ask_volume")
assert xbt_usd_ask_volume.state == "15949"
xbt_usd_last_trade_closed = hass.states.get("sensor.xbt_usd_last_trade_closed")
assert xbt_usd_last_trade_closed.state == "0.0003478"
xbt_usd_bid_volume = hass.states.get("sensor.xbt_usd_bid_volume")
assert xbt_usd_bid_volume.state == "20792"
xbt_usd_volume_today = hass.states.get("sensor.xbt_usd_volume_today")
assert xbt_usd_volume_today.state == "146300.24906838"
xbt_usd_volume_last_24h = hass.states.get("sensor.xbt_usd_volume_last_24h")
assert xbt_usd_volume_last_24h.state == "253478.04715403"
xbt_usd_volume_weighted_average_today = hass.states.get(
"sensor.xbt_usd_volume_weighted_average_today"
)
assert xbt_usd_volume_weighted_average_today.state == "0.000348573"
xbt_usd_volume_weighted_average_last_24h = hass.states.get(
"sensor.xbt_usd_volume_weighted_average_last_24h"
)
assert xbt_usd_volume_weighted_average_last_24h.state == "0.000344881"
xbt_usd_number_of_trades_today = hass.states.get(
"sensor.xbt_usd_number_of_trades_today"
)
assert xbt_usd_number_of_trades_today.state == "82"
xbt_usd_number_of_trades_last_24h = hass.states.get(
"sensor.xbt_usd_number_of_trades_last_24h"
)
assert xbt_usd_number_of_trades_last_24h.state == "128"
xbt_usd_low_last_24h = hass.states.get("sensor.xbt_usd_low_last_24h")
assert xbt_usd_low_last_24h.state == "0.0003446"
xbt_usd_high_last_24h = hass.states.get("sensor.xbt_usd_high_last_24h")
assert xbt_usd_high_last_24h.state == "0.0003521"
xbt_usd_opening_price_today = hass.states.get(
"sensor.xbt_usd_opening_price_today"
)
assert xbt_usd_opening_price_today.state == "0.0003513"
async def test_missing_pair_marks_sensor_unavailable(hass):
"""Test that a missing tradable asset pair marks the sensor unavailable."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"pykrakenapi.KrakenAPI.get_tradable_asset_pairs",
return_value=TRADEABLE_ASSET_PAIR_RESPONSE,
) as tradeable_asset_pairs_mock, patch(
"pykrakenapi.KrakenAPI.get_ticker_information",
return_value=TICKER_INFORMATION_RESPONSE,
) as ticket_information_mock:
entry = MockConfigEntry(
domain=DOMAIN,
options={
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_TRACKED_ASSET_PAIRS: [DEFAULT_TRACKED_ASSET_PAIR],
},
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.xbt_usd_ask")
assert sensor.state == "0.0003494"
tradeable_asset_pairs_mock.return_value = (
MISSING_PAIR_TRADEABLE_ASSET_PAIR_RESPONSE
)
ticket_information_mock.side_effect = KrakenAPIError(
"EQuery:Unknown asset pair"
)
async_fire_time_changed(
hass, utcnow + timedelta(seconds=DEFAULT_SCAN_INTERVAL * 2)
)
await hass.async_block_till_done()
ticket_information_mock.side_effect = None
ticket_information_mock.return_value = MISSING_PAIR_TICKER_INFORMATION_RESPONSE
async_fire_time_changed(
hass, utcnow + timedelta(seconds=DEFAULT_SCAN_INTERVAL * 2)
)
await hass.async_block_till_done()
sensor = hass.states.get("sensor.xbt_usd_ask")
assert sensor.state == "unavailable"
| lukas-hetzenecker/home-assistant | tests/components/kraken/test_sensor.py | Python | apache-2.0 | 9,736 |
"""
pluginconf.d configuration file - Files
=======================================
Shared mappers for parsing and extracting data from
``/etc/yum/pluginconf.d/*.conf`` files. Parsers contained
in this module are:
PluginConfD - files ``/etc/yum/pluginconf.d/*.conf``
---------------------------------------------------
PluginConfDIni - files ``/etc/yum/pluginconf.d/*.conf``
-------------------------------------------------------
"""
from insights.core import IniConfigFile, LegacyItemAccess, Parser
from insights.core.plugins import parser
from insights.parsers import get_active_lines
from insights.specs import Specs
from insights.util import deprecated
@parser(Specs.pluginconf_d)
class PluginConfD(LegacyItemAccess, Parser):
"""
.. warning::
This parser is deprecated, please use
:py:class:`insights.parsers.pluginconf_d.PluginConfDIni` instead
Class to parse configuration file under ``pluginconf.d``
Sample configuration::
[main]
enabled = 0
gpgcheck = 1
timeout = 120
# You can specify options per channel, e.g.:
#
#[rhel-i386-server-5]
#enabled = 1
#
#[some-unsigned-custom-channel]
#gpgcheck = 0
"""
def parse_content(self, content):
deprecated(PluginConfD, "Deprecated. Use 'PluginConfDIni' instead.")
plugin_dict = {}
section_dict = {}
key = None
for line in get_active_lines(content):
if line.startswith('['):
section_dict = {}
plugin_dict[line[1:-1]] = section_dict
elif '=' in line:
key, _, value = line.partition("=")
key = key.strip()
section_dict[key] = value.strip()
else:
if key:
section_dict[key] = ','.join([section_dict[key], line])
self.data = plugin_dict
def __iter__(self):
for sec in self.data:
yield sec
@parser(Specs.pluginconf_d)
class PluginConfDIni(IniConfigFile):
"""
Read yum plugin config files, in INI format, using the standard INI file
parser class.
Sample configuration::
[main]
enabled = 0
gpgcheck = 1
timeout = 120
# You can specify options per channel, e.g.:
#
#[rhel-i386-server-5]
#enabled = 1
#
#[some-unsigned-custom-channel]
#gpgcheck = 0
[test]
test_multiline_config = http://example.com/repos/test/
http://mirror_example.com/repos/test/
Examples:
>>> type(conf)
<class 'insights.parsers.pluginconf_d.PluginConfDIni'>
>>> conf.sections()
['main', 'test']
>>> conf.has_option('main', 'gpgcheck')
True
>>> conf.get("main", "enabled")
'0'
>>> conf.getint("main", "timeout")
120
>>> conf.getboolean("main", "enabled")
False
>>> conf.get("test", "test_multiline_config")
'http://example.com/repos/test/ http://mirror_example.com/repos/test/'
"""
pass
| RedHatInsights/insights-core | insights/parsers/pluginconf_d.py | Python | apache-2.0 | 3,141 |
from cobra.core.loading import get_model
from cobra.core import json
class UserConfig(object):
default_config = {
'guide.task.participant': '1',
'guide.document.share': '1',
'guide.customer.share': '1',
'guide.workflow.operation': '1',
'guide.workflow.createform': '1',
'order.task.search': 'default',
'order.task.searchDirection': 'DESC',
'portal.workdyna': 'subordinates-task',
        'system.menu.display': '',
'viewState.task': 'list',
'guide.biaoge.showintro': '1',
'workreport.push.set': '1',
'agenda.push.set': '1'
}
def __init__(self, user):
self.__user_config = self.__build_user_config(user)
def __build_user_config(self, user):
UserOption = get_model('option', 'UserOption')
u_c = {}
for k, v in self.default_config.items():
u_c[k] = UserOption.objects.get_value(user, None, k, v)
return u_c
def to_python(self):
configs = []
for k, v in self.__user_config.items():
m = {
'configKey': k,
'configValue': v
}
configs.append(m)
return configs
def to_json(self):
        return json.dumps(self.to_python())
| lyoniionly/django-cobra | src/cobra/core/configure/user_config.py | Python | apache-2.0 | 1,284 |
# Copyright 2017 The Vispek Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==========================================================================
""" Example code about how to run raw_file_io
python3 -m vispek.examples.run_raw_file_io \
--in_path /Users/huaminli/Downloads/data \
--out_path /Users/huaminli/Desktop/vispek/data
"""
import argparse
from vispek.lib.io.raw_file_io import RawFileIO
def run_file_io(args):
my_file_io = RawFileIO(args.in_path, args.out_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Example code about how to run raw_file_io')
parser.add_argument(
'--in_path', type=str,
        help='absolute path to the directory that contains raw csv files')
parser.add_argument(
'--out_path', type=str,
        help='absolute path to the directory that contains ' +
        'preprocessed files')
args = parser.parse_args()
print(args.in_path)
print(args.out_path)
run_file_io(args)
| hl475/vispek | examples/run_raw_file_io.py | Python | apache-2.0 | 1,604 |
# -*- coding: utf-8 -*-
import datetime
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql import JSONB
from pns.app import app, db
class SerializationMixin():
"""serialization mixin for sqlalchemy model object
"""
def to_dict(self, *exceptions, **extra_payload):
"""get dict representation of the object
:param list exceptions: a list to discard from dict
:param dict extra_payload: new parameters to add to dict
"""
_dict = ({c.name: getattr(self, c.name) for c in self.__table__.columns
if c.name not in exceptions})
_dict.update(**extra_payload)
return _dict
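# A minimal to_dict sketch (hypothetical instance; positional names are
# dropped from the result and keyword arguments become extra keys):
#
#     user.to_dict('pns_id', device_count=user.devices.count())
#     # -> all columns except pns_id, plus a 'device_count' key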
subscriptions = db.Table('subscriptions',
db.Column('user_id', db.Integer, db.ForeignKey('user.id'), nullable=False),
db.Column('channel_id', db.Integer, db.ForeignKey('channel.id'), nullable=False),
UniqueConstraint('user_id', 'channel_id'))
channel_devices = db.Table('channel_devices',
db.Column('channel_id', db.Integer, db.ForeignKey('channel.id'), nullable=False),
db.Column('device_id', db.Integer, db.ForeignKey('device.id'), nullable=False),
UniqueConstraint('channel_id', 'device_id'))
class User(db.Model, SerializationMixin):
"""user resource
"""
id = db.Column(db.Integer, primary_key=True)
# pns_id is a unique identifier for easy third-party integration (email, citizen id etc.)
pns_id = db.Column(db.String(255), unique=True, nullable=False)
subscriptions = db.relationship('Channel',
secondary=subscriptions,
lazy='dynamic',
backref=db.backref('subscribers', lazy='dynamic'))
devices = db.relationship('Device', backref='user', lazy='dynamic',
cascade='all, delete, delete-orphan')
created_at = db.Column(db.DateTime, default=datetime.datetime.now)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def __repr__(self):
return '<User %r>' % self.id
class Channel(db.Model, SerializationMixin):
"""channel resource
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), unique=True, nullable=False)
description = db.Column(db.Text)
devices = db.relationship('Device',
secondary=channel_devices,
lazy='dynamic',
backref=db.backref('channels', lazy='dynamic'))
alerts = db.relationship('Alert', backref='channel', lazy='dynamic',
cascade='all, delete, delete-orphan')
created_at = db.Column(db.DateTime, default=datetime.datetime.now)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def subscribe_user(self, user):
try:
self.subscribers.append(user)
for device in user.devices.all():
self.devices.append(device)
db.session.add(self)
db.session.commit()
except Exception as ex:
db.session.rollback()
app.logger.exception(ex)
return False
return True
def unsubscribe_user(self, user):
try:
self.subscribers.remove(user)
for device in user.devices.all():
self.devices.remove(device)
db.session.commit()
except Exception as ex:
db.session.rollback()
app.logger.exception(ex)
return False
return True
def __repr__(self):
return '<Channel %r>' % self.id
class Alert(db.Model, SerializationMixin):
"""alert resource
"""
id = db.Column(db.Integer, primary_key=True)
channel_id = db.Column(db.Integer, db.ForeignKey('channel.id'), index=True)
payload = db.Column(JSONB, nullable=False)
created_at = db.Column(db.DateTime, default=datetime.datetime.now)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def __repr__(self):
return '<Alert %r>' % self.id
class Device(db.Model, SerializationMixin):
"""device resource
"""
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True, nullable=False)
platform = db.Column(db.String(10), index=True, nullable=False)
platform_id = db.Column(db.Text, unique=True, nullable=False)
mobile_app_id = db.Column(db.Text, index=True)
mobile_app_ver = db.Column(db.Integer, index=True)
mute = db.Column(db.Boolean, default=False, nullable=False)
created_at = db.Column(db.DateTime, default=datetime.datetime.now)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
def subscribe_to_channels(self):
"""subscribe new device to existing channels
"""
try:
for channel in self.user.subscriptions.all():
channel.devices.append(self)
db.session.add(self.user)
db.session.commit()
except Exception as ex:
db.session.rollback()
app.logger.exception(ex)
return False
return True
def __repr__(self):
return '<Device %r>' % self.id
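# A minimal usage sketch (hypothetical values; assumes the Flask-SQLAlchemy
# session imported above from pns.app is configured):
#
#     user = User(pns_id='[email protected]')
#     channel = Channel(name='alerts')
#     db.session.add_all([user, channel])
#     db.session.commit()
#     channel.subscribe_user(user)    # links the user and all their devices
#     channel.unsubscribe_user(user)  # reverses both links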
if __name__ == '__main__':
db.create_all()
| Turksat/pns | pns/models.py | Python | apache-2.0 | 5,438 |
# -*- coding: UTF-8; indent-tabs-mode:nil; tab-width:4 -*-
# This file is part of DITA DTD Generator.
#
# Copyright 2009 Jarno Elovirta <http://www.elovirta.com/>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ditagen.dita
from ditagen.dtdgen import Particle as Particle
from ditagen.dtdgen import Choice as Choice
from ditagen.dtdgen import Name as Name
from ditagen.dtdgen import Seq as Seq
from ditagen.dtdgen import Attribute as Attribute
from ditagen.dtdgen import Param as Param
from ditagen.dtdgen import ParameterEntity as ParameterEntity
# Elements
#####################################################################
OPTIONAL = Particle.Occurrences.OPTIONAL
ZERO_OR_MORE = Particle.Occurrences.ZERO_OR_MORE
class TopicElement(ditagen.dita.DitaElement):
"""Topic element."""
name = u"topic"
cls = u"- topic/topic "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("body"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ConceptElement(ditagen.dita.DitaElement):
"""Concept element."""
name = u"concept"
cls = u"- topic/topic concept/concept "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("conbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class TaskElement(ditagen.dita.DitaElement):
"""Task element."""
name = u"task"
cls = u"- topic/topic task/task "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("taskbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class ReferenceElement(ditagen.dita.DitaElement):
"""Reference element."""
name = u"reference"
cls = u"- topic/topic reference/reference "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("refbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class GlossentryElement(ditagen.dita.DitaElement):
"""Glossary entry element."""
name = u"glossentry"
cls = u"- topic/topic concept/concept glossentry/glossentry "
model = Seq([
Choice(ParameterEntity("glossterm")),
Choice(ParameterEntity("glossdef"), OPTIONAL),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("glossBody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class GlossgroupElement(ditagen.dita.DitaElement):
"""Glossary group element."""
name = u"glossgroup"
cls = u"- topic/topic concept/concept glossgroup/glossgroup "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class LearningBaseElement(ditagen.dita.DitaElement):
"""Learning Base element."""
name = u"learningBase"
cls = u"- topic/topic learningBase/learningBase "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningBasebody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
]
class LearningAssessmentElement(ditagen.dita.DitaElement):
"""Learning Assessment element."""
name = u"learningAssessment"
cls = u"- topic/topic learningBase/learningBase learningAssessment/learningAssessment "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningAssessmentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningOverviewElement(ditagen.dita.DitaElement):
"""Learning Overview element."""
name = u"learningOverview"
cls = u"- topic/topic learningBase/learningBase learningOverview/learningOverview "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningOverviewbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningPlanElement(ditagen.dita.DitaElement):
"""Learning Plan element."""
name = u"learningPlan"
cls = u"- topic/topic learningBase/learningBase learningPlan/learningPlan "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningPlanbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningSummaryElement(ditagen.dita.DitaElement):
"""Learning Summary element."""
name = u"learningSummary"
cls = u"- topic/topic learningBase/learningBase learningSummary/learningSummary "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningSummarybody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class LearningContentElement(ditagen.dita.DitaElement):
"""Learning Content element."""
name = u"learningContent"
cls = u"- topic/topic learningBase/learningBase learningContent/learningContent "
model = Seq([
Choice(ParameterEntity("title")),
Choice(ParameterEntity("titlealts"), OPTIONAL),
Choice([ParameterEntity("shortdesc"), ParameterEntity("abstract")], Param("shortdesc")),
Choice(ParameterEntity("prolog"), OPTIONAL),
Choice(ParameterEntity("learningContentbody"), OPTIONAL),
Choice(ParameterEntity("related-links"), OPTIONAL),
Param("nested")
])
attrs = [
Attribute("id", "ID" ,"#REQUIRED"),
ParameterEntity("conref-atts"),
ParameterEntity("select-atts"),
ParameterEntity("localization-atts"),
Attribute("outputclass", "CDATA", "#IMPLIED")
]
class SubjectSchemeElement(ditagen.dita.DitaElement):
"""Subject scheme element."""
name = u"subjectScheme"
cls = u"- map/map subjectScheme/subjectScheme "
model = Seq([
Choice(ParameterEntity("title"), OPTIONAL),
Choice(ParameterEntity("topicmeta"), OPTIONAL),
Choice([
ParameterEntity("anchor"),
ParameterEntity("data.elements.incl"),
ParameterEntity("enumerationdef"),
ParameterEntity("hasInstance"),
ParameterEntity("hasKind"),
ParameterEntity("hasNarrower"),
ParameterEntity("hasPart"),
ParameterEntity("hasRelated"),
ParameterEntity("navref"),
ParameterEntity("relatedSubjects"),
ParameterEntity("reltable"),
ParameterEntity("schemeref"),
ParameterEntity("subjectdef"),
ParameterEntity("subjectHead"),
ParameterEntity("subjectRelTable"),
ParameterEntity("topicref")
], ZERO_OR_MORE)
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class MapElement(ditagen.dita.DitaElement):
"""Map element."""
name = u"map"
cls = u"- map/map "
model = Seq([
Choice(ParameterEntity("title"), OPTIONAL),
Choice(ParameterEntity("topicmeta"), OPTIONAL),
Choice([
ParameterEntity("anchor"),
ParameterEntity("data.elements.incl"),
ParameterEntity("navref"),
ParameterEntity("reltable"),
ParameterEntity("topicref")
], ZERO_OR_MORE)
])
attrs = [
Attribute("title", "CDATA", "#IMPLIED"),
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
class BookMapElement(ditagen.dita.DitaElement):
"""BookMap element."""
name = u"bookmap"
cls = u"- map/map bookmap/bookmap "
model = Seq([
Choice([Choice(ParameterEntity("title")), Choice(ParameterEntity("booktitle"))], OPTIONAL),
Choice(ParameterEntity("bookmeta"), OPTIONAL),
Choice(ParameterEntity("frontmatter"), OPTIONAL),
Choice(ParameterEntity("chapter"), ZERO_OR_MORE),
Choice(ParameterEntity("part"), ZERO_OR_MORE),
Choice([Choice(ParameterEntity("appendices"), OPTIONAL), Choice(ParameterEntity("appendix"), ZERO_OR_MORE)]),
Choice(ParameterEntity("backmatter"), OPTIONAL),
Choice(ParameterEntity("reltable"), ZERO_OR_MORE)
])
attrs = [
Attribute("id", "ID", "#REQUIRED"),
ParameterEntity("conref-atts"),
Attribute("anchorref", "CDATA", "#IMPLIED"),
Attribute("outputclass", "CDATA", "#IMPLIED"),
ParameterEntity("localization-atts"),
ParameterEntity("topicref-atts"),
ParameterEntity("select-atts")
]
# Topic types
#####################################################################
class TopicType(ditagen.dita.Type):
"""Topic topic type."""
id = u"topic"
file = u"base/dtd/topic" # the .dtd file is at technicalContent
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Topic//EN"
title = u"Topic"
parent = None
root = TopicElement()
class ConceptType(TopicType):
"""Concept topic type."""
id = u"concept"
file = u"technicalContent/dtd/concept"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Concept//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Concept//EN"
title = u"Concept"
parent = TopicType()
root = ConceptElement()
class TaskType(TopicType):
"""Task topic type."""
id = u"task"
file = u"technicalContent/dtd/task"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task//EN"
title = u"Task"
parent = TopicType()
root = TaskElement()
def __init__(self):
super(TaskType, self).__init__()
#self.required_domains = [StrictTaskbodyConstraints]
class GeneralTaskType(ditagen.dita.ShellType):
"""General Task topic type."""
def __init__(self):
super(GeneralTaskType, self).__init__(u"generalTask", u"General Task", TaskType())
#self.parent.required_domains = []
class ReferenceType(TopicType):
"""Reference topic type."""
id = u"reference"
file = u"technicalContent/dtd/reference"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Reference//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Reference//EN"
title = u"Reference"
parent = TopicType()
root = ReferenceElement()
class MapType(ditagen.dita.Type):
"""Map topic type."""
id = u"map"
file = u"base/dtd/map" # the .dtd file is at technicalContent
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map//EN"
title = u"Map"
parent = None
root = MapElement()
class BookMapType(MapType):
"""BookMap topic type."""
id = u"bookmap"
file = u"bookmap/dtd/bookmap"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 BookMap//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 BookMap//EN"
title = u"BookMap"
parent = MapType()
root = BookMapElement()
class GlossentryType(ConceptType):
"""Glossary entry topic type."""
id = u"glossentry"
file = u"technicalContent/dtd/glossentry"
pi_entity = u"-//OASIS//ENTITIES DITA Glossary Entry//EN"
pi_module = u"-//OASIS//ELEMENTS DITA Glossary Entry//EN"
title = u"Glossary Entry"
parent = ConceptType()
root = GlossentryElement()
class GlossgroupType(ConceptType):
"""Glossary group topic type."""
id = u"glossgroup"
file = u"technicalContent/dtd/glossgroup"
pi_entity = u"-//OASIS//ENTITIES DITA Glossary Group//EN"
pi_module = u"-//OASIS//ELEMENTS DITA Glossary Group//EN"
title = u"Glossary Group"
parent = ConceptType()
root = GlossgroupElement()
class MachineryTaskType(ditagen.dita.ShellType):
"""Machinery Task topic type."""
def __init__(self):
super(MachineryTaskType, self).__init__(u"machineryTask", u"Machinery Task", TaskType(), file=u"machineryIndustry/dtd/machineryTask")
#self.parent.required_domains = [MachineryTaskbodyConstraints]
class LearningBaseType(TopicType):
"""Learning Base topic type."""
id = u"learningBase"
file = u"learning/dtd/learningBase"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Base//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Base//EN"
title = u"Learning Base"
parent = TopicType()
root = LearningBaseElement()
class LearningAssessmentType(LearningBaseType):
"""Learning Assessment topic type."""
id = u"learningAssessment"
file = u"learning/dtd/learningAssessment"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Assessment//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Assessment//EN"
title = u"Learning Assessment"
parent = LearningBaseType()
root = LearningAssessmentElement()
class LearningOverviewType(LearningBaseType):
"""Learning Overview topic type."""
id = u"learningOverview"
file = u"learning/dtd/learningOverview"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Overview//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Overview//EN"
title = u"Learning Overview"
parent = LearningBaseType()
root = LearningOverviewElement()
class LearningPlanType(LearningBaseType):
"""Learning Plan topic type."""
id = u"learningPlan"
file = u"learning/dtd/learningPlan"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Plan//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Plan//EN"
title = u"Learning Plan"
parent = LearningBaseType()
root = LearningPlanElement()
class LearningSummaryType(LearningBaseType):
"""Learning Summary topic type."""
id = u"learningSummary"
file = u"learning/dtd/learningSummary"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Summary//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Summary//EN"
title = u"Learning Summary"
parent = LearningBaseType()
root = LearningSummaryElement()
class LearningContentType(LearningBaseType):
"""Learning Content topic type."""
id = u"learningContent"
file = u"learning/dtd/learningContent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Content//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Content//EN"
title = u"Learning Content"
parent = LearningBaseType()
root = LearningContentElement()
def __init__(self):
super(LearningContentType, self).__init__()
self.required_types = [TaskType, ConceptType, ReferenceType, LearningSummaryType, LearningAssessmentType]
class LearningMapType(ditagen.dita.ShellType):
"""Learning Map topic type."""
def __init__(self):
super(LearningMapType, self).__init__(u"learningMap", u"Learning Map", MapType(), file=u"learning/dtd/learningMap")
#self.parent.required_domains = []
class LearningBookMapType(ditagen.dita.ShellType):
"""Learning BookMap topic type."""
def __init__(self):
super(LearningBookMapType, self).__init__(u"learningBookmap", u"Learning BookMap", BookMapType(), file=u"learning/dtd/learningBookmap")
#self.parent.required_domains = []
class ClassificationMapType(ditagen.dita.ShellType):
"""Classification Map topic type."""
def __init__(self):
super(ClassificationMapType, self).__init__(u"classifyMap", u"Classification Map", MapType(), file=u"subjectScheme/dtd/classifyMap")
#self.parent.required_domains = []
class SubjectSchemeType(MapType):
"""Subject Scheme Map topic type."""
id = u"subjectScheme"
file = u"subjectScheme/dtd/subjectScheme"
title = u"Subject Scheme Map"
parent = MapType()
root = SubjectSchemeElement()
# Domains
#####################################################################
class Constraints(ditagen.dita.DomainBase):
"""Base class for constraints."""
# file_suffix = u""
pi_suffix = u" Constraint"
elements = []
att_id = None
def get_file_name(self, extension):
return self.file + self.file_suffix + "." + extension
class AttributeDomain(ditagen.dita.DomainBase):
"""Base class for attribute domains."""
# file_suffix = u"Att"
pi_suffix = u" Attribute Domain"
#elements = []
attributes = []
def get_file_name(self, extension):
return self.file + self.file_suffix + "." + extension
# Domains
class UiDomain(ditagen.dita.Domain):
"""User interface domain."""
id = u"ui-d"
si_module = u"technicalContent/dtd/uiDomain.mod"
si_entity = u"technicalContent/dtd/uiDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 User Interface Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 User Interface Domain//EN"
title = u"User Interface"
elements = [u"pre", u"keyword", u"ph"]
parent = [TopicType]
class HiDomain(ditagen.dita.Domain):
"""Hilight domain."""
id = u"hi-d"
si_module = u"base/dtd/highlightDomain.mod"
si_entity = u"base/dtd/highlightDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Highlight Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Highlight Domain//EN"
title = u"Highlight"
elements = [u"ph"]
parent = [TopicType]
class PrDomain(ditagen.dita.Domain):
"""Programmign domain."""
id = u"pr-d"
si_module = u"technicalContent/dtd/programmingDomain.mod"
si_entity = u"technicalContent/dtd/programmingDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Programming Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Programming Domain//EN"
title = u"Programming"
elements = [u"pre", u"keyword", u"ph", u"fig", u"dl"]
parent = [TopicType]
class SwDomain(ditagen.dita.Domain):
"""Software development domain."""
id = u"sw-d"
si_module = u"technicalContent/dtd/softwareDomain.mod"
si_entity = u"technicalContent/dtd/softwareDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Software Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Software Domain//EN"
title = u"Software"
elements = [u"pre", u"keyword", u"ph"]
parent = [TopicType]
class UtDomain(ditagen.dita.Domain):
"""Utilities domain."""
id = u"ut-d"
si_module = u"base/dtd/utilitiesDomain.mod"
si_entity = u"base/dtd/utilitiesDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Utilities Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Utilities Domain//EN"
title = u"Utilities"
elements = [u"fig"]
parent = [TopicType]
class IndexingDomain(ditagen.dita.Domain):
"""Indexing domain."""
id = u"indexing-d"
si_module = u"base/dtd/indexingDomain.mod"
si_entity = u"base/dtd/indexingDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Indexing Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Indexing Domain//EN"
title = u"Indexing"
elements = [u"index-base"]
parent = [TopicType, MapType]
class LearningDomain(ditagen.dita.Domain):
"""Learning domain."""
id = u"learning-d"
si_module = u"learning/dtd/learningDomain.mod"
si_entity = u"learning/dtd/learningDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Domain//EN"
title = u"Learning"
elements = [u"note", u"fig"]
    # XXX: This builds on the utilities domain, hence required_domains below.
parent = [TopicType]
required_domains = [UtDomain]
class LearningMetaDomain(ditagen.dita.Domain):
"""Learning metadata domain."""
id = u"learningmeta-d"
si_module = u"learning/dtd/learningMetadataDomain.mod"
si_entity = u"learning/dtd/learningMetadataDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Metadata Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Metadata Domain//EN"
title = u"Learning Metadata"
elements = [u"metadata"]
parent = [TopicType]
class LearningMapDomain(ditagen.dita.Domain):
"""Learning map domain."""
id = u"learningmap-d"
si_module = u"learning/dtd/learningMapDomain.mod"
si_entity = u"learning/dtd/learningMapDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Learning Map Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Learning Map Domain//EN"
title = u"Learning Map"
elements = [u"topicref"]
parent = [MapType]
class TaskRequirementsDomain(ditagen.dita.Domain):
"""Task requirements domain."""
id = u"taskreq-d"
si_module = u"technicalContent/dtd/taskreqDomain.mod"
si_entity = u"technicalContent/dtd/taskreqDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Task Requirements Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Task Requirements Domain//EN"
title = u"Machine Industry Task"
elements = [u"prereq", u"postreq"]
parent = [TaskType]
class HazardStatementDomain(ditagen.dita.Domain):
"""Hazard statement domain."""
id = u"hazard-d"
si_module = u"base/dtd/hazardstatementDomain.mod"
si_entity = u"base/dtd/hazardstatementDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Hazard Statement Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Hazard Statement Domain//EN"
title = u"Hazard Statement"
elements = [u"note"]
parent = [TopicType]
class MapGroupDomain(ditagen.dita.Domain):
"""Map group domain."""
id = u"mapgroup-d"
si_module = u"base/dtd/mapGroup.mod"
si_entity = u"base/dtd/mapGroup.ent" # This is an exception to DITA's naming scheme
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Map Group Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Map Group Domain//EN"
title = u"Map Group"
elements = [u"topicref"]
parent = [MapType]
class AbbreviatedFormDomain(ditagen.dita.Domain):
"""Abbreviated form domain."""
id = u"abbrev-d"
si_module = u"technicalContent/dtd/abbreviateDomain.mod"
si_entity = u"technicalContent/dtd/abbreviateDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Abbreviated Form Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Abbreviated Form Domain//EN"
title = u"Abbreviated Form"
elements = [u"term"]
parent = [TopicType]
class XNALDomain(ditagen.dita.Domain):
"""XNAL domain."""
id = u"xnal-d"
si_module = u"xnal/dtd/xnalDomain.mod"
si_entity = u"xnal/dtd/xnalDomain.ent"
title = u"XNAL"
elements = [u"author"]
parent = [MapType]
class UserDelayedResolutionDomain(ditagen.dita.Domain):
"""User delayed resolution domain."""
id = u"delay-d"
si_module = u"base/dtd/delayResolutionDomain.mod"
si_entity = u"base/dtd/delayResolutionDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Delayed Resolution Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Delayed Resolution Domain//EN"
title = u"Delayed Resolution"
elements = [u"keywords"]
parent = [TopicType, MapType]
class ClassifyDomain(ditagen.dita.Domain):
"""Classify domain."""
id = u"classify-d"
si_module = u"subjectScheme/dtd/classifyDomain.mod"
si_entity = u"subjectScheme/dtd/classifyDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Classification Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Classification Domain//EN"
title = u"Map Subject Classification"
elements = [u"topicref", u"reltable"]
parent = [TopicType, MapType]
class GlossaryReferenceDomain(ditagen.dita.Domain):
"""Glossary reference domain."""
id = u"glossref-d"
si_module = u"technicalContent/dtd/glossrefDomain.mod"
si_entity = u"technicalContent/dtd/glossrefDomain.ent"
pi_entity = u"-//OASIS//ENTITIES DITA 1.2 Glossary Reference Domain//EN"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Glossary Reference Domain//EN"
title = u"Glossary Reference"
elements = [u"topicref"]
parent = [MapType]
# Constraints
class StrictTaskbodyConstraints(Constraints):
"""Strict taskbody constraints."""
id = u"strictTaskbody-c"
si_module = u"technicalContent/dtd/strictTaskbodyConstraint.mod"
si_entity = u"technicalContent/dtd/strictTaskbodyConstraint.ent"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Strict Taskbody Constraint//EN"
title = u"Strict Taskbody"
parent = [TaskType]
att_id = u"taskbody"
class MachineryTaskbodyConstraints(Constraints):
"""Machinery taskbody constraints."""
id = u"machineryTaskbody-c"
si_module = u"machineryIndustry/dtd/machineryTaskbodyConstraint.mod"
si_entity = u"machineryIndustry/dtd/machineryTaskbodyConstraint.ent"
pi_module = u"-//OASIS//ELEMENTS DITA 1.2 Machinery Taskbody Constraint//EN"
title = u"Machinery Taskbody"
parent = [TaskType]
att_id = u"taskbody"
# Defaults
TopicType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
ConceptType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
TaskType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain, StrictTaskbodyConstraints]
GeneralTaskType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
ReferenceType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
MachineryTaskType.default_domains = [TaskRequirementsDomain, HazardStatementDomain, HiDomain, UtDomain, IndexingDomain, PrDomain, SwDomain, UiDomain, MachineryTaskbodyConstraints]
MapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, GlossaryReferenceDomain]
BookMapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, XNALDomain]
ClassificationMapType.default_domains = [MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, ClassifyDomain]
SubjectSchemeType.default_domains = [MapGroupDomain]
LearningAssessmentType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningBookMapType.default_domains = [LearningMapDomain, LearningMetaDomain, MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain, XNALDomain]
LearningContentType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningMapType.default_domains = [LearningMapDomain, LearningMetaDomain, MapGroupDomain, IndexingDomain, UserDelayedResolutionDomain]
LearningOverviewType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningPlanType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
LearningSummaryType.default_domains = [LearningDomain, LearningMetaDomain, HiDomain, UtDomain, IndexingDomain]
GlossentryType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
GlossgroupType.default_domains = [HiDomain, UtDomain, IndexingDomain, HazardStatementDomain, AbbreviatedFormDomain, PrDomain, SwDomain, UiDomain]
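# A minimal usage sketch (hypothetical; DTD generation itself is driven by
# the rest of ditagen):
#
#     topic = ConceptType()
#     topic.id, topic.title                        # (u'concept', u'Concept')
#     topic.parent.id                              # u'topic'
#     [d.id for d in ConceptType.default_domains]  # [u'hi-d', u'ut-d', ...]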
| jelovirt/dita-generator | src/ditagen/dita/v1_2.py | Python | apache-2.0 | 32,591 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-30 12:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('webcore', '0016_profile_emails'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='emails',
),
]
| Nikita1710/ANUFifty50-Online-Mentoring-Platform | project/fifty_fifty/webcore/migrations/0017_remove_profile_emails.py | Python | apache-2.0 | 388 |
from distutils.core import setup
PKGLIST = ['gearman_geodis']
setup(name='gearman-geodis',
version='1.0.0',
description='Geolocation Gearman worker powered by Geodis',
author_email='[email protected]',
license='Apache License, Version 2.0',
packages=PKGLIST,
scripts=['gearman_geodis/geodis_worker.py', 'gearman_geodis/gearman_geodisd.py', 'gearman_geodis/stdin_geodis_worker.py'],
data_files=[('/etc/sysconfig/',['support/gearman_geodis.sysconfig']),
('/etc/init.d/',['support/gearman_geodis'])]
)
| shazamengineering/gearman-geodis | setup.py | Python | apache-2.0 | 570 |
from distutils.core import setup
from src import __version__
setup(
name="irma.common",
version=__version__,
author="Quarkslab",
author_email="[email protected]",
description="The common component of the IRMA software",
packages=["irma.common",
"irma.common.base",
"irma.common.utils",
"irma.common.configuration",
"irma.common.ftp",
"irma.common.plugins"],
package_dir={"irma.common": "src",
"irma.common.utils": "src/utils",
"irma.common.base": "src/base",
"irma.common.plugins": "src/plugins"},
namespace_packages=["irma"]
)
| quarkslab/irma | common/setup.py | Python | apache-2.0 | 683 |
# -*- coding: utf-8 -*-
import hashlib
import random
from rest_framework import serializers
from sita.users.models import User
from sita.subscriptions.models import Subscription
from sita.utils.refresh_token import create_token
from hashlib import md5
from datetime import datetime, timedelta
import pytz
class LoginSerializer(serializers.Serializer):
"""
Serializer for user login
"""
email = serializers.EmailField(
required=True
)
password = serializers.CharField(
required=True
)
device_os= serializers.ChoiceField(
required=False,
choices=['ANDROID', 'IOS']
)
device_token= serializers.CharField(
required=False,
max_length=254
)
def validate(self, data):
"""
Validation email, password and active status
"""
try:
user = User.objects.get(email__exact=data.get('email'))
except User.DoesNotExist:
raise serializers.ValidationError({"email":"invalid credentials"})
if not user.check_password(data.get('password')):
raise serializers.ValidationError({"email":"invalid credentials"})
if data.get("device_os") or data.get("device_token"):
if not data.get("device_os") or not data.get("device_token"):
raise serializers.ValidationError(
{"device_token":"Don`t send device OS or device token"})
if not user.is_active:
raise serializers.ValidationError(
{"email":"The user is not actived"}
)
return data
def get_user(self, data):
"""
return user object
"""
return User.objects.get(email__exact=data.get('email'))
class SignUpSerializer(serializers.Serializer):
""""""
TYPE_OS = (
('1', 'IOS'),
('2', 'ANDROID')
)
email = serializers.EmailField(
max_length=254,
required=True
)
password = serializers.CharField(
max_length=100,
required=True
)
time_zone = serializers.CharField(
max_length=100,
required=True
)
name = serializers.CharField(
required=False,
max_length = 100
)
phone = serializers.CharField(
required=False,
max_length=10
)
device_os= serializers.ChoiceField(
required=False,
choices=['ANDROID', 'IOS']
)
device_token= serializers.CharField(
required=False,
max_length=254
)
conekta_card = serializers.CharField(
max_length=254,
required=False
)
subscription_id= serializers.IntegerField(
required=False
)
def validate(self, data):
if data.get("device_os") or data.get("device_token"):
if not data.get("device_os") or not data.get("device_token"):
raise serializers.ValidationError(
{"device_token":"Don`t send device OS or device token"})
if data.get("conekta_card"):
if not data.get("phone") or not data.get("name") or not data.get("subscription_id"):
raise serializers.ValidationError(
{"conekta_card":
"If send conektaCard you should send phone and name"})
try:
subscription = Subscription.objects.get(id=data.get('subscription_id'))
except Subscription.DoesNotExist:
raise serializers.ValidationError(
{"subscription_id":"That subscription don't exists"}
)
try:
user = User.objects.get(email__exact=data.get('email'))
raise serializers.ValidationError(
{"email":"The user is not actived"}
)
except User.DoesNotExist:
pass
try:
datetime.now(pytz.timezone(data.get("time_zone")))
except pytz.UnknownTimeZoneError:
raise serializers.ValidationError(
{"time_zone":"The time zone is not correct"}
)
return data
class LoginResponseSerializer(object):
"""
Serializer used to return the proper token, when the user was succesfully
logged in.
"""
def __init__(self):
pass
def get_token(self,obj):
"""
Create token.
"""
return create_token(obj)
class RecoveryPasswordSerializer(serializers.Serializer):
"""
Serializer for user recovery password
"""
email = serializers.EmailField(
required=True
)
def validate(self, data):
"""
Validation email and active status
"""
try:
user = User.objects.get(email__exact=data.get('email'))
except User.DoesNotExist:
raise serializers.ValidationError("invalid credentials")
if not user.is_active:
raise serializers.ValidationError(
{"email":"The user is not actived"}
)
return data
def generate_recovery_token(self, data):
""" Generate code to recovery password. """
user = User.objects.get(email__exact=data.get('email'))
email = user.email
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
if isinstance(email, unicode):
email = email.encode('utf-8')
key = hashlib.sha1(salt + email).hexdigest()
user.reset_pass_code = key
user.save()
return True
class ResetPasswordWithCodeSerializer(serializers.Serializer):
"""
Serializer for user login
"""
password = serializers.CharField(
required=True
)
    password_confirm = serializers.CharField(
required=True
)
recovery_code = serializers.CharField(
required=True
)
def validate(self, data):
"""
Validation email, password and active status
"""
try:
user = User.objects.get(reset_pass_code=data.get('recovery_code'))
except User.DoesNotExist:
raise serializers.ValidationError(
{"recovery_code":"Don't exits code"})
        if data.get('password') != data.get('password_confirm'):
            raise serializers.ValidationError(
                {"password_confirm":
                    "Password and confirmation password do not match"})
return data
def update_password(self, data):
"""
Change password
"""
user = User.objects.get(reset_pass_code=data.get('recovery_code'))
user.reset_pass_code = None
user.set_password(data.get('password'))
user.save()
return True
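# A minimal usage sketch (hypothetical request payload):
#
#     serializer = LoginSerializer(data={'email': '[email protected]',
#                                        'password': 'secret'})
#     if serializer.is_valid():
#         user = serializer.get_user(serializer.validated_data)
#         token = LoginResponseSerializer().get_token(user)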
| Fabfm4/Sita-BackEnd | src/sita/authentication/serializers.py | Python | apache-2.0 | 6,676 |
# Copyright 2016-2018 Michael Peters
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cProfile
from scipy.stats import norm
# annotate a function with @profile to see where it's spending the most time
def profile(func):
def profiled_func(*args, **kwargs):
p = cProfile.Profile()
try:
p.enable()
result = func(*args, **kwargs)
p.disable()
return result
finally:
p.print_stats()
return profiled_func
# annotate a function with @print_models
def print_models(func):
def printed_func(*args, **kwargs):
model = func(*args, **kwargs)
cv_keys = ('mean_test_score', 'std_test_score', 'params')
for r, _ in enumerate(model.cv_results_['mean_test_score']):
print("%0.3f +/- %0.2f %r" % (model.cv_results_[cv_keys[0]][r],
model.cv_results_[cv_keys[1]][r] / 2.0,
model.cv_results_[cv_keys[2]][r]))
print('Best parameters: %s' % model.best_params_)
print('Best accuracy: %.2f' % model.best_score_)
return model
return printed_func
# https://www.pro-football-reference.com/about/win_prob.htm
def mov_to_win_percent(u, m=11, offset=0):
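    # u: expected margin of victory (points), m: standard deviation of the
    # margin, offset: optional adjustment (e.g. home court). A final margin
    # inside (-0.5, 0.5) is treated as a tie and counted as half a win.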
u = u + offset
return 1 - norm.cdf(0.5, loc=u, scale=m) + .5 * (norm.cdf(0.5, loc=u, scale=m) - norm.cdf(-0.5, loc=u, scale=m))
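# Example (hypothetical numbers): a team favored by 3 points with the
# default spread wins roughly 61% of the time:
#
#     mov_to_win_percent(3)    # ~0.61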
| opiethehokie/march-madness-predictions | ml/util.py | Python | apache-2.0 | 1,932 |
import sys
import numpy as np
from normalization import tokenize
from helpers import ahash
class KerasVectorizer():
'''
Convert list of documents to numpy array for input into Keras model
'''
def __init__(self, n_features=100000, maxlen=None, maxper=100, hash_function=ahash):
self.maxlen = maxlen
self.maxper = maxper
self.n_features = n_features
self.hash_function = hash_function
def _exact_hash(self, word, n_features):
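        # NOTE: assumes a `token_lookup` dict has been attached to the
        # instance by an earlier fitting step (not shown here); unknown
        # words fall back to index 0.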
return self.token_lookup.get(word, 0)
def fit_transform(self, raw_documents, y=None, suffix='', verbose=True):
if verbose:
print >> sys.stderr, 'splitting raw documents'
# Some way to print progress?
tokens = map(self._split_function, raw_documents)
if self.maxlen:
maxlen = self.maxlen
else:
maxlen = int(np.percentile(map(len, tokens), self.maxper))
self.maxlen = maxlen
X = np.zeros((len(tokens), maxlen))
for i,t in enumerate(tokens):
if verbose:
if not i % 10000:
print >> sys.stderr, 'processed %d tokens' % i
if len(t) > 0:
X[i,-len(t):] = map(lambda x: self.hash_function(x + suffix, self.n_features), t[:maxlen])
return X
class KerasCharacterVectorizer(KerasVectorizer):
'''
Split a string into characters
'''
def _split_function(self, doc):
return list(doc)
class KerasTokenVectorizer(KerasVectorizer):
'''
Split a string into words,
'''
def _split_function(self, doc):
return tokenize(doc, keep_punctuation=True)
class KerasPretokenizedVectorizer(KerasVectorizer):
def _split_function(self, doc):
return doc
'''
from keras_vectorizer import KerasTokenVectorizer, KerasCharacterVectorizer
ktv = KerasTokenVectorizer()
ktv.fit_transform(['this is a test'])
ktv.fit_transform(['this is a test', 'this is a another test'])
ktv = KerasTokenVectorizer(maxlen=2)
ktv.fit_transform(['this is a test', 'this is a another test'])
kcv = KerasCharacterVectorizer()
kcv.fit_transform(['something', 'else'])
'''
| gophronesis/smlib | smlib/keras_vectorizer.py | Python | apache-2.0 | 2,229 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ansible.module_utils.basic import AnsibleModule
import git
import itertools
import multiprocessing
import os
import signal
import time
DOCUMENTATION = """
---
module: git_requirements
short_description: Module to run a multithreaded git clone
options:
repo_info:
description:
- List of repo information dictionaries containing at
a minimum a key entry "src" with the source git URL
to clone for each repo. In these dictionaries, one
can further specify:
"path" - destination clone location
"version" - git version to checkout
"refspec" - git refspec to checkout
"depth" - clone depth level
"force" - require git clone uses "--force"
default_path:
description:
            Default git clone parent directory (str) used
            when not specified on an individual repo basis
            in repo_info. Required.
default_version:
description:
Default git version (str) in case not
specified on an individual repo basis in
repo_info. Defaults to "master". Not
required.
default_refspec:
description:
Default git repo refspec (str) in case not
specified on an individual repo basis in
repo_info. Defaults to "". Not required.
default_depth:
description:
Default clone depth (int) in case not specified
on an individual repo basis. Defaults to 10.
Not required.
retries:
description:
Integer number of retries allowed in case of git
clone failure. Defaults to 1. Not required.
delay:
description:
Integer time delay (seconds) between git clone
retries in case of failure. Defaults to 0. Not
required.
force:
description:
Boolean. Apply --force flags to git clones wherever
possible. Defaults to False. Not required.
core_multiplier:
description:
Integer multiplier on the number of cores
present on the machine to use for
multithreading. For example, on a 2 core
machine, a multiplier of 4 would use 8
threads. Defaults to 4. Not required.
"""
EXAMPLES = r"""
- name: Clone repos
git_requirements:
repo_info: "[{'src':'https://github.com/ansible/',
                  'name': 'ansible',
'dest': '/etc/opt/ansible'}]"
"""
def init_signal():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def check_out_version(repo, version, pull=False, force=False,
refspec=None, tag=False, depth=10):
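    # Fetch, then check out `version`; with force=True a dirty work tree is
    # cleaned, and pull=True additionally pulls. Returns [] on success or a
    # one-element list holding the error message.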
try:
repo.git.fetch(tags=tag, force=force, refspec=refspec, depth=depth)
except Exception as e:
return ["Failed to fetch %s\n%s" % (repo.working_dir, str(e))]
try:
repo.git.checkout(version, force=force)
except Exception as e:
return [
"Failed to check out version %s for %s\n%s" %
(version, repo.working_dir, str(e))]
if repo.is_dirty(untracked_files=True) and force:
try:
repo.git.clean(force=force)
except Exception as e:
return [
"Failed to clean up repository% s\n%s" %
(repo.working_dir, str(e))]
if pull:
try:
repo.git.pull(force=force, refspec=refspec, depth=depth)
except Exception as e:
return ["Failed to pull repo %s\n%s" % (repo.working_dir, str(e))]
return []
def pull_wrapper(info):
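    # info is a (role, config, failures) tuple produced by the zip() in
    # main(); retry the clone/checkout, sleeping `delay` seconds between
    # attempts, and record a failure message once retries are exhausted.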
role_info = info
retries = info[1]["retries"]
delay = info[1]["delay"]
for i in range(retries):
success = pull_role(role_info)
if success:
return True
else:
time.sleep(delay)
info[2].append(["Role {0} failed after {1} retries\n".format(role_info[0],
retries)])
return False
def pull_role(info):
role, config, failures = info
required_version = role["version"]
version_hash = False
if 'version' in role:
# If the version is the length of a hash then treat is as one
if len(required_version) == 40:
version_hash = True
def get_repo(dest):
try:
return git.Repo(dest)
except Exception:
failtxt = "Role in {0} is broken/not a git repo.".format(
role["dest"])
failtxt += "Please delete or fix it manually"
failures.append(failtxt)
return False
# if repo exists
if os.path.exists(role["dest"]):
repo = get_repo(role["dest"])
if not repo:
return False # go to next role
repo_url = list(repo.remote().urls)[0]
if repo_url != role["src"]:
repo.remote().set_url(role["src"])
# if they want master then fetch, checkout and pull to stay at latest
# master
if required_version == "master":
fail = check_out_version(repo, required_version, pull=True,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
# If we have a hash then reset it to
elif version_hash:
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
else:
# describe can fail in some cases so be careful:
try:
current_version = repo.git.describe(tags=True)
except Exception:
current_version = ""
if current_version == required_version and not config["force"]:
fail = []
pass
else:
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"],
tag=True)
else:
try:
# If we have a hash id then treat this a little differently
if version_hash:
git.Repo.clone_from(role["src"], role["dest"],
branch='master',
no_single_branch=True,
depth=role["depth"])
repo = get_repo(role["dest"])
if not repo:
return False # go to next role
fail = check_out_version(repo, required_version,
force=config["force"],
refspec=role["refspec"],
depth=role["depth"])
else:
git.Repo.clone_from(role["src"], role["dest"],
branch=required_version,
depth=role["depth"],
no_single_branch=True)
fail = []
except Exception as e:
        fail = ['Failed cloning repo %s\n%s' % (role["dest"], str(e))]
if fail == []:
return True
else:
failures.append(fail)
return False
def set_default(dictionary, key, defaults):
if key not in dictionary.keys():
dictionary[key] = defaults[key]
def main():
# Data we can pass in to the module
fields = {
"repo_info": {"required": True, "type": "list"},
"default_path": {"required": True,
"type": "str"},
"default_version": {"required": False,
"type": "str",
"default": "master"},
"default_refspec": {"required": False,
"type": "str",
"default": None},
"default_depth": {"required": False,
"type": "int",
"default": 10},
"retries": {"required": False,
"type": "int",
"default": 1},
"delay": {"required": False,
"type": "int",
"default": 0},
"force": {"required": False,
"type": "bool",
"default": False},
"core_multiplier": {"required": False,
"type": "int",
"default": 4},
}
# Pull in module fields and pass into variables
module = AnsibleModule(argument_spec=fields)
git_repos = module.params['repo_info']
defaults = {
"path": module.params["default_path"],
"depth": module.params["default_depth"],
"version": module.params["default_version"],
"refspec": module.params["default_refspec"]
}
config = {
"retries": module.params["retries"],
"delay": module.params["delay"],
"force": module.params["force"],
"core_multiplier": module.params["core_multiplier"]
}
# Set up defaults
for repo in git_repos:
for key in ["path", "refspec", "version", "depth"]:
set_default(repo, key, defaults)
if "name" not in repo.keys():
repo["name"] = os.path.basename(repo["src"])
repo["dest"] = os.path.join(repo["path"], repo["name"])
    # Define variables
failures = multiprocessing.Manager().list()
core_count = multiprocessing.cpu_count() * config["core_multiplier"]
# Load up process and pass in interrupt and core process count
p = multiprocessing.Pool(core_count, init_signal)
clone_success = p.map(pull_wrapper, zip(git_repos,
itertools.repeat(config),
itertools.repeat(failures)),
chunksize=1)
p.close()
success = all(i for i in clone_success)
if success:
module.exit_json(msg=str(git_repos), changed=True)
else:
module.fail_json(msg=("Module failed"), meta=failures)
if __name__ == '__main__':
main()
| stackforge/os-ansible-deployment | playbooks/library/git_requirements.py | Python | apache-2.0 | 10,270 |