repo_name | path | copies | size | content | license
---|---|---|---|---|---|
chrisndodge/edx-platform | openedx/core/djangoapps/credit/api/provider.py | 25 | 16211 | """
API for initiating and tracking requests for credit from a provider.
"""
import datetime
import logging
import uuid
import pytz
from django.db import transaction
from lms.djangoapps.django_comment_client.utils import JsonResponse
from edx_proctoring.api import get_last_exam_completion_date
from openedx.core.djangoapps.credit.exceptions import (
UserIsNotEligible,
CreditProviderNotConfigured,
RequestAlreadyCompleted,
CreditRequestNotFound,
InvalidCreditStatus,
)
from openedx.core.djangoapps.credit.models import (
CreditProvider,
CreditRequirementStatus,
CreditRequest,
CreditEligibility,
)
from student.models import (
User,
CourseEnrollment,
)
from openedx.core.djangoapps.credit.signature import signature, get_shared_secret_key
from util.date_utils import to_timestamp
# TODO: Cleanup this mess! ECOM-2908
log = logging.getLogger(__name__)
def get_credit_providers(providers_list=None):
"""Retrieve all available credit providers or filter on given providers_list.
Arguments:
providers_list (list of strings or None): contains list of ids of credit providers
or None.
Returns:
list of credit providers represented as dictionaries
Response Values:
>>> get_credit_providers(['hogwarts'])
[
{
"id": "hogwarts",
"name": "Hogwarts School of Witchcraft and Wizardry",
"url": "https://credit.example.com/",
"status_url": "https://credit.example.com/status/",
"description: "A new model for the Witchcraft and Wizardry School System.",
"enable_integration": false,
"fulfillment_instructions": "
<p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p>
<ul>
<li>Sample instruction abc</li>
<li>Sample instruction xyz</li>
</ul>",
},
...
]
"""
return CreditProvider.get_credit_providers(providers_list=providers_list)
def get_credit_provider_info(request, provider_id): # pylint: disable=unused-argument
"""Retrieve the 'CreditProvider' model data against provided
credit provider.
Args:
provider_id (str): The identifier for the credit provider
Returns: 'CreditProvider' data dictionary
Example Usage:
>>> get_credit_provider_info("hogwarts")
{
"provider_id": "hogwarts",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
"provider_url": "https://credit.example.com/",
"provider_status_url": "https://credit.example.com/status/",
"provider_description: "A new model for the Witchcraft and Wizardry School System.",
"enable_integration": False,
"fulfillment_instructions": "
<p>In order to fulfill credit, Hogwarts School of Witchcraft and Wizardry requires learners to:</p>
<ul>
<li>Sample instruction abc</li>
<li>Sample instruction xyz</li>
</ul>",
"thumbnail_url": "https://credit.example.com/logo.png"
}
"""
credit_provider = CreditProvider.get_credit_provider(provider_id=provider_id)
credit_provider_data = {}
if credit_provider:
credit_provider_data = {
"provider_id": credit_provider.provider_id,
"display_name": credit_provider.display_name,
"provider_url": credit_provider.provider_url,
"provider_status_url": credit_provider.provider_status_url,
"provider_description": credit_provider.provider_description,
"enable_integration": credit_provider.enable_integration,
"fulfillment_instructions": credit_provider.fulfillment_instructions,
"thumbnail_url": credit_provider.thumbnail_url
}
return JsonResponse(credit_provider_data)
@transaction.atomic
def create_credit_request(course_key, provider_id, username):
"""
Initiate a request for credit from a credit provider.
    This will return the parameters that the user's browser will need to POST
    to the credit provider.
Only users who are eligible for credit (have satisfied all credit requirements) are allowed to make requests.
A provider can be configured either with *integration enabled* or not.
If automatic integration is disabled, this method will simply return
a URL to the credit provider and method set to "GET", so the student can
visit the URL and request credit directly. No database record will be created
to track these requests.
If automatic integration *is* enabled, then this will also return the parameters
that the user's browser will need to POST to the credit provider.
    These parameters will be digitally signed using a secret key shared with the credit provider
    (a minimal verification sketch follows this function).
A database record will be created to track the request with a 32-character UUID.
The returned dictionary can be used by the user's browser to send a POST request to the credit provider.
If a pending request already exists, this function should return a request description with the same UUID.
    (Other parameters, such as the user's full name, may differ from the original request.)
If a completed request (either accepted or rejected) already exists, this function will
raise an exception. Users are not allowed to make additional requests once a request
has been completed.
Arguments:
course_key (CourseKey): The identifier for the course.
provider_id (str): The identifier of the credit provider.
username (str): The user initiating the request.
Returns: dict
Raises:
UserIsNotEligible: The user has not satisfied eligibility requirements for credit.
CreditProviderNotConfigured: The credit provider has not been configured for this course.
RequestAlreadyCompleted: The user has already submitted a request and received a response
from the credit provider.
Example Usage:
>>> create_credit_request(course.id, "hogwarts", "ron")
{
"url": "https://credit.example.com/request",
"method": "POST",
"parameters": {
"request_uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_org": "HogwartsX",
"course_num": "Potions101",
"course_run": "1T2015",
"final_grade": "0.95",
"user_username": "ron",
"user_email": "[email protected]",
"user_full_name": "Ron Weasley",
"user_mailing_address": "",
"user_country": "US",
"signature": "cRCNjkE4IzY+erIjRwOQCpRILgOvXx4q2qvx141BCqI="
}
}
"""
try:
user_eligibility = CreditEligibility.objects.select_related('course').get(
username=username,
course__course_key=course_key
)
credit_course = user_eligibility.course
credit_provider = CreditProvider.objects.get(provider_id=provider_id)
except CreditEligibility.DoesNotExist:
log.warning(
u'User "%s" tried to initiate a request for credit in course "%s", '
u'but the user is not eligible for credit',
username, course_key
)
raise UserIsNotEligible
except CreditProvider.DoesNotExist:
log.error(u'Credit provider with ID "%s" has not been configured.', provider_id)
raise CreditProviderNotConfigured
# Check if we've enabled automatic integration with the credit
# provider. If not, we'll show the user a link to a URL
# where the user can request credit directly from the provider.
# Note that we do NOT track these requests in our database,
# since the state would always be "pending" (we never hear back).
if not credit_provider.enable_integration:
return {
"url": credit_provider.provider_url,
"method": "GET",
"parameters": {}
}
else:
# If automatic credit integration is enabled, then try
# to retrieve the shared signature *before* creating the request.
# That way, if there's a misconfiguration, we won't have requests
# in our system that we know weren't sent to the provider.
shared_secret_key = get_shared_secret_key(credit_provider.provider_id)
if shared_secret_key is None:
msg = u'Credit provider with ID "{provider_id}" does not have a secret key configured.'.format(
provider_id=credit_provider.provider_id
)
log.error(msg)
raise CreditProviderNotConfigured(msg)
# Initiate a new request if one has not already been created
credit_request, created = CreditRequest.objects.get_or_create(
course=credit_course,
provider=credit_provider,
username=username,
)
        # Check whether we've already gotten a response for this request.
        # If so, we're not allowed to issue any further requests.
# Skip checking the status if we know that we just created this record.
if not created and credit_request.status != "pending":
log.warning(
(
u'Cannot initiate credit request because the request with UUID "%s" '
u'exists with status "%s"'
), credit_request.uuid, credit_request.status
)
raise RequestAlreadyCompleted
if created:
credit_request.uuid = uuid.uuid4().hex
# Retrieve user account and profile info
user = User.objects.select_related('profile').get(username=username)
# Retrieve the final grade from the eligibility table
try:
final_grade = CreditRequirementStatus.objects.get(
username=username,
requirement__namespace="grade",
requirement__name="grade",
requirement__course__course_key=course_key,
status="satisfied"
).reason["final_grade"]
# NOTE (CCB): Limiting the grade to seven characters is a hack for ASU.
if len(unicode(final_grade)) > 7:
final_grade = u'{:.5f}'.format(final_grade)
else:
final_grade = unicode(final_grade)
except (CreditRequirementStatus.DoesNotExist, TypeError, KeyError):
msg = 'Could not retrieve final grade from the credit eligibility table for ' \
'user [{user_id}] in course [{course_key}].'.format(user_id=user.id, course_key=course_key)
log.exception(msg)
raise UserIsNotEligible(msg)
        # Getting the student's enrollment date
course_enrollment = CourseEnrollment.get_enrollment(user, course_key)
enrollment_date = course_enrollment.created if course_enrollment else ""
# Getting the student's course completion date
completion_date = get_last_exam_completion_date(course_key, username)
parameters = {
"request_uuid": credit_request.uuid,
"timestamp": to_timestamp(datetime.datetime.now(pytz.UTC)),
"course_org": course_key.org,
"course_num": course_key.course,
"course_run": course_key.run,
"enrollment_timestamp": to_timestamp(enrollment_date) if enrollment_date else "",
"course_completion_timestamp": to_timestamp(completion_date) if completion_date else "",
"final_grade": final_grade,
"user_username": user.username,
"user_email": user.email,
"user_full_name": user.profile.name,
"user_mailing_address": "",
"user_country": (
user.profile.country.code
if user.profile.country.code is not None
else ""
),
}
credit_request.parameters = parameters
credit_request.save()
if created:
log.info(u'Created new request for credit with UUID "%s"', credit_request.uuid)
else:
log.info(
u'Updated request for credit with UUID "%s" so the user can re-issue the request',
credit_request.uuid
)
# Sign the parameters using a secret key we share with the credit provider.
parameters["signature"] = signature(parameters, shared_secret_key)
return {
"url": credit_provider.provider_url,
"method": "POST",
"parameters": parameters
}
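# Hedged example (not part of the original module): a minimal sketch, assuming
# the shared-secret scheme implemented by signature(), of how a credit
# provider could verify the signed parameters produced by
# create_credit_request(). The helper name is hypothetical.
def _example_verify_signed_parameters(parameters, provider_id):
    """Return True if the 'signature' entry matches the other parameters."""
    shared_secret_key = get_shared_secret_key(provider_id)
    received = dict(parameters)  # copy so the caller's dict is not mutated
    claimed_signature = received.pop("signature", None)
    return signature(received, shared_secret_key) == claimed_signature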
def update_credit_request_status(request_uuid, provider_id, status):
"""
Update the status of a credit request.
Approve or reject a request for a student to receive credit in a course
from a particular credit provider.
This function does NOT check that the status update is authorized.
The caller needs to handle authentication and authorization (checking the signature
of the message received from the credit provider)
The function is idempotent; if the request has already been updated to the status,
the function does nothing.
Arguments:
request_uuid (str): The unique identifier for the credit request.
provider_id (str): Identifier for the credit provider.
status (str): Either "approved" or "rejected"
Returns: None
Raises:
CreditRequestNotFound: No request exists that is associated with the given provider.
InvalidCreditStatus: The status is not either "approved" or "rejected".
"""
if status not in [CreditRequest.REQUEST_STATUS_APPROVED, CreditRequest.REQUEST_STATUS_REJECTED]:
raise InvalidCreditStatus
try:
request = CreditRequest.objects.get(uuid=request_uuid, provider__provider_id=provider_id)
old_status = request.status
request.status = status
request.save()
log.info(
u'Updated request with UUID "%s" from status "%s" to "%s" for provider with ID "%s".',
request_uuid, old_status, status, provider_id
)
except CreditRequest.DoesNotExist:
msg = (
u'Credit provider with ID "{provider_id}" attempted to '
u'update request with UUID "{request_uuid}", but no request '
u'with this UUID is associated with the provider.'
).format(provider_id=provider_id, request_uuid=request_uuid)
log.warning(msg)
raise CreditRequestNotFound(msg)
def get_credit_requests_for_user(username):
"""
    Retrieve the credit requests initiated by a user.
    Each request's status is either "pending", "approved", or "rejected".
Arguments:
username (unicode): The username of the user who initiated the requests.
Returns: list
Example Usage:
>>> get_credit_request_status_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return CreditRequest.credit_requests_for_user(username)
def get_credit_request_status(username, course_key):
"""Get the credit request status.
    This function returns the status of the user's credit request for the given
    course. It returns the latest request status across all credit providers.
    Valid statuses are 'pending', 'approved' and 'rejected'.
Args:
username(str): The username of user
course_key(CourseKey): The course locator key
Returns:
        A dictionary describing the user's credit request, or an empty dict if no request exists
"""
credit_request = CreditRequest.get_user_request_status(username, course_key)
return {
"uuid": credit_request.uuid,
"timestamp": credit_request.modified,
"course_key": credit_request.course.course_key,
"provider": {
"id": credit_request.provider.provider_id,
"display_name": credit_request.provider.display_name
},
"status": credit_request.status
} if credit_request else {}
| agpl-3.0 |
takis/django | django/contrib/auth/admin.py | 28 | 8624 | from django.conf import settings
from django.conf.urls import url
from django.contrib import admin, messages
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.utils import unquote
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import (
AdminPasswordChangeForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import Group, User
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
@admin.register(Group)
class GroupAdmin(admin.ModelAdmin):
search_fields = ('name',)
ordering = ('name',)
filter_horizontal = ('permissions',)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == 'permissions':
qs = kwargs.get('queryset', db_field.remote_field.model.objects)
# Avoid a major performance hit resolving permission names which
# triggers a content_type load:
kwargs['queryset'] = qs.select_related('content_type')
return super(GroupAdmin, self).formfield_for_manytomany(
db_field, request=request, **kwargs)
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2'),
}),
)
form = UserChangeForm
add_form = UserCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('username', 'first_name', 'last_name', 'email')
ordering = ('username',)
filter_horizontal = ('groups', 'user_permissions',)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(UserAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults['form'] = self.add_form
defaults.update(kwargs)
return super(UserAdmin, self).get_form(request, obj, **defaults)
def get_urls(self):
return [
url(r'^(.+)/password/$', self.admin_site.admin_view(self.user_change_password), name='auth_user_password_change'),
] + super(UserAdmin, self).get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith('password'):
return False
return super(UserAdmin, self).lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
@transaction.atomic
def add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, Django requires that your user '
'account have both the "Add user" and "Change user" '
'permissions set.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {
'auto_populated_fields': (),
'username_help_text': username_field.help_text,
}
extra_context.update(defaults)
return super(UserAdmin, self).add_view(request, form_url,
extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=''):
if not self.has_change_permission(request):
raise PermissionDenied
user = self.get_object(request, unquote(id))
if user is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': force_text(self.model._meta.verbose_name),
'key': escape(id),
})
if request.method == 'POST':
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
change_message = self.construct_change_message(request, form, None)
self.log_change(request, user, change_message)
msg = ugettext('Password changed successfully.')
messages.success(request, msg)
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(
reverse(
'%s:auth_%s_change' % (
self.admin_site.name,
user._meta.model_name,
),
args=(user.pk,),
)
)
else:
form = self.change_password_form(user)
fieldsets = [(None, {'fields': list(form.base_fields)})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
'title': _('Change password: %s') % escape(user.get_username()),
'adminForm': adminForm,
'form_url': form_url,
'form': form,
'is_popup': (IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
'add': True,
'change': False,
'has_delete_permission': False,
'has_change_permission': True,
'has_absolute_url': False,
'opts': self.model._meta,
'original': user,
'save_as': False,
'show_save': True,
}
context.update(admin.site.each_context(request))
request.current_app = self.admin_site.name
return TemplateResponse(request,
self.change_user_password_template or
'admin/auth/user/change_password.html',
context)
def response_add(self, request, obj, post_url_continue=None):
"""
Determines the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:
request.POST['_continue'] = 1
return super(UserAdmin, self).response_add(request, obj,
post_url_continue)
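# Hedged usage sketch (an assumption, not part of django.contrib.auth): a
# project that wants extra admin columns typically unregisters the stock
# UserAdmin and re-registers a subclass. Kept as comments because this module
# already registers UserAdmin above.
#
#   from django.contrib import admin
#   from django.contrib.auth import get_user_model
#   from django.contrib.auth.admin import UserAdmin
#
#   User = get_user_model()
#   admin.site.unregister(User)
#
#   @admin.register(User)
#   class ProjectUserAdmin(UserAdmin):
#       list_display = UserAdmin.list_display + ('date_joined',)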
| bsd-3-clause |
monkeysecurity/security_monkey | manage.py | 1 | 4070 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask.ext.script import Manager, Command, Option
from security_monkey import app, db
from security_monkey.common.route53 import Route53Service
from gunicorn.app.base import Application
from flask.ext.migrate import Migrate, MigrateCommand
from security_monkey.scheduler import run_change_reporter as sm_run_change_reporter
from security_monkey.scheduler import find_changes as sm_find_changes
from security_monkey.scheduler import audit_changes as sm_audit_changes
from security_monkey.backup import backup_config_to_json as sm_backup_config_to_json
manager = Manager(app)
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
@manager.command
def drop_db():
""" Drops the database. """
db.drop_all()
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
def run_change_reporter(accounts):
""" Runs Reporter """
sm_run_change_reporter(accounts)
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
@manager.option('-m', '--monitors', dest='monitors', type=unicode, default=u'all')
def find_changes(accounts, monitors):
"""Runs watchers"""
sm_find_changes(accounts, monitors)
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
@manager.option('-m', '--monitors', dest='monitors', type=unicode, default=u'all')
@manager.option('-r', '--send_report', dest='send_report', type=bool, default=False)
def audit_changes(accounts, monitors, send_report):
""" Runs auditors """
sm_audit_changes(accounts, monitors, send_report)
@manager.option('-a', '--accounts', dest='accounts', type=unicode, default=u'all')
@manager.option('-m', '--monitors', dest='monitors', type=unicode, default=u'all')
@manager.option('-o', '--outputfolder', dest='outputfolder', type=unicode, default=u'backups')
def backup_config_to_json(accounts, monitors, outputfolder):
"""Saves the most current item revisions to a json file."""
sm_backup_config_to_json(accounts, monitors, outputfolder)
@manager.command
def start_scheduler():
""" starts the python scheduler to run the watchers and auditors"""
from security_monkey import scheduler
scheduler.setup_scheduler()
scheduler.scheduler.start()
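# Hedged usage examples (inferred from the option definitions above, not taken
# from the project's documentation):
#
#   python manage.py run_change_reporter -a all
#   python manage.py find_changes -a all -m all
#   python manage.py start_scheduler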
class APIServer(Command):
def __init__(self, host='127.0.0.1', port=app.config.get('API_PORT'), workers=6):
self.address = "{}:{}".format(host, port)
self.workers = workers
def get_options(self):
return (
Option('-b', '--bind',
dest='address',
type=str,
default=self.address),
Option('-w', '--workers',
dest='workers',
type=int,
default=self.workers),
)
def handle(self, app, *args, **kwargs):
if app.config.get('USE_ROUTE53'):
route53 = Route53Service()
route53.register(app.config.get('FQDN'), exclusive=True)
workers = kwargs['workers']
address = kwargs['address']
class FlaskApplication(Application):
def init(self, parser, opts, args):
return {
'bind': address,
'workers': workers
}
def load(self):
return app
FlaskApplication().run()
if __name__ == "__main__":
manager.add_command("run_api_server", APIServer())
manager.run()
| apache-2.0 |
tahmid-tanzim/youtube-dl | youtube_dl/extractor/ro220.py | 176 | 1451 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
class Ro220IE(InfoExtractor):
IE_NAME = '220.ro'
_VALID_URL = r'(?x)(?:https?://)?(?:www\.)?220\.ro/(?P<category>[^/]+)/(?P<shorttitle>[^/]+)/(?P<id>[^/]+)'
_TEST = {
'url': 'http://www.220.ro/sport/Luati-Le-Banii-Sez-4-Ep-1/LYV6doKo7f/',
'md5': '03af18b73a07b4088753930db7a34add',
'info_dict': {
'id': 'LYV6doKo7f',
'ext': 'mp4',
'title': 'Luati-le Banii sez 4 ep 1',
'description': 're:^Iata-ne reveniti dupa o binemeritata vacanta\. +Va astept si pe Facebook cu pareri si comentarii.$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
url = compat_urllib_parse_unquote(self._search_regex(
r'(?s)clip\s*:\s*{.*?url\s*:\s*\'([^\']+)\'', webpage, 'url'))
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
formats = [{
'format_id': 'sd',
'url': url,
'ext': 'mp4',
}]
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
| unlicense |
wkritzinger/asuswrt-merlin | release/src/router/samba-3.5.8/source4/scripting/python/samba/tests/dcerpc/unix.py | 24 | 1336 | #!/usr/bin/python
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <[email protected]> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from samba.dcerpc import unixinfo
from samba.tests import RpcInterfaceTestCase
class UnixinfoTests(RpcInterfaceTestCase):
def setUp(self):
self.conn = unixinfo.unixinfo("ncalrpc:", self.get_loadparm())
def test_getpwuid(self):
infos = self.conn.GetPWUid(range(512))
self.assertEquals(512, len(infos))
self.assertEquals("/bin/false", infos[0].shell)
self.assertTrue(isinstance(infos[0].homedir, unicode))
def test_gidtosid(self):
self.conn.GidToSid(1000)
def test_uidtosid(self):
self.conn.UidToSid(1000)
| gpl-2.0 |
yatish27/mase | python101/code/anagram_db.py | 14 | 1134 | """This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import shelve
import sys
from anagram_sets import *
def store_anagrams(filename, ad):
"""Stores the anagrams in ad in a shelf.
filename: string file name of shelf
ad: dictionary that maps strings to list of anagrams
"""
shelf = shelve.open(filename, 'c')
for word, word_list in ad.iteritems():
shelf[word] = word_list
shelf.close()
def read_anagrams(filename, word):
"""Looks up a word in a shelf and returns a list of its anagrams.
filename: string file name of shelf
word: word to look up
"""
shelf = shelve.open(filename)
sig = signature(word)
try:
return shelf[sig]
except KeyError:
return []
def main(name, command='store'):
if command == 'store':
ad = all_anagrams('words.txt')
store_anagrams('anagrams.db', ad)
else:
print read_anagrams('anagrams.db', command)
if __name__ == '__main__':
main(*sys.argv)
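# Hedged usage examples (assume anagram_sets and a words.txt file are
# available alongside this script):
#
#   python anagram_db.py store   # build anagrams.db from words.txt
#   python anagram_db.py stop    # print the anagrams of "stop" from the shelf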
| unlicense |
robin900/gspread-dataframe | setup.py | 1 | 1661 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
import sys
PY3 = sys.version_info >= (3, 0)
with open(os.path.join(os.path.dirname(__file__), 'VERSION'), 'rb') as f:
VERSION = f.read()
if PY3:
VERSION = VERSION.decode('utf8')
VERSION = VERSION.strip()
with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'rb') as f:
long_description = f.read()
if PY3:
long_description = long_description.decode('utf8')
setup(
name='gspread-dataframe',
version=VERSION,
py_modules=['gspread_dataframe'],
test_suite='tests',
install_requires=[
'gspread>=3.0.0',
'pandas>=0.24.0',
'six>=1.12.0'
],
tests_require=['oauth2client'] + ([] if PY3 else ['mock']),
description='Read/write gspread worksheets using pandas DataFrames',
long_description=long_description,
author='Robin Thomas',
author_email='[email protected]',
license='MIT',
url='https://github.com/robin900/gspread-dataframe',
keywords=['spreadsheets', 'google-spreadsheets', 'pandas', 'dataframe'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Office/Business :: Financial :: Spreadsheet",
"Topic :: Software Development :: Libraries :: Python Modules"
],
zip_safe=True
)
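# Hedged usage sketch (based on the package description above; obtaining the
# gspread worksheet is assumed): the module's main entry points are
# get_as_dataframe and set_with_dataframe.
#
#   from gspread_dataframe import get_as_dataframe, set_with_dataframe
#   df = get_as_dataframe(worksheet)
#   set_with_dataframe(worksheet, df)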
| mit |
ehenneken/adsws | adsws/modules/oauth2server/testsuite/test_provider.py | 4 | 21833 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import logging
from unittest import skip
from flask import url_for
from adsws.testsuite import FlaskAppTestCase, make_test_suite, \
run_test_suite
from adsws.core import db
from mock import MagicMock
from flask_oauthlib.client import prepare_request
try:
from six.moves.urllib.parse import urlparse
except ImportError:
from urllib.parse import urlparse
from .helpers import create_client
logging.basicConfig(level=logging.DEBUG)
from adsws import factory
class OAuth2ProviderTestCase(FlaskAppTestCase):
def create_app(self):
try:
#app = super(OAuth2ProviderTestCase, self).create_app()
#app = api.create_app()
app = factory.create_app('adsws.modules.oauth2server',
SQLALCHEMY_DATABASE_URI='sqlite://',
TESTING = True,
LOGIN_DISABLED = False, # necessary to exercise flask_login+oauth2
WTF_CSRF_ENABLED = False,
DEBUG=False,
SQLALCHEMY_ECHO = False,
SITE_SECURE_URL='http://localhost',
EXTENSIONS=['adsws.ext.template',
'adsws.ext.sqlalchemy',
'adsws.ext.mail',
'adsws.ext.menu',
'adsws.ext.security'],
PACKAGES=['adsws.modules.oauth2server'])
app.testing = True
app.config.update(dict(
OAUTH2_CACHE_TYPE='simple',
))
client = create_client(app, 'oauth2test')
client.http_request = MagicMock(
side_effect=self.patch_request(app)
)
db.create_all(app=app)
return app
except Exception as e:
print(e)
raise
def patch_request(self, app):
test_client = app.test_client()
def make_request(uri, headers=None, data=None, method=None):
uri, headers, data, method = prepare_request(
uri, headers, data, method
)
if not headers and data is not None:
headers = {
'Content-Type': ' application/x-www-form-urlencoded'
}
# test client is a `werkzeug.test.Client`
parsed = urlparse(uri)
uri = '%s?%s' % (parsed.path, parsed.query)
resp = test_client.open(
uri, headers=headers, data=data, method=method
)
# for compatible
resp.code = resp.status_code
return resp, resp.data
return make_request
def setUp(self):
super(OAuth2ProviderTestCase, self).setUp()
        # Set OAUTHLIB_INSECURE_TRANSPORT so that oauthlib allows testing
        # without SSL.
if self.app.config.get('SITE_SECURE_URL').startswith('http://'):
self.os_debug = os.environ.get('OAUTHLIB_INSECURE_TRANSPORT', '')
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = 'true'
from ..models import OAuthClient, Scope
from adsws.core import user_manipulator
from ..registry import scopes as scopes_registry
# Register a test scope
scopes_registry.register(Scope('test:scope'))
self.base_url = self.app.config.get('SITE_SECURE_URL')
# Create needed objects
u = user_manipulator.new(
email='[email protected]',
password = 'tester',
active=True
)
u2 = user_manipulator.new(
email='[email protected]',
password = 'tester2',
active=True
)
user_manipulator.save(u)
user_manipulator.save(u2)
c1 = OAuthClient(
client_id='dev',
client_secret='dev',
name='dev',
description='',
is_confidential=False,
user_id=u.id,
_redirect_uris='%s/oauth2test/authorized' % self.base_url,
_default_scopes="test:scope"
)
c2 = OAuthClient(
client_id='confidential',
client_secret='confidential',
name='confidential',
description='',
is_confidential=True,
user_id=u.id,
_redirect_uris='%s/oauth2test/authorized' % self.base_url,
_default_scopes="test:scope"
)
db.session.add(c1)
db.session.add(c2)
db.session.commit()
self.objects = [u, u2, c1, c2]
# Create a personal access token as well.
from ..models import OAuthToken
self.personal_token = OAuthToken.create_personal(
'test-personal', 1, scopes=[], is_internal=True
)
def tearDown(self):
super(OAuth2ProviderTestCase, self).tearDown()
        # Restore any previous value of the OAUTHLIB_INSECURE_TRANSPORT variable.
if self.app.config.get('SITE_SECURE_URL').startswith('http://'):
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = self.os_debug
self.base_url = None
for o in self.objects:
db.session.delete(o)
db.session.commit()
def parse_redirect(self, location, parse_fragment=False):
from werkzeug.urls import url_parse, url_decode, url_unparse
scheme, netloc, script_root, qs, anchor = url_parse(location)
return (
url_unparse((scheme, netloc, script_root, '', '')),
url_decode(anchor if parse_fragment else qs)
)
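    # Hedged illustration (made-up values): parse_redirect(
    #     "http://localhost/oauth2test/authorized?code=abc&state=xyz")
    # returns ("http://localhost/oauth2test/authorized",
    #          {"code": "abc", "state": "xyz"}) with the query decoded into a
    # werkzeug MultiDict; pass parse_fragment=True for implicit-grant
    # responses, whose data arrives in the URI fragment instead.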
def test_client_salt(self):
from ..models import OAuthClient
c = OAuthClient(
name='Test something',
is_confidential=True,
user_id=1,
)
c.gen_salt()
assert len(c.client_id) == \
self.app.config.get('OAUTH2_CLIENT_ID_SALT_LEN')
assert len(c.client_secret) == \
self.app.config.get('OAUTH2_CLIENT_SECRET_SALT_LEN')
db.session.add(c)
db.session.commit()
def test_invalid_authorize_requests(self):
# First login on provider site
self.login("[email protected]", "tester")
for client_id in ['dev', 'confidential']:
redirect_uri = '%s/oauth2test/authorized' % self.base_url
scope = 'test:scope'
response_type = 'code'
error_url = url_for('oauth2server.errors')
# Valid request authorize request
r = self.client.get(url_for(
'oauth2server.authorize', redirect_uri=redirect_uri,
scope=scope, response_type=response_type, client_id=client_id,
))
self.assertStatus(r, 200)
# Invalid scope
r = self.client.get(url_for(
'oauth2server.authorize', redirect_uri=redirect_uri,
scope='INVALID', response_type=response_type,
client_id=client_id,
))
self.assertStatus(r, 302)
next_url, data = self.parse_redirect(r.location)
self.assertEqual(data['error'], 'invalid_scope')
assert redirect_uri in next_url
# Invalid response type
r = self.client.get(url_for(
'oauth2server.authorize', redirect_uri=redirect_uri,
scope=scope, response_type='invalid', client_id=client_id,
))
self.assertStatus(r, 302)
next_url, data = self.parse_redirect(r.location)
self.assertEqual(data['error'], 'unauthorized_client')
assert redirect_uri in next_url
# Missing arguments
r = self.client.get(url_for(
'oauth2server.authorize', client_id=client_id,
))
self.assertStatus(r, 302)
next_url, data = self.parse_redirect(r.location)
self.assertEqual(data['error'], 'invalid_request')
assert error_url in next_url
            # Invalid client_id
r = self.client.get(url_for(
'oauth2server.authorize', redirect_uri=redirect_uri,
scope=scope, response_type=response_type, client_id='invalid',
))
self.assertStatus(r, 302)
next_url, data = self.parse_redirect(r.location)
self.assertEqual(data['error'], 'invalid_client_id')
assert error_url in next_url
r = self.client.get(next_url, query_string=data)
assert 'invalid_client_id' in r.data
def test_refresh_flow(self):
# First login on provider site
self.login("[email protected]", "tester")
data = dict(
redirect_uri='%s/oauth2test/authorized' % self.base_url,
scope='test:scope',
response_type='code',
client_id='confidential',
state='mystate'
)
r = self.client.get(url_for('oauth2server.authorize', **data))
self.assertStatus(r, 200)
data['confirm'] = 'yes'
data['scope'] = 'test:scope'
data['state'] = 'mystate'
# Obtain one time code
r = self.client.post(
url_for('oauth2server.authorize'), data=data
)
self.assertStatus(r, 302)
next_url, res_data = self.parse_redirect(r.location)
assert res_data['code']
assert res_data['state'] == 'mystate'
# Exchange one time code for access token
r = self.client.post(
url_for('oauth2server.access_token'), data=dict(
client_id='confidential',
client_secret='confidential',
grant_type='authorization_code',
code=res_data['code'],
)
)
self.assertStatus(r, 200)
assert r.json['access_token']
assert r.json['refresh_token']
assert r.json['scope'] == 'test:scope'
assert r.json['token_type'] == 'Bearer'
refresh_token = r.json['refresh_token']
old_access_token = r.json['access_token']
# Access token valid
r = self.client.get(url_for('oauth2server.info',
access_token=old_access_token))
self.assert200(r)
# Obtain new access token with refresh token
r = self.client.post(
url_for('oauth2server.access_token'), data=dict(
client_id='confidential',
client_secret='confidential',
grant_type='refresh_token',
refresh_token=refresh_token,
)
)
self.assertStatus(r, 200)
assert r.json['access_token']
assert r.json['refresh_token']
assert r.json['access_token'] != old_access_token
assert r.json['refresh_token'] != refresh_token
assert r.json['scope'] == 'test:scope'
assert r.json['token_type'] == 'Bearer'
# New access token valid
r = self.client.get(url_for('oauth2server.info',
access_token=r.json['access_token']))
self.assert200(r)
# Old access token no longer valid
r = self.client.get(url_for('oauth2server.info',
access_token=old_access_token,),
base_url=self.app.config['SITE_SECURE_URL'])
self.assert401(r)
def test_web_auth_flow(self):
        # Go to login - should redirect to oauth2 server for login and
        # authorization
#r = self.client.get('/oauth2test/test-ping')
#self.assertStatus(r, 403)
# First login on provider site
self.login("[email protected]", "tester")
r = self.client.get('/oauth2test/login')
self.assertStatus(r, 302)
next_url, data = self.parse_redirect(r.location)
# Authorize page
r = self.client.get(next_url, query_string=data)
self.assertStatus(r, 200)
# User confirms request
data['confirm'] = 'yes'
data['scope'] = 'test:scope'
data['state'] = ''
r = self.client.post(next_url, data=data)
self.assertStatus(r, 302)
next_url, data = self.parse_redirect(r.location)
assert next_url == '%s/oauth2test/authorized' % self.base_url
assert 'code' in data
# User is redirected back to client site.
# - The client view /oauth2test/authorized will in the
# background fetch the access token.
r = self.client.get(next_url, query_string=data)
self.assertStatus(r, 200)
# Authentication flow has now been completed, and the access
# token can be used to access protected resources.
r = self.client.get('/oauth2test/test-ping')
self.assert200(r)
self.assertEqual(r.json, dict(ping='pong'))
# Authentication flow has now been completed, and the access
# token can be used to access protected resources.
r = self.client.get('/oauth2test/test-ping')
self.assert200(r)
self.assertEqual(r.json, dict(ping='pong'))
r = self.client.get('/oauth2test/test-info')
self.assert200(r)
assert r.json.get('client') == 'confidential'
assert r.json.get('user') == self.objects[0].id
assert r.json.get('scopes') == [u'test:scope']
# Access token doesn't provide access to this URL.
r = self.client.get(
'/oauth2test/test-invalid',
base_url=self.app.config['SITE_SECURE_URL']
)
self.assertStatus(r, 401)
# # Now logout
r = self.client.get('/oauth2test/logout')
self.assertStatus(r, 200)
assert r.data == "logout"
# And try to access the information again
r = self.client.get('/oauth2test/test-ping')
self.assert403(r)
def test_implicit_flow(self):
# First login on provider site
self.login("[email protected]", "tester")
for client_id in ['dev', 'confidential']:
data = dict(
redirect_uri='%s/oauth2test/authorized' % self.base_url,
response_type='token', # For implicit grant type
client_id=client_id,
scope='test:scope',
state='teststate'
)
# Authorize page
r = self.client.get(url_for(
'oauth2server.authorize',
**data
))
self.assertStatus(r, 200)
# User confirms request
data['confirm'] = 'yes'
data['scope'] = 'test:scope'
data['state'] = 'teststate'
r = self.client.post(url_for('oauth2server.authorize'), data=data)
self.assertStatus(r, 302)
            # Important - the access token arrives in the URI fragment and
            # must not be sent to the server.
next_url, data = self.parse_redirect(r.location, parse_fragment=True)
assert data['access_token']
assert data['token_type'] == 'Bearer'
assert data['state'] == 'teststate'
assert data['scope'] == 'test:scope'
assert data.get('refresh_token') is None
assert next_url == '%s/oauth2test/authorized' % self.base_url
# Authentication flow has now been completed, and the client can
# use the access token to make request to the provider.
r = self.client.get(url_for('oauth2server.info',
access_token=data['access_token']))
self.assert200(r)
assert r.json.get('client') == client_id
assert r.json.get('user') == self.objects[0].id
assert r.json.get('scopes') == [u'test:scope']
def test_client_flow(self):
data = dict(
client_id='dev',
client_secret='dev', # A public client should NOT do this!
grant_type='client_credentials',
scope='test:scope',
)
# Public clients are not allowed to use grant_type=client_credentials
r = self.client.post(url_for(
'oauth2server.access_token',
**data
))
self.assertStatus(r, 401)
self.assertEqual(r.json['error'], 'invalid_client')
data = dict(
client_id='confidential',
client_secret='confidential',
grant_type='client_credentials',
scope='test:scope',
)
# Retrieve access token using client_crendentials
r = self.client.post(url_for(
'oauth2server.access_token',
**data
))
self.assertStatus(r, 200)
data = r.json
assert data['access_token']
assert data['token_type'] == 'Bearer'
assert data['scope'] == 'test:scope'
assert data.get('refresh_token') is None
# Authentication flow has now been completed, and the client can
# use the access token to make request to the provider.
r = self.client.get(url_for('oauth2server.info',
access_token=data['access_token']))
self.assert200(r)
assert r.json.get('client') == 'confidential'
assert r.json.get('user') == self.objects[0].id
assert r.json.get('scopes') == [u'test:scope']
def test_auth_flow_denied(self):
# First login on provider site
self.login("[email protected]", "tester")
r = self.client.get('/oauth2test/login')
self.assertStatus(r, 302)
next_url, data = self.parse_redirect(r.location)
# Authorize page
r = self.client.get(next_url, query_string=data)
self.assertStatus(r, 200)
# User rejects request
data['confirm'] = 'no'
data['scope'] = 'test:scope'
data['state'] = ''
r = self.client.post(next_url, data=data)
self.assertStatus(r, 302)
next_url, data = self.parse_redirect(r.location)
assert next_url == '%s/oauth2test/authorized' % self.base_url
assert data.get('error') == 'access_denied'
        # User is redirected back to the client site
r = self.client.get(next_url, query_string=data)
self.assert200(r)
assert r.data == "Access denied: error=access_denied"
def test_personal_access_token(self):
r = self.client.get(
'/oauth/ping',
query_string="access_token=%s" % self.personal_token.access_token
)
self.assert200(r)
self.assertEqual(r.json, dict(ping='pong'))
# Access token is not valid for this scope
r = self.client.get(
'/oauth/info/',
query_string="access_token=%s" % self.personal_token.access_token,
base_url=self.app.config['SITE_SECURE_URL']
)
self.assertStatus(r, 401)
def test_resource_auth_methods(self):
# Query string
r = self.client.get(
'/oauth/ping',
query_string="access_token=%s" % self.personal_token.access_token
)
self.assert200(r)
self.assertEqual(r.json, dict(ping='pong'))
# POST request body
r = self.client.post(
'/oauth/ping',
data=dict(access_token=self.personal_token.access_token),
)
self.assert200(r)
self.assertEqual(r.json, dict(ping='pong'))
# Authorization Header
r = self.client.get(
'/oauth/ping',
headers=[
("Authorization",
"Bearer %s" % self.personal_token.access_token),
]
)
self.assert200(r)
self.assertEqual(r.json, dict(ping='pong'))
@skip("Settings not yet implemented")
def test_settings_index(self):
# Create a remote account (linked account)
self.login("[email protected]", "tester")
res = self.client.get(
url_for('oauth2server_settings.index'),
base_url=self.app.config['SITE_SECURE_URL'],
)
self.assert200(res)
res = self.client.get(
url_for('oauth2server_settings.client_new'),
base_url=self.app.config['SITE_SECURE_URL'],
)
self.assert200(res)
# Valid POST
res = self.client.post(
url_for('oauth2server_settings.client_new'),
base_url=self.app.config['SITE_SECURE_URL'],
data=dict(
name='Test',
description='Test description',
website='http://invenio-software.org',
redirect_uris="http://localhost/oauth/authorized/"
)
)
self.assertStatus(res, 302)
# Invalid redirect_uri (must be https)
res = self.client.post(
url_for('oauth2server_settings.client_new'),
base_url=self.app.config['SITE_SECURE_URL'],
data=dict(
name='Test',
description='Test description',
website='http://invenio-software.org',
redirect_uris="http://example.org/oauth/authorized/"
)
)
self.assertStatus(res, 200)
# Valid
res = self.client.post(
url_for('oauth2server_settings.client_new'),
base_url=self.app.config['SITE_SECURE_URL'],
data=dict(
name='Test',
description='Test description',
website='http://invenio-software.org',
redirect_uris="https://example.org/oauth/authorized/\n"
"http://localhost:4000/oauth/authorized/"
)
)
self.assertStatus(res, 302)
TEST_SUITE = make_test_suite(OAuth2ProviderTestCase)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
todaychi/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/set_fake_passwords.py | 35 | 1846 | """
set_fake_passwords.py
Reset all user passwords to a common value. Useful for testing in a
development environment. As such, this command is only available when
setting.DEBUG is True.
"""
from optparse import make_option
from django.conf import settings
from django.core.management.base import NoArgsCommand, CommandError
from django_extensions.management.utils import signalcommand
DEFAULT_FAKE_PASSWORD = 'password'
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--prompt', dest='prompt_passwd', default=False, action='store_true',
help='Prompts for the new password to apply to all users'),
make_option('--password', dest='default_passwd', default=DEFAULT_FAKE_PASSWORD,
help='Use this as default password.'),
)
help = 'DEBUG only: sets all user passwords to a common value ("%s" by default)' % (DEFAULT_FAKE_PASSWORD, )
requires_model_validation = False
@signalcommand
def handle_noargs(self, **options):
if not settings.DEBUG:
raise CommandError('Only available in debug mode')
try:
from django.contrib.auth import get_user_model # Django 1.5
except ImportError:
from django_extensions.future_1_5 import get_user_model
if options.get('prompt_passwd', False):
from getpass import getpass
passwd = getpass('Password: ')
if not passwd:
raise CommandError('You must enter a valid password')
else:
passwd = options.get('default_passwd', DEFAULT_FAKE_PASSWORD)
User = get_user_model()
user = User()
user.set_password(passwd)
count = User.objects.all().update(password=user.password)
print('Reset %d passwords' % count)
| apache-2.0 |
Glasgow2015/team-10 | env/lib/python2.7/site-packages/treebeard/tests/conftest.py | 4 | 2640 | import os
import sys
import time
os.environ['DJANGO_SETTINGS_MODULE'] = 'treebeard.tests.settings'
import django
from django.conf import settings
from django.test.utils import (setup_test_environment,
teardown_test_environment)
from django.test.client import Client
from django.core.management import call_command
from django.core import mail
from django.db import connection
from django.db.models.base import ModelBase
from _pytest import python as _pytest_python
def idmaker(argnames, argvalues):
idlist = []
for valindex, valset in enumerate(argvalues):
this_id = []
for nameindex, val in enumerate(valset):
argname = argnames[nameindex]
if isinstance(val, (float, int, str)):
this_id.append(str(val))
elif isinstance(val, ModelBase):
this_id.append(val.__name__)
else:
this_id.append("{0}-{1}={2!s}".format(argname, valindex))
idlist.append("][".join(this_id))
return idlist
_pytest_python.idmaker = idmaker
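# Hedged illustration (names are made up): with argnames ("model", "depth")
# and a value set of (SomeTestModel, 3), the patched idmaker above yields the
# test id "SomeTestModel][3".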
def pytest_report_header(config):
return 'Django: ' + django.get_version()
def pytest_configure(config):
if django.VERSION >= (1, 7):
django.setup()
setup_test_environment()
connection.creation.create_test_db(verbosity=2, autoclobber=True)
def pytest_unconfigure(config):
dbsettings = settings.DATABASES['default']
if django.VERSION >= (1, 7):
dbtestname = dbsettings['TEST']['NAME']
else:
dbtestname = dbsettings['TEST_NAME']
connection.close()
if dbsettings['ENGINE'].split('.')[-1] == 'postgresql_psycopg2':
connection.connection = None
connection.settings_dict['NAME'] = dbtestname.split('_')[1]
cursor = connection.cursor()
connection.autocommit = True
if django.VERSION < (1, 6):
connection._set_isolation_level(0)
else:
connection._set_autocommit(True)
time.sleep(1)
sys.stdout.write(
"Destroying test database for alias '%s' (%s)...\n" % (
connection.alias, dbtestname)
)
sys.stdout.flush()
cursor.execute(
'DROP DATABASE %s' % connection.ops.quote_name(dbtestname))
else:
connection.creation.destroy_test_db(dbtestname, verbosity=2)
teardown_test_environment()
def pytest_funcarg__client(request):
def setup():
mail.outbox = []
return Client()
def teardown(client):
call_command('flush', verbosity=0, interactive=False)
return request.cached_setup(setup, teardown, 'function')
| apache-2.0 |
prabhu-k/three.js | utils/converters/obj/convert_obj_three.py | 160 | 48659 | """Convert Wavefront OBJ / MTL files into Three.js (JSON model version, to be used with ascii / binary loader)
-------------------------
How to use this converter
-------------------------
python convert_obj_three.py -i infile.obj -o outfile.js [-m "morphfiles*.obj"] [-c "morphcolors*.obj"] [-a center|centerxz|top|bottom|none] [-s smooth|flat] [-t ascii|binary] [-d invert|normal] [-b] [-e]
Notes:
- flags
-i infile.obj input OBJ file
-o outfile.js output JS file
-m "morphfiles*.obj" morph OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-c "morphcolors*.obj" morph colors OBJ files (can use wildcards, enclosed in quotes multiple patterns separate by space)
-a center|centerxz|top|bottom|none model alignment
-s smooth|flat smooth = export vertex normals, flat = no normals (face normals computed in loader)
-t ascii|binary export ascii or binary format (ascii has more features, binary just supports vertices, faces, normals, uvs and materials)
-d invert|normal invert transparency
-b bake material colors into face colors
-x 10.0 scale and truncate
-f 2 morph frame sampling step
- by default:
use smooth shading (if there were vertex normals in the original model)
will be in ASCII format
original model is assumed to use non-inverted transparency / dissolve (0.0 fully transparent, 1.0 fully opaque)
no face colors baking
no scale and truncate
morph frame step = 1 (all files will be processed)
- binary conversion will create two files:
outfile.js (materials)
outfile.bin (binary buffers)
--------------------------------------------------
How to use generated JS file in your HTML document
--------------------------------------------------
<script type="text/javascript" src="Three.js"></script>
...
<script type="text/javascript">
...
// load ascii model
var jsonLoader = new THREE.JSONLoader();
jsonLoader.load( "Model_ascii.js", createScene );
// load binary model
var binLoader = new THREE.BinaryLoader();
binLoader.load( "Model_bin.js", createScene );
function createScene( geometry, materials ) {
var mesh = new THREE.Mesh( geometry, new THREE.MeshFaceMaterial( materials ) );
}
...
</script>
-------------------------------------
Parsers based on formats descriptions
-------------------------------------
http://en.wikipedia.org/wiki/Obj
http://en.wikipedia.org/wiki/Material_Template_Library
-------------------
Current limitations
-------------------
- for the moment, only diffuse color and texture are used
(will need to extend shaders / renderers / materials in Three)
- texture coordinates can be wrong in canvas renderer
(there is crude normalization, but it doesn't
work for all cases)
- smoothing can be turned on/off only for the whole mesh
----------------------------------------------
How to get proper OBJ + MTL files with Blender
----------------------------------------------
0. Remove default cube (press DEL and ENTER)
1. Import / create model
2. Select all meshes (Select -> Select All by Type -> Mesh)
3. Export to OBJ (File -> Export -> Wavefront .obj)
- enable following options in exporter
Material Groups
Rotate X90
Apply Modifiers
High Quality Normals
Copy Images
Selection Only
Objects as OBJ Objects
UVs
Normals
Materials
- select empty folder
- give your exported file name with "obj" extension
- click on "Export OBJ" button
4. Your model is now all files in this folder (OBJ, MTL, number of images)
- this converter assumes all files staying in the same folder,
(OBJ / MTL files use relative paths)
- for WebGL, textures must be power of 2 sized
------
Author
------
AlteredQualia http://alteredqualia.com
"""
import fileinput
import operator
import random
import os.path
import getopt
import sys
import struct
import math
import glob
# #####################################################
# Configuration
# #####################################################
ALIGN = "none" # center centerxz bottom top none
SHADING = "smooth" # smooth flat
TYPE = "ascii" # ascii binary
TRANSPARENCY = "normal" # normal invert
TRUNCATE = False
SCALE = 1.0
FRAMESTEP = 1
BAKE_COLORS = False
# default colors for debugging (each material gets one distinct color):
# white, red, green, blue, yellow, cyan, magenta
COLORS = [0xeeeeee, 0xee0000, 0x00ee00, 0x0000ee, 0xeeee00, 0x00eeee, 0xee00ee]
# #####################################################
# Templates
# #####################################################
TEMPLATE_FILE_ASCII = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"colors" : %(ncolor)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"scale" : %(scale)f,
"materials": [%(materials)s],
"vertices": [%(vertices)s],
"morphTargets": [%(morphTargets)s],
"morphColors": [%(morphColors)s],
"normals": [%(normals)s],
"colors": [%(colors)s],
"uvs": [[%(uvs)s]],
"faces": [%(faces)s]
}
"""
TEMPLATE_FILE_BIN = u"""\
{
"metadata" :
{
"formatVersion" : 3.1,
"sourceFile" : "%(fname)s",
"generatedBy" : "OBJConverter",
"vertices" : %(nvertex)d,
"faces" : %(nface)d,
"normals" : %(nnormal)d,
"uvs" : %(nuv)d,
"materials" : %(nmaterial)d
},
"materials": [%(materials)s],
"buffers": "%(buffers)s"
}
"""
TEMPLATE_VERTEX = "%f,%f,%f"
TEMPLATE_VERTEX_TRUNCATE = "%d,%d,%d"
TEMPLATE_N = "%.5g,%.5g,%.5g"
TEMPLATE_UV = "%.5g,%.5g"
TEMPLATE_COLOR = "%.3g,%.3g,%.3g"
TEMPLATE_COLOR_DEC = "%d"
TEMPLATE_MORPH_VERTICES = '\t{ "name": "%s", "vertices": [%s] }'
TEMPLATE_MORPH_COLORS = '\t{ "name": "%s", "colors": [%s] }'
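# Hedged illustration (values are made up): the scalar templates above are
# filled with %-formatting, e.g.
#   TEMPLATE_VERTEX % (1.0, 2.0, 3.0) == "1.000000,2.000000,3.000000"
#   TEMPLATE_UV % (0.5, 0.25) == "0.5,0.25"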
# #####################################################
# Utils
# #####################################################
def file_exists(filename):
"""Return true if file exists and is accessible for reading.
Should be safer than just testing for existence due to links and
permissions magic on Unix filesystems.
@rtype: boolean
"""
try:
f = open(filename, 'r')
f.close()
return True
except IOError:
return False
def get_name(fname):
"""Create model name based of filename ("path/fname.js" -> "fname").
"""
return os.path.splitext(os.path.basename(fname))[0]
def bbox(vertices):
"""Compute bounding box of vertex array.
"""
if len(vertices)>0:
minx = maxx = vertices[0][0]
miny = maxy = vertices[0][1]
minz = maxz = vertices[0][2]
for v in vertices[1:]:
if v[0]<minx:
minx = v[0]
elif v[0]>maxx:
maxx = v[0]
if v[1]<miny:
miny = v[1]
elif v[1]>maxy:
maxy = v[1]
if v[2]<minz:
minz = v[2]
elif v[2]>maxz:
maxz = v[2]
return { 'x':[minx,maxx], 'y':[miny,maxy], 'z':[minz,maxz] }
else:
return { 'x':[0,0], 'y':[0,0], 'z':[0,0] }
def translate(vertices, t):
"""Translate array of vertices by vector t.
"""
for i in xrange(len(vertices)):
vertices[i][0] += t[0]
vertices[i][1] += t[1]
vertices[i][2] += t[2]
def center(vertices):
"""Center model (middle of bounding box).
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0] + (bb['y'][1] - bb['y'][0])/2.0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def top(vertices):
"""Align top of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][1]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def bottom(vertices):
"""Align bottom of the model with the floor (Y-axis) and center it around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = bb['y'][0]
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def centerxz(vertices):
"""Center model around X and Z.
"""
bb = bbox(vertices)
cx = bb['x'][0] + (bb['x'][1] - bb['x'][0])/2.0
cy = 0
cz = bb['z'][0] + (bb['z'][1] - bb['z'][0])/2.0
translate(vertices, [-cx,-cy,-cz])
def normalize(v):
"""Normalize 3d vector"""
l = math.sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
if l:
v[0] /= l
v[1] /= l
v[2] /= l
def veckey3(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
# #####################################################
# MTL parser
# #####################################################
def texture_relative_path(fullpath):
texture_file = os.path.basename(fullpath.replace("\\", "/"))
return texture_file
def parse_mtl(fname):
"""Parse MTL file.
"""
materials = {}
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (e.g. "newmtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Material start
# newmtl identifier
if chunks[0] == "newmtl":
if len(chunks) > 1:
identifier = chunks[1]
else:
identifier = ""
if not identifier in materials:
materials[identifier] = {}
# Diffuse texture
# map_Kd texture_diffuse.jpg
if chunks[0] == "map_Kd" and len(chunks) == 2:
materials[identifier]["mapDiffuse"] = texture_relative_path(chunks[1])
# Ambient texture
# map_Ka texture_ambient.jpg
if chunks[0] == "map_Ka" and len(chunks) == 2:
materials[identifier]["mapAmbient"] = texture_relative_path(chunks[1])
# Specular texture
# map_Ks texture_specular.jpg
if chunks[0] == "map_Ks" and len(chunks) == 2:
materials[identifier]["mapSpecular"] = texture_relative_path(chunks[1])
# Alpha texture
# map_d texture_alpha.png
if chunks[0] == "map_d" and len(chunks) == 2:
materials[identifier]["transparent"] = True
materials[identifier]["mapAlpha"] = texture_relative_path(chunks[1])
# Bump texture
# map_bump texture_bump.jpg or bump texture_bump.jpg
if (chunks[0] == "map_bump" or chunks[0] == "bump") and len(chunks) == 2:
materials[identifier]["mapBump"] = texture_relative_path(chunks[1])
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Diffuse color
# Kd 1.000 1.000 1.000
if chunks[0] == "Kd" and len(chunks) == 4:
materials[identifier]["colorDiffuse"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Ambient color
# Ka 1.000 1.000 1.000
if chunks[0] == "Ka" and len(chunks) == 4:
materials[identifier]["colorAmbient"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular color
# Ks 1.000 1.000 1.000
if chunks[0] == "Ks" and len(chunks) == 4:
materials[identifier]["colorSpecular"] = [float(chunks[1]), float(chunks[2]), float(chunks[3])]
# Specular coefficient
# Ns 154.000
if chunks[0] == "Ns" and len(chunks) == 2:
materials[identifier]["specularCoef"] = float(chunks[1])
# Transparency
# Tr 0.9 or d 0.9
if (chunks[0] == "Tr" or chunks[0] == "d") and len(chunks) == 2:
materials[identifier]["transparent"] = True
if TRANSPARENCY == "invert":
materials[identifier]["transparency"] = 1.0 - float(chunks[1])
else:
materials[identifier]["transparency"] = float(chunks[1])
# Optical density
# Ni 1.0
if chunks[0] == "Ni" and len(chunks) == 2:
materials[identifier]["opticalDensity"] = float(chunks[1])
# Illumination
# illum 2
#
# 0. Color on and Ambient off
# 1. Color on and Ambient on
# 2. Highlight on
# 3. Reflection on and Ray trace on
# 4. Transparency: Glass on, Reflection: Ray trace on
# 5. Reflection: Fresnel on and Ray trace on
# 6. Transparency: Refraction on, Reflection: Fresnel off and Ray trace on
# 7. Transparency: Refraction on, Reflection: Fresnel on and Ray trace on
# 8. Reflection on and Ray trace off
# 9. Transparency: Glass on, Reflection: Ray trace off
# 10. Casts shadows onto invisible surfaces
if chunks[0] == "illum" and len(chunks) == 2:
materials[identifier]["illumination"] = int(chunks[1])
return materials
# #####################################################
# OBJ parser
# #####################################################
def parse_vertex(text):
"""Parse text chunk specifying single vertex.
Possible formats:
vertex index
vertex index / texture index
vertex index / texture index / normal index
vertex index / / normal index
"""
v = 0
t = 0
n = 0
chunks = text.split("/")
v = int(chunks[0])
if len(chunks) > 1:
if chunks[1]:
t = int(chunks[1])
if len(chunks) > 2:
if chunks[2]:
n = int(chunks[2])
return { 'v':v, 't':t, 'n':n }
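# Illustrative examples (not part of the original script); missing indices
# stay 0, which is falsy and therefore skipped by the face parser:
#   parse_vertex("4/7/2")  -> {'v': 4, 't': 7, 'n': 2}
#   parse_vertex("4//2")   -> {'v': 4, 't': 0, 'n': 2}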
def parse_obj(fname):
"""Parse OBJ file.
"""
vertices = []
normals = []
uvs = []
faces = []
materials = {}
material = ""
mcounter = 0
mcurrent = 0
mtllib = ""
# current face state
group = 0
object = 0
smooth = 0
previous_line = ""
for line in fileinput.input(fname):
line = previous_line + line
if line[-2:-1] == '\\':
previous_line = line[:-2]
continue
previous_line = ""
# Only split once initially for single-parameter tags that might have additional spaces in
# their values (e.g. "usemtl Material with spaces").
chunks = line.split(None, 1)
if len(chunks) > 0:
if len(chunks) > 1:
chunks[1] = chunks[1].strip()
# Group
if chunks[0] == "g" and len(chunks) == 2:
group = chunks[1]
# Object
if chunks[0] == "o" and len(chunks) == 2:
object = chunks[1]
# Materials definition
if chunks[0] == "mtllib" and len(chunks) == 2:
mtllib = chunks[1]
# Material
if chunks[0] == "usemtl":
if len(chunks) > 1:
material = chunks[1]
else:
material = ""
if not material in materials:
mcurrent = mcounter
materials[material] = mcounter
mcounter += 1
else:
mcurrent = materials[material]
# Split the remaining parameters.
if len(chunks) > 1:
chunks = [chunks[0]] + chunks[1].split()
# Vertices as (x,y,z) coordinates
# v 0.123 0.234 0.345
if chunks[0] == "v" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
vertices.append([x,y,z])
# Normals in (x,y,z) form; normals might not be unit length
# vn 0.707 0.000 0.707
if chunks[0] == "vn" and len(chunks) == 4:
x = float(chunks[1])
y = float(chunks[2])
z = float(chunks[3])
normals.append([x,y,z])
# Texture coordinates in (u,v[,w]) coordinates, w is optional
# vt 0.500 -1.352 [0.234]
if chunks[0] == "vt" and len(chunks) >= 3:
u = float(chunks[1])
v = float(chunks[2])
w = 0
if len(chunks)>3:
w = float(chunks[3])
uvs.append([u,v,w])
# Face
if chunks[0] == "f" and len(chunks) >= 4:
vertex_index = []
uv_index = []
normal_index = []
# Precompute vert / normal / uv lists
# for negative index lookup
vertlen = len(vertices) + 1
normlen = len(normals) + 1
uvlen = len(uvs) + 1
for v in chunks[1:]:
vertex = parse_vertex(v)
if vertex['v']:
if vertex['v'] < 0:
vertex['v'] += vertlen
vertex_index.append(vertex['v'])
if vertex['t']:
if vertex['t'] < 0:
vertex['t'] += uvlen
uv_index.append(vertex['t'])
if vertex['n']:
if vertex['n'] < 0:
vertex['n'] += normlen
normal_index.append(vertex['n'])
faces.append({
'vertex':vertex_index,
'uv':uv_index,
'normal':normal_index,
'material':mcurrent,
'group':group,
'object':object,
'smooth':smooth,
})
# Smooth shading
if chunks[0] == "s" and len(chunks) == 2:
smooth = chunks[1]
return faces, vertices, uvs, normals, materials, mtllib
# #####################################################
# Generator - faces
# #####################################################
def setBit(value, position, on):
if on:
mask = 1 << position
return (value | mask)
else:
mask = ~(1 << position)
return (value & mask)
def generate_face(f, fc):
isTriangle = ( len(f['vertex']) == 3 )
if isTriangle:
nVertices = 3
else:
nVertices = 4
hasMaterial = True # for the moment OBJs without materials get default material
hasFaceUvs = False # not supported in OBJ
hasFaceVertexUvs = ( len(f['uv']) >= nVertices )
hasFaceNormals = False # don't export any face normals (as they are computed in engine)
hasFaceVertexNormals = ( len(f["normal"]) >= nVertices and SHADING == "smooth" )
hasFaceColors = BAKE_COLORS
hasFaceVertexColors = False # not supported in OBJ
faceType = 0
faceType = setBit(faceType, 0, not isTriangle)
faceType = setBit(faceType, 1, hasMaterial)
faceType = setBit(faceType, 2, hasFaceUvs)
faceType = setBit(faceType, 3, hasFaceVertexUvs)
faceType = setBit(faceType, 4, hasFaceNormals)
faceType = setBit(faceType, 5, hasFaceVertexNormals)
faceType = setBit(faceType, 6, hasFaceColors)
faceType = setBit(faceType, 7, hasFaceVertexColors)
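# Worked example (illustrative): a smooth, UV-mapped triangle with a
# material sets bits 1, 3 and 5, i.e. faceType == 0b00101010 == 42.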
faceData = []
# order is important, must match order in JSONLoader
# face type
# vertex indices
# material index
# face uvs index
# face vertex uvs indices
# face normal index
# face vertex normals indices
# face color index
# face vertex colors indices
faceData.append(faceType)
# must clamp in case of polygons bigger than quads
for i in xrange(nVertices):
index = f['vertex'][i] - 1
faceData.append(index)
faceData.append( f['material'] )
if hasFaceVertexUvs:
for i in xrange(nVertices):
index = f['uv'][i] - 1
faceData.append(index)
if hasFaceVertexNormals:
for i in xrange(nVertices):
index = f['normal'][i] - 1
faceData.append(index)
if hasFaceColors:
index = fc['material']
faceData.append(index)
return ",".join( map(str, faceData) )
# #####################################################
# Generator - chunks
# #####################################################
def hexcolor(c):
return ( int(c[0] * 255) << 16 ) + ( int(c[1] * 255) << 8 ) + int(c[2] * 255)
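# Illustrative example (not part of the original script):
#   hexcolor([1.0, 0.5, 0.0]) -> 0xff7f00 (16744192)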
def generate_vertex(v, option_vertices_truncate, scale):
if not option_vertices_truncate:
return TEMPLATE_VERTEX % (v[0], v[1], v[2])
else:
return TEMPLATE_VERTEX_TRUNCATE % (scale * v[0], scale * v[1], scale * v[2])
def generate_normal(n):
return TEMPLATE_N % (n[0], n[1], n[2])
def generate_uv(uv):
return TEMPLATE_UV % (uv[0], uv[1])
def generate_color_rgb(c):
return TEMPLATE_COLOR % (c[0], c[1], c[2])
def generate_color_decimal(c):
return TEMPLATE_COLOR_DEC % hexcolor(c)
# #####################################################
# Morphs
# #####################################################
def generate_morph_vertex(name, vertices):
vertex_string = ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices)
return TEMPLATE_MORPH_VERTICES % (name, vertex_string)
def generate_morph_color(name, colors):
color_string = ",".join(generate_color_rgb(c) for c in colors)
return TEMPLATE_MORPH_COLORS % (name, color_string)
def extract_material_colors(materials, mtlfilename, basename):
"""Extract diffuse colors from MTL materials
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
mtlColorArraySrt = []
for m in mtl:
if m in materials:
index = materials[m]
color = mtl[m].get("colorDiffuse", [1,0,0])
mtlColorArraySrt.append([index, color])
mtlColorArraySrt.sort()
mtlColorArray = [x[1] for x in mtlColorArraySrt]
return mtlColorArray
def extract_face_colors(faces, material_colors):
"""Extract colors from materials and assign them to faces
"""
faceColors = []
for face in faces:
material_index = face['material']
faceColors.append(material_colors[material_index])
return faceColors
def generate_morph_targets(morphfiles, n_vertices, infile):
skipOriginalMorph = False
norminfile = os.path.normpath(infile)
morphVertexData = []
for mfilepattern in morphfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
indices = range(0, len(matches), FRAMESTEP)
for i in indices:
path = matches[i]
normpath = os.path.normpath(path)
if normpath != norminfile or not skipOriginalMorph:
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
else:
if ALIGN == "center":
center(morphVertices)
elif ALIGN == "centerxz":
centerxz(morphVertices)
elif ALIGN == "bottom":
bottom(morphVertices)
elif ALIGN == "top":
top(morphVertices)
morphVertexData.append((get_name(name), morphVertices))
print "adding [%s] with %d vertices" % (name, n_morph_vertices)
morphTargets = ""
if len(morphVertexData):
morphTargets = "\n%s\n\t" % ",\n".join(generate_morph_vertex(name, vertices) for name, vertices in morphVertexData)
return morphTargets
def generate_morph_colors(colorfiles, n_vertices, n_faces):
morphColorData = []
colorFaces = []
materialColors = []
for mfilepattern in colorfiles.split():
matches = glob.glob(mfilepattern)
matches.sort()
for path in matches:
normpath = os.path.normpath(path)
name = os.path.basename(normpath)
morphFaces, morphVertices, morphUvs, morphNormals, morphMaterials, morphMtllib = parse_obj(normpath)
n_morph_vertices = len(morphVertices)
n_morph_faces = len(morphFaces)
if n_vertices != n_morph_vertices:
print "WARNING: skipping morph color map [%s] with different number of vertices [%d] than the original model [%d]" % (name, n_morph_vertices, n_vertices)
elif n_faces != n_morph_faces:
print "WARNING: skipping morph color map [%s] with different number of faces [%d] than the original model [%d]" % (name, n_morph_faces, n_faces)
else:
morphMaterialColors = extract_material_colors(morphMaterials, morphMtllib, normpath)
morphFaceColors = extract_face_colors(morphFaces, morphMaterialColors)
morphColorData.append((get_name(name), morphFaceColors))
# take first color map for baking into face colors
if len(colorFaces) == 0:
colorFaces = morphFaces
materialColors = morphMaterialColors
print "adding [%s] with %d face colors" % (name, len(morphFaceColors))
morphColors = ""
if len(morphColorData):
morphColors = "\n%s\n\t" % ",\n".join(generate_morph_color(name, colors) for name, colors in morphColorData)
return morphColors, colorFaces, materialColors
# #####################################################
# Materials
# #####################################################
def generate_color(i):
"""Generate hex color corresponding to integer.
Colors should have a well-defined ordering.
The first N colors are hardcoded, then colors are random
(the random number generator must be seeded with a deterministic
value before getting colors).
"""
if i < len(COLORS):
#return "0x%06x" % COLORS[i]
return COLORS[i]
else:
#return "0x%06x" % int(0xffffff * random.random())
return int(0xffffff * random.random())
def value2string(v):
if type(v)==str and v[0:2] != "0x":
return '"%s"' % v
elif type(v) == bool:
return str(v).lower()
return str(v)
def generate_materials(mtl, materials):
"""Generate JS array of materials objects
JS material objects are basically prettified one-to-one
mappings of MTL properties in JSON format.
"""
mtl_array = []
for m in mtl:
if m in materials:
index = materials[m]
# add debug information
# materials should be sorted according to how
# they appeared in OBJ file (for the first time)
# this index is identifier used in face definitions
mtl[m]['DbgName'] = m
mtl[m]['DbgIndex'] = index
mtl[m]['DbgColor'] = generate_color(index)
if BAKE_COLORS:
mtl[m]['vertexColors'] = "face"
mtl_raw = ",\n".join(['\t"%s" : %s' % (n, value2string(v)) for n,v in sorted(mtl[m].items())])
mtl_string = "\t{\n%s\n\t}" % mtl_raw
mtl_array.append([index, mtl_string])
return ",\n\n".join([m for i,m in sorted(mtl_array)])
def generate_mtl(materials):
"""Generate dummy materials (if there is no MTL file).
"""
mtl = {}
for m in materials:
index = materials[m]
mtl[m] = {
'DbgName': m,
'DbgIndex': index,
'DbgColor': generate_color(index)
}
return mtl
def generate_materials_string(materials, mtlfilename, basename):
"""Generate final materials string.
"""
if not materials:
materials = { 'default': 0 }
mtl = create_materials(materials, mtlfilename, basename)
return generate_materials(mtl, materials)
def create_materials(materials, mtlfilename, basename):
"""Parse MTL file and create mapping between its materials and OBJ materials.
Potential edge cases are handled here (missing materials, missing MTL file).
"""
random.seed(42) # to get well defined color order for debug colors
# default materials with debug colors for when
# no MTL was specified or MTL loading failed,
# or when there were no materials / null materials
mtl = generate_mtl(materials)
if mtlfilename:
# create full pathname for MTL (included from OBJ)
path = os.path.dirname(basename)
fname = os.path.join(path, mtlfilename)
if file_exists(fname):
# override default materials with real ones from MTL
# (where they exist, otherwise keep defaults)
mtl.update(parse_mtl(fname))
else:
print "Couldn't find [%s]" % fname
return mtl
# #####################################################
# Faces
# #####################################################
def is_triangle_flat(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_triangle_flat_uv(f):
return len(f['vertex'])==3 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==3
def is_triangle_smooth(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_triangle_smooth_uv(f):
return len(f['vertex'])==3 and f["normal"] and SHADING == "smooth" and len(f['uv'])==3
def is_quad_flat(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and not f['uv']
def is_quad_flat_uv(f):
return len(f['vertex'])==4 and not (f["normal"] and SHADING == "smooth") and len(f['uv'])==4
def is_quad_smooth(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and not f['uv']
def is_quad_smooth_uv(f):
return len(f['vertex'])==4 and f["normal"] and SHADING == "smooth" and len(f['uv'])==4
def sort_faces(faces):
data = {
'triangles_flat': [],
'triangles_flat_uv': [],
'triangles_smooth': [],
'triangles_smooth_uv': [],
'quads_flat': [],
'quads_flat_uv': [],
'quads_smooth': [],
'quads_smooth_uv': []
}
for f in faces:
if is_triangle_flat(f):
data['triangles_flat'].append(f)
elif is_triangle_flat_uv(f):
data['triangles_flat_uv'].append(f)
elif is_triangle_smooth(f):
data['triangles_smooth'].append(f)
elif is_triangle_smooth_uv(f):
data['triangles_smooth_uv'].append(f)
elif is_quad_flat(f):
data['quads_flat'].append(f)
elif is_quad_flat_uv(f):
data['quads_flat_uv'].append(f)
elif is_quad_smooth(f):
data['quads_smooth'].append(f)
elif is_quad_smooth_uv(f):
data['quads_smooth_uv'].append(f)
return data
# #####################################################
# API - ASCII converter
# #####################################################
def convert_ascii(infile, morphfiles, colorfiles, outfile):
"""Convert infile.obj to outfile.js
Here is where everything happens. If you need to automate conversions,
just import this file as a Python module and call this method.
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
# parse OBJ / MTL files
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
n_vertices = len(vertices)
n_faces = len(faces)
# align model
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
# generate normals string
nnormal = 0
normals_string = ""
if SHADING == "smooth":
normals_string = ",".join(generate_normal(n) for n in normals)
nnormal = len(normals)
# extract morph vertices
morphTargets = generate_morph_targets(morphfiles, n_vertices, infile)
# extract morph colors
morphColors, colorFaces, materialColors = generate_morph_colors(colorfiles, n_vertices, n_faces)
# generate colors string
ncolor = 0
colors_string = ""
if len(colorFaces) < len(faces):
colorFaces = faces
materialColors = extract_material_colors(materials, mtllib, infile)
if BAKE_COLORS:
colors_string = ",".join(generate_color_decimal(c) for c in materialColors)
ncolor = len(materialColors)
# generate ascii model string
text = TEMPLATE_FILE_ASCII % {
"name" : get_name(outfile),
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nuv" : len(uvs),
"nnormal" : nnormal,
"ncolor" : ncolor,
"nmaterial" : len(materials),
"materials" : generate_materials_string(materials, mtllib, infile),
"normals" : normals_string,
"colors" : colors_string,
"uvs" : ",".join(generate_uv(uv) for uv in uvs),
"vertices" : ",".join(generate_vertex(v, TRUNCATE, SCALE) for v in vertices),
"morphTargets" : morphTargets,
"morphColors" : morphColors,
"faces" : ",".join(generate_face(f, fc) for f, fc in zip(faces, colorFaces)),
"scale" : SCALE
}
out = open(outfile, "w")
out.write(text)
out.close()
print "%d vertices, %d faces, %d materials" % (len(vertices), len(faces), len(materials))
# #############################################################################
# API - Binary converter
# #############################################################################
def dump_materials_to_buffer(faces, buffer):
for f in faces:
data = struct.pack('<H',
f['material'])
buffer.append(data)
def dump_vertices3_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<III',
vi[0]-1, vi[1]-1, vi[2]-1)
buffer.append(data)
def dump_vertices4_to_buffer(faces, buffer):
for f in faces:
vi = f['vertex']
data = struct.pack('<IIII',
vi[0]-1, vi[1]-1, vi[2]-1, vi[3]-1)
buffer.append(data)
def dump_normals3_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<III',
ni[0]-1, ni[1]-1, ni[2]-1)
buffer.append(data)
def dump_normals4_to_buffer(faces, buffer):
for f in faces:
ni = f['normal']
data = struct.pack('<IIII',
ni[0]-1, ni[1]-1, ni[2]-1, ni[3]-1)
buffer.append(data)
def dump_uvs3_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<III',
ui[0]-1, ui[1]-1, ui[2]-1)
buffer.append(data)
def dump_uvs4_to_buffer(faces, buffer):
for f in faces:
ui = f['uv']
data = struct.pack('<IIII',
ui[0]-1, ui[1]-1, ui[2]-1, ui[3]-1)
buffer.append(data)
def add_padding(buffer, n):
if n % 4:
for i in range(4 - n % 4):
data = struct.pack('<B', 0)
buffer.append(data)
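# Illustrative example (not part of the original script): after writing
# n == 6 payload bytes, add_padding appends 2 zero bytes so the next
# record starts on a 4-byte boundary.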
def convert_binary(infile, outfile):
"""Convert infile.obj to outfile.js + outfile.bin
"""
if not file_exists(infile):
print "Couldn't find [%s]" % infile
return
binfile = get_name(outfile) + ".bin"
faces, vertices, uvs, normals, materials, mtllib = parse_obj(infile)
if ALIGN == "center":
center(vertices)
elif ALIGN == "centerxz":
centerxz(vertices)
elif ALIGN == "bottom":
bottom(vertices)
elif ALIGN == "top":
top(vertices)
sfaces = sort_faces(faces)
if SHADING == "smooth":
nnormals = len(normals)
else:
nnormals = 0
# ###################
# generate JS file
# ###################
text = TEMPLATE_FILE_BIN % {
"name" : get_name(outfile),
"materials" : generate_materials_string(materials, mtllib, infile),
"buffers" : binfile,
"fname" : os.path.basename(infile),
"nvertex" : len(vertices),
"nface" : len(faces),
"nmaterial" : len(materials),
"nnormal" : nnormals,
"nuv" : len(uvs)
}
out = open(outfile, "w")
out.write(text)
out.close()
# ###################
# generate BIN file
# ###################
buffer = []
# header
# ------
header_bytes = struct.calcsize('<12s')
header_bytes += struct.calcsize('<BBBBBBBB')
header_bytes += struct.calcsize('<IIIIIIIIIII')
# signature
signature = struct.pack('<12s', 'Three.js 003')
# metadata (all data is little-endian)
vertex_coordinate_bytes = 4
normal_coordinate_bytes = 1
uv_coordinate_bytes = 4
vertex_index_bytes = 4
normal_index_bytes = 4
uv_index_bytes = 4
material_index_bytes = 2
# header_bytes unsigned char 1
# vertex_coordinate_bytes unsigned char 1
# normal_coordinate_bytes unsigned char 1
# uv_coordinate_bytes unsigned char 1
# vertex_index_bytes unsigned char 1
# normal_index_bytes unsigned char 1
# uv_index_bytes unsigned char 1
# material_index_bytes unsigned char 1
bdata = struct.pack('<BBBBBBBB', header_bytes,
vertex_coordinate_bytes,
normal_coordinate_bytes,
uv_coordinate_bytes,
vertex_index_bytes,
normal_index_bytes,
uv_index_bytes,
material_index_bytes)
ntri_flat = len(sfaces['triangles_flat'])
ntri_smooth = len(sfaces['triangles_smooth'])
ntri_flat_uv = len(sfaces['triangles_flat_uv'])
ntri_smooth_uv = len(sfaces['triangles_smooth_uv'])
nquad_flat = len(sfaces['quads_flat'])
nquad_smooth = len(sfaces['quads_smooth'])
nquad_flat_uv = len(sfaces['quads_flat_uv'])
nquad_smooth_uv = len(sfaces['quads_smooth_uv'])
# nvertices unsigned int 4
# nnormals unsigned int 4
# nuvs unsigned int 4
# ntri_flat unsigned int 4
# ntri_smooth unsigned int 4
# ntri_flat_uv unsigned int 4
# ntri_smooth_uv unsigned int 4
# nquad_flat unsigned int 4
# nquad_smooth unsigned int 4
# nquad_flat_uv unsigned int 4
# nquad_smooth_uv unsigned int 4
ndata = struct.pack('<IIIIIIIIIII', len(vertices),
nnormals,
len(uvs),
ntri_flat,
ntri_smooth,
ntri_flat_uv,
ntri_smooth_uv,
nquad_flat,
nquad_smooth,
nquad_flat_uv,
nquad_smooth_uv)
buffer.append(signature)
buffer.append(bdata)
buffer.append(ndata)
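# Illustrative consumer-side sketch (not part of the original script):
# a reader would unpack the same header with, e.g.,
#   sig = struct.unpack('<12s', data[0:12])[0]
#   sizes = struct.unpack('<BBBBBBBB', data[12:20])
#   counts = struct.unpack('<IIIIIIIIIII', data[20:64])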
# 1. vertices
# ------------
# x float 4
# y float 4
# z float 4
for v in vertices:
data = struct.pack('<fff', v[0], v[1], v[2])
buffer.append(data)
# 2. normals
# ---------------
# x signed char 1
# y signed char 1
# z signed char 1
if SHADING == "smooth":
for n in normals:
normalize(n)
data = struct.pack('<bbb', math.floor(n[0]*127+0.5),
math.floor(n[1]*127+0.5),
math.floor(n[2]*127+0.5))
buffer.append(data)
add_padding(buffer, nnormals * 3)
# 3. uvs
# -----------
# u float 4
# v float 4
for uv in uvs:
data = struct.pack('<ff', uv[0], uv[1])
buffer.append(data)
# padding
#data = struct.pack('<BB', 0, 0)
#buffer.append(data)
# 4. flat triangles (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# ------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat'], buffer)
add_padding(buffer, ntri_flat * 2)
# 5. smooth triangles (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# -------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# -------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth'], buffer)
add_padding(buffer, ntri_smooth * 2)
# 6. flat triangles uv (vertices + materials + uvs)
# --------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_flat_uv'], buffer)
add_padding(buffer, ntri_flat_uv * 2)
# 7. smooth triangles uv (vertices + materials + normals + uvs)
# ----------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_normals3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_uvs3_to_buffer(sfaces['triangles_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['triangles_smooth_uv'], buffer)
add_padding(buffer, ntri_smooth_uv * 2)
# 8. flat quads (vertices + materials)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat'], buffer)
dump_materials_to_buffer(sfaces['quads_flat'], buffer)
add_padding(buffer, nquad_flat * 2)
# 9. smooth quads (vertices + materials + normals)
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth'], buffer)
add_padding(buffer, nquad_smooth * 2)
# 10. flat quads uv (vertices + materials + uvs)
# ------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_flat_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_flat_uv'], buffer)
add_padding(buffer, nquad_flat_uv * 2)
# 11. smooth quads uv
# -------------------
# a unsigned int 4
# b unsigned int 4
# c unsigned int 4
# d unsigned int 4
# --------------------
# na unsigned int 4
# nb unsigned int 4
# nc unsigned int 4
# nd unsigned int 4
# --------------------
# ua unsigned int 4
# ub unsigned int 4
# uc unsigned int 4
# ud unsigned int 4
# --------------------
# m unsigned short 2
dump_vertices4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_normals4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_uvs4_to_buffer(sfaces['quads_smooth_uv'], buffer)
dump_materials_to_buffer(sfaces['quads_smooth_uv'], buffer)
add_padding(buffer, nquad_smooth_uv * 2)
path = os.path.dirname(outfile)
fname = os.path.join(path, binfile)
out = open(fname, "wb")
out.write("".join(buffer))
out.close()
# #############################################################################
# Helpers
# #############################################################################
def usage():
print "Usage: %s -i filename.obj -o filename.js [-m morphfiles*.obj] [-c morphcolors*.obj] [-a center|top|bottom] [-s flat|smooth] [-t binary|ascii] [-d invert|normal]" % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
# get parameters from the command line
try:
opts, args = getopt.getopt(sys.argv[1:], "hbi:m:c:b:o:a:s:t:d:x:f:", ["help", "bakecolors", "input=", "morphs=", "colors=", "output=", "align=", "shading=", "type=", "dissolve=", "truncatescale=", "framestep="])
except getopt.GetoptError:
usage()
sys.exit(2)
infile = outfile = ""
morphfiles = ""
colorfiles = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-i", "--input"):
infile = a
elif o in ("-m", "--morphs"):
morphfiles = a
elif o in ("-c", "--colors"):
colorfiles = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-a", "--align"):
if a in ("top", "bottom", "center", "centerxz", "none"):
ALIGN = a
elif o in ("-s", "--shading"):
if a in ("flat", "smooth"):
SHADING = a
elif o in ("-t", "--type"):
if a in ("binary", "ascii"):
TYPE = a
elif o in ("-d", "--dissolve"):
if a in ("normal", "invert"):
TRANSPARENCY = a
elif o in ("-b", "--bakecolors"):
BAKE_COLORS = True
elif o in ("-x", "--truncatescale"):
TRUNCATE = True
SCALE = float(a)
elif o in ("-f", "--framestep"):
FRAMESTEP = int(a)
if infile == "" or outfile == "":
usage()
sys.exit(2)
print "Converting [%s] into [%s] ..." % (infile, outfile)
if morphfiles:
print "Morphs [%s]" % morphfiles
if colorfiles:
print "Colors [%s]" % colorfiles
if TYPE == "ascii":
convert_ascii(infile, morphfiles, colorfiles, outfile)
elif TYPE == "binary":
convert_binary(infile, outfile)
| mit |
2014c2g19/2014c2g19 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/_threading_local.py | 923 | 7410 | """Thread-local objects.
(Note that this module provides a Python version of the threading.local
class. Depending on the version of Python you're using, there may be a
faster one available. You should always import the `local` class from
`threading`.)
Thread-local objects support the management of thread-local data.
If you have data that you want to be local to a thread, simply create
a thread-local object and use its attributes:
>>> mydata = local()
>>> mydata.number = 42
>>> mydata.number
42
You can also access the local-object's dictionary:
>>> mydata.__dict__
{'number': 42}
>>> mydata.__dict__.setdefault('widgets', [])
[]
>>> mydata.widgets
[]
What's important about thread-local objects is that their data are
local to a thread. If we access the data in a different thread:
>>> log = []
>>> def f():
... items = sorted(mydata.__dict__.items())
... log.append(items)
... mydata.number = 11
... log.append(mydata.number)
>>> import threading
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[], 11]
we get different data. Furthermore, changes made in the other thread
don't affect data seen in this thread:
>>> mydata.number
42
Of course, values you get from a local object, including a __dict__
attribute, are for whatever thread was current at the time the
attribute was read. For that reason, you generally don't want to save
these values across threads, as they apply only to the thread they
came from.
You can create custom local objects by subclassing the local class:
>>> class MyLocal(local):
... number = 2
... initialized = False
... def __init__(self, **kw):
... if self.initialized:
... raise SystemError('__init__ called too many times')
... self.initialized = True
... self.__dict__.update(kw)
... def squared(self):
... return self.number ** 2
This can be useful to support default values, methods and
initialization. Note that if you define an __init__ method, it will be
called each time the local object is used in a separate thread. This
is necessary to initialize each thread's dictionary.
Now if we create a local object:
>>> mydata = MyLocal(color='red')
Now we have a default number:
>>> mydata.number
2
an initial color:
>>> mydata.color
'red'
>>> del mydata.color
And a method that operates on the data:
>>> mydata.squared()
4
As before, we can access the data in a separate thread:
>>> log = []
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
>>> log
[[('color', 'red'), ('initialized', True)], 11]
without affecting this thread's data:
>>> mydata.number
2
>>> mydata.color
Traceback (most recent call last):
...
AttributeError: 'MyLocal' object has no attribute 'color'
Note that subclasses can define slots, but they are not thread
local. They are shared across threads:
>>> class MyLocal(local):
... __slots__ = 'number'
>>> mydata = MyLocal()
>>> mydata.number = 42
>>> mydata.color = 'red'
So, the separate thread:
>>> thread = threading.Thread(target=f)
>>> thread.start()
>>> thread.join()
affects what we see:
>>> mydata.number
11
>>> del mydata
"""
from weakref import ref
from contextlib import contextmanager
__all__ = ["local"]
# We need to use objects from the threading module, but the threading
# module may also want to use our `local` class, if support for locals
# isn't compiled in to the `thread` module. This creates potential problems
# with circular imports. For that reason, we don't import `threading`
# until the bottom of this file (a hack sufficient to worm around the
# potential problems). Note that all platforms on CPython do have support
# for locals in the `thread` module, and there is no circular import problem
# then, so problems introduced by fiddling the order of imports here won't
# manifest.
class _localimpl:
"""A class managing thread-local dicts"""
__slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'
def __init__(self):
# The key used in the Thread objects' attribute dicts.
# We keep it a string for speed but make it unlikely to clash with
# a "real" attribute.
self.key = '_threading_local._localimpl.' + str(id(self))
# { id(Thread) -> (ref(Thread), thread-local dict) }
self.dicts = {}
def get_dict(self):
"""Return the dict for the current thread. Raises KeyError if none
defined."""
thread = current_thread()
return self.dicts[id(thread)][1]
def create_dict(self):
"""Create a new dict for the current thread, and return it."""
localdict = {}
key = self.key
thread = current_thread()
idt = id(thread)
def local_deleted(_, key=key):
# When the localimpl is deleted, remove the thread attribute.
thread = wrthread()
if thread is not None:
del thread.__dict__[key]
def thread_deleted(_, idt=idt):
# When the thread is deleted, remove the local dict.
# Note that this is suboptimal if the thread object gets
# caught in a reference loop. We would like to be called
# as soon as the OS-level thread ends instead.
local = wrlocal()
if local is not None:
dct = local.dicts.pop(idt)
wrlocal = ref(self, local_deleted)
wrthread = ref(thread, thread_deleted)
thread.__dict__[key] = wrlocal
self.dicts[idt] = wrthread, localdict
return localdict
@contextmanager
def _patch(self):
impl = object.__getattribute__(self, '_local__impl')
try:
dct = impl.get_dict()
except KeyError:
dct = impl.create_dict()
args, kw = impl.localargs
self.__init__(*args, **kw)
with impl.locallock:
object.__setattr__(self, '__dict__', dct)
yield
class local:
__slots__ = '_local__impl', '__dict__'
def __new__(cls, *args, **kw):
if (args or kw) and (cls.__init__ is object.__init__):
raise TypeError("Initialization arguments are not supported")
self = object.__new__(cls)
impl = _localimpl()
impl.localargs = (args, kw)
impl.locallock = RLock()
object.__setattr__(self, '_local__impl', impl)
# We need to create the thread dict in anticipation of
# __init__ being called, to make sure we don't call it
# again ourselves.
impl.create_dict()
return self
def __getattribute__(self, name):
with _patch(self):
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only"
% self.__class__.__name__)
with _patch(self):
return object.__setattr__(self, name, value)
def __delattr__(self, name):
if name == '__dict__':
raise AttributeError(
"%r object attribute '__dict__' is read-only"
% self.__class__.__name__)
with _patch(self):
return object.__delattr__(self, name)
from threading import current_thread, RLock
| gpl-2.0 |
mudithkr/zamboni | mkt/api/authentication.py | 19 | 1218 | from django.contrib.auth.models import AnonymousUser
import commonware.log
from rest_framework.authentication import BaseAuthentication
log = commonware.log.getLogger('z.api')
class OAuthError(RuntimeError):
def __init__(self, message='OAuth error occurred.'):
self.message = message
class RestOAuthAuthentication(BaseAuthentication):
def authenticate(self, request):
# Most of the work here is in the RestOAuthMiddleware.
if (request._request.user.is_authenticated() and
'RestOAuth' in getattr(request._request, 'authed_from', [])):
request.user = request._request.user
return request.user, None
class RestSharedSecretAuthentication(BaseAuthentication):
def authenticate(self, request):
# Most of the work here is in the RestSharedSecretMiddleware.
if (request._request.user.is_authenticated() and
'RestSharedSecret' in getattr(
request._request, 'authed_from', [])):
request.user = request._request.user
return request.user, None
class RestAnonymousAuthentication(BaseAuthentication):
def authenticate(self, request):
return AnonymousUser(), None
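# Illustrative wiring (a sketch, not part of the original module; assumes
# standard Django REST Framework settings and that this module is
# importable as mkt.api.authentication):
#
#   REST_FRAMEWORK = {
#       'DEFAULT_AUTHENTICATION_CLASSES': (
#           'mkt.api.authentication.RestOAuthAuthentication',
#           'mkt.api.authentication.RestSharedSecretAuthentication',
#           'mkt.api.authentication.RestAnonymousAuthentication',
#       ),
#   }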
| bsd-3-clause |
dscorbett/pygments | pygments/lexers/apl.py | 1 | 3238 | # -*- coding: utf-8 -*-
"""
pygments.lexers.apl
~~~~~~~~~~~~~~~~~~~
Lexers for APL.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['APLLexer']
class APLLexer(RegexLexer):
"""
A simple `APL <https://en.m.wikipedia.org/wiki/APL_(programming_language)>`_ lexer.
.. versionadded:: 2.0
"""
name = 'APL'
aliases = ['apl']
filenames = ['*.apl']
tokens = {
'root': [
# Whitespace
# ==========
(r'\s+', Text),
#
# Comment
# =======
# '⍝' is traditional; '#' is supported by GNU APL and NGN (but not Dyalog)
(u'[⍝#].*$', Comment.Single),
#
# Strings
# =======
(r'\'((\'\')|[^\'])*\'', String.Single),
(r'"(("")|[^"])*"', String.Double), # supported by NGN APL
#
# Punctuation
# ===========
# This token type is used for diamond and parenthesis
# but not for bracket and ; (see below)
(u'[⋄◇()]', Punctuation),
#
# Array indexing
# ==============
# Since this token type is very important in APL, it is not included in
# the punctuation token type but rather in the following one
(r'[\[\];]', String.Regex),
#
# Distinguished names
# ===================
# following IBM APL2 standard
(u'⎕[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Function),
#
# Labels
# ======
# following IBM APL2 standard
# (u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*:', Name.Label),
#
# Variables
# =========
# following IBM APL2 standard
(u'[A-Za-zΔ∆⍙][A-Za-zΔ∆⍙_¯0-9]*', Name.Variable),
#
# Numbers
# =======
(u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
Number),
#
# Operators
# ==========
(u'[\\.\\\\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type
(u'[+\\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]',
Operator),
#
# Constant
# ========
(u'⍬', Name.Constant),
#
# Quad symbol
# ===========
(u'[⎕⍞]', Name.Variable.Global),
#
# Arrows left/right
# =================
(u'[←→]', Keyword.Declaration),
#
# D-Fn
# ====
(u'[⍺⍵⍶⍹∇:]', Name.Builtin.Pseudo),
(r'[{}]', Keyword.Type),
],
}
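# Illustrative usage (a sketch, not part of the original module; assumes a
# standard Pygments install):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight(u'x ← ⍳10', APLLexer(), TerminalFormatter()))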
| bsd-2-clause |
mglukhikh/intellij-community | python/helpers/py2only/docutils/transforms/misc.py | 183 | 4882 | # $Id: misc.py 6314 2010-04-26 10:04:17Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous transforms.
"""
__docformat__ = 'reStructuredText'
from docutils import nodes
from docutils.transforms import Transform, TransformError
class CallBack(Transform):
"""
Inserts a callback into a document. The callback is called when the
transform is applied, which is determined by its priority.
For use with `nodes.pending` elements. Requires a ``details['callback']``
entry, a bound method or function which takes one parameter: the pending
node. Other data can be stored in the ``details`` attribute or in the
object hosting the callback method.
"""
default_priority = 990
def apply(self):
pending = self.startnode
pending.details['callback'](pending)
pending.parent.remove(pending)
class ClassAttribute(Transform):
"""
Move the "class" attribute specified in the "pending" node into the
immediately following non-comment element.
"""
default_priority = 210
def apply(self):
pending = self.startnode
parent = pending.parent
child = pending
while parent:
# Check for appropriate following siblings:
for index in range(parent.index(child) + 1, len(parent)):
element = parent[index]
if (isinstance(element, nodes.Invisible) or
isinstance(element, nodes.system_message)):
continue
element['classes'] += pending.details['class']
pending.parent.remove(pending)
return
else:
# At end of section or container; apply to sibling
child = parent
parent = parent.parent
error = self.document.reporter.error(
'No suitable element following "%s" directive'
% pending.details['directive'],
nodes.literal_block(pending.rawsource, pending.rawsource),
line=pending.line)
pending.replace_self(error)
class Transitions(Transform):
"""
Move transitions at the end of sections up the tree. Complain
on transitions after a title, at the beginning or end of the
document, and after another transition.
For example, transform this::
<section>
...
<transition>
<section>
...
into this::
<section>
...
<transition>
<section>
...
"""
default_priority = 830
def apply(self):
for node in self.document.traverse(nodes.transition):
self.visit_transition(node)
def visit_transition(self, node):
index = node.parent.index(node)
error = None
if (index == 0 or
isinstance(node.parent[0], nodes.title) and
(index == 1 or
isinstance(node.parent[1], nodes.subtitle) and
index == 2)):
assert (isinstance(node.parent, nodes.document) or
isinstance(node.parent, nodes.section))
error = self.document.reporter.error(
'Document or section may not begin with a transition.',
source=node.source, line=node.line)
elif isinstance(node.parent[index - 1], nodes.transition):
error = self.document.reporter.error(
'At least one body element must separate transitions; '
'adjacent transitions are not allowed.',
source=node.source, line=node.line)
if error:
# Insert before node and update index.
node.parent.insert(index, error)
index += 1
assert index < len(node.parent)
if index != len(node.parent) - 1:
# No need to move the node.
return
# Node behind which the transition is to be moved.
sibling = node
# While sibling is the last node of its parent.
while index == len(sibling.parent) - 1:
sibling = sibling.parent
# If sibling is the whole document (i.e. it has no parent).
if sibling.parent is None:
# Transition at the end of document. Do not move the
# transition up, and place an error behind.
error = self.document.reporter.error(
'Document may not end with a transition.',
line=node.line)
node.parent.insert(node.parent.index(node) + 1, error)
return
index = sibling.parent.index(sibling)
# Remove the original transition node.
node.parent.remove(node)
# Insert the transition after the sibling.
sibling.parent.insert(index + 1, node)
| apache-2.0 |
postlund/home-assistant | homeassistant/components/ambient_station/sensor.py | 2 | 2630 | """Support for Ambient Weather Station sensors."""
import logging
from homeassistant.const import ATTR_NAME
from . import (
SENSOR_TYPES,
TYPE_SOLARRADIATION,
TYPE_SOLARRADIATION_LX,
AmbientWeatherEntity,
)
from .const import (
ATTR_LAST_DATA,
ATTR_MONITORED_CONDITIONS,
DATA_CLIENT,
DOMAIN,
TYPE_SENSOR,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Ambient PWS sensors based on a config entry."""
ambient = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
sensor_list = []
for mac_address, station in ambient.stations.items():
for condition in station[ATTR_MONITORED_CONDITIONS]:
name, unit, kind, device_class = SENSOR_TYPES[condition]
if kind == TYPE_SENSOR:
sensor_list.append(
AmbientWeatherSensor(
ambient,
mac_address,
station[ATTR_NAME],
condition,
name,
device_class,
unit,
)
)
async_add_entities(sensor_list, True)
class AmbientWeatherSensor(AmbientWeatherEntity):
"""Define an Ambient sensor."""
def __init__(
self,
ambient,
mac_address,
station_name,
sensor_type,
sensor_name,
device_class,
unit,
):
"""Initialize the sensor."""
super().__init__(
ambient, mac_address, station_name, sensor_type, sensor_name, device_class
)
self._unit = unit
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
async def async_update(self):
"""Fetch new state data for the sensor."""
if self._sensor_type == TYPE_SOLARRADIATION_LX:
# If the user requests the solarradiation_lx sensor, use the
# value of the solarradiation sensor and apply a very accurate
# approximation of converting sunlight W/m^2 to lx:
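# e.g. 1000 W/m^2 -> round(1000 / 0.0079) == 126582 lx (illustrative)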
w_m2_brightness_val = self._ambient.stations[self._mac_address][
ATTR_LAST_DATA
].get(TYPE_SOLARRADIATION)
self._state = round(float(w_m2_brightness_val) / 0.0079)
else:
self._state = self._ambient.stations[self._mac_address][ATTR_LAST_DATA].get(
self._sensor_type
)
| apache-2.0 |
christydennison/ResumeStripClub | convert.py | 1 | 3312 | #!/usr/bin/env python
import sys
import os
import pdf2txt
import re
REDACTED_TEXT = 'REDACTED'
LINK_REGEX = re.compile('(https?:\/\/)?([a-zA-Z0-9]{2,4}\.)?(linkedin.com|lnkd\.in|github.com)\/.+')
EMAIL_REGEX = re.compile('([\w\.]+@(?:[\w]+\.)+[a-zA-Z]{2,})')
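# Illustrative matches (not part of the original script):
#   LINK_REGEX matches e.g. "linkedin.com/in/jane" or "https://github.com/jane"
#   EMAIL_REGEX captures e.g. "[email protected]"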
BLACKLIST_FILE = "bad_words.txt"
def get_blacklist_words():
blacklist = []
try:
with open(BLACKLIST_FILE) as f:
lines = f.read().splitlines()
for line in lines:
if line:
blacklist.append(line.lower().strip())
except Exception as e:
print "Unable to read bad words from {0}. Error: {1}".format(BLACKLIST_FILE, e)
return set(blacklist)
def join_newlines(array):
return '\n'.join(array)
def redact_initial(file_lines, lastname):
processed_file_lines = []
fullname = ''
firstname = ''
for index, line in enumerate(file_lines):
newline = line
links = LINK_REGEX.search(newline.replace(" ", ""))
if links:
matching_text = links.group()
# print 'links!', matching_text
newline = newline.replace(" ", "").replace(matching_text, REDACTED_TEXT + ' PROFILE')
# print newline
emails = EMAIL_REGEX.search(newline.replace(" ", ""))
if emails:
matching_text = emails.group(1)
# print 'emails!', matching_text
newline = newline.replace(" ", "").replace(matching_text, REDACTED_TEXT + ' EMAIL')
# print newline
if lastname.lower() in newline.lower() or lastname.lower() in newline.lower().replace(" ", ""):
fullname = newline.replace(" ", "")
firstname = re.split(lastname, fullname, flags=re.IGNORECASE)[0]
print fullname
print firstname
newline = newline.replace(" ", "").replace(firstname, firstname[0] + '. ')
# print 'name',firstname
# print newline
processed_file_lines.append(newline)
return processed_file_lines
def redact(list_of_lines):
output = []
blacklist = get_blacklist_words()
for line in list_of_lines:
newline = line
for word in blacklist:
to_replace = re.compile("[^\w]{0}[^\w]".format(word), re.IGNORECASE)
newline = to_replace.sub(" {} ".format(REDACTED_TEXT), newline)
# print newline
output.append(newline)
return output
def process(fname):
lastname = '.'.join(os.path.basename(fname).split(".")[:-1])
print 'Using name', lastname
pathname = os.path.dirname(fname)
file_path = os.path.join(pathname, lastname)
txt_file_path = file_path + '.txt'
redacted_file_path = file_path + '_redacted.txt'
# os.remove(redacted_file_path)
pdf2txt.main(['', '-o', txt_file_path, fname])
with open(txt_file_path) as f:
lines = f.read().splitlines()
names_redacted = redact_initial(lines, lastname)
output = redact(names_redacted)
with open(redacted_file_path, 'w') as ofile:
ofile.write(join_newlines(output))
if __name__ == "__main__":
filenames = []
if len(sys.argv) > 1:
filenames = sys.argv[1:]
else:
print "You must give at least one file to process"
sys.exit(1)
for filename in filenames:
process(filename)
| mit |
javiergarridomellado/Empresa_django | devcodela/lib/python2.7/site-packages/django/db/backends/__init__.py | 52 | 38612 | from django.db.utils import DatabaseError
try:
from django.utils.six.moves import _thread as thread
except ImportError:
from django.utils.six.moves import _dummy_thread as thread
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.importlib import import_module
from django.utils import six
from django.utils.timezone import is_aware
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Transaction related attributes
self.transaction_state = []
self.savepoint_state = 0
self._dirty = None
self._thread_ident = thread.get_ident()
self.allow_thread_sharing = allow_thread_sharing
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.alias)
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def abort(self):
"""
Roll back any ongoing transaction and clean the transaction state
stack.
"""
if self._dirty:
self._rollback()
self._dirty = False
while self.transaction_state:
self.leave_transaction_management()
def enter_transaction_management(self, managed=True):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if self.transaction_state:
self.transaction_state.append(self.transaction_state[-1])
else:
self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
if self._dirty is None:
self._dirty = False
self._enter_transaction_management(managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError(
"This code isn't under transaction management")
# We will pass the next status (after leaving the previous state
# behind) to subclass hook.
self._leave_transaction_management(self.is_managed())
if self._dirty:
self.rollback()
raise TransactionManagementError(
"Transaction managed block ended with pending COMMIT/ROLLBACK")
self._dirty = False
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by a thread other than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if (not self.allow_thread_sharing
and self._thread_ident != thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
Sets a dirty flag for the current thread and code streak. This can be
used in a managed block of code to decide whether there are open
changes waiting for commit.
"""
if self._dirty is not None:
self._dirty = True
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def set_clean(self):
"""
Resets a dirty flag for the current thread and code streak. This can be
used in a managed block of code to decide whether a commit or rollback
should happen.
"""
if self._dirty is not None:
self._dirty = False
else:
raise TransactionManagementError("This code isn't under transaction management")
self.clean_savepoints()
def clean_savepoints(self):
self.savepoint_state = 0
def is_managed(self):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if self.transaction_state:
return self.transaction_state[-1]
return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
committed.
"""
top = self.transaction_state
if top:
top[-1] = flag
if not flag and self.is_dirty():
self._commit()
self.set_clean()
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def commit_unless_managed(self):
"""
Commits changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self._commit()
self.clean_savepoints()
else:
self.set_dirty()
def rollback_unless_managed(self):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self._rollback()
else:
self.set_dirty()
def commit(self):
"""
Does the commit itself and resets the dirty flag.
"""
self.validate_thread_sharing()
self._commit()
self.set_clean()
def rollback(self):
"""
This function does the rollback itself and resets the dirty flag.
"""
self.validate_thread_sharing()
self._rollback()
self.set_clean()
def savepoint(self):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
thread_ident = thread.get_ident()
self.savepoint_state += 1
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, self.savepoint_state)
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
self.validate_thread_sharing()
if self.savepoint_state:
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
self.validate_thread_sharing()
if self.savepoint_state:
self._savepoint_commit(sid)
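    # A usage sketch for the savepoint API above (hypothetical caller code):
    # the sid returned by savepoint() is what the paired rollback/commit
    # expects.
    #
    #     sid = connection.savepoint()
    #     try:
    #         ...  # statements that may violate a constraint
    #     except DatabaseError:
    #         connection.savepoint_rollback(sid)
    #     else:
    #         connection.savepoint_commit(sid)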
@contextmanager
def constraint_checks_disabled(self):
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key constraint
checking.
"""
pass
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered.
"""
pass
def close(self):
self.validate_thread_sharing()
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
self.validate_thread_sharing()
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
    # True if django.db.backends.util.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_autocommit = False
uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"Confirm support for transactions"
try:
# Make sure to run inside a managed transaction block,
            # otherwise autocommit will cause the confirmation to
# fail.
self.connection.enter_transaction_management()
self.connection.managed(True)
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection._commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection._rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection._commit()
self.connection._dirty = False
finally:
self.connection.leave_transaction_management()
return count == 0
@cached_property
def supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
return True
except NotImplementedError:
return False
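# Concrete backends subclass BaseDatabaseFeatures and override the flags that
# differ from the defaults. A hedged sketch (illustrative values, not a real
# backend):
#
#     class DatabaseFeatures(BaseDatabaseFeatures):
#         uses_savepoints = True
#         has_bulk_insert = True
#         supports_timezones = False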
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
        Returns the maximum allowed batch size for the backend. The fields
        are the fields to be inserted in the batch; objs contains all the
        objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns a SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import force_text
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return force_text(sql) % u_params
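    # For example, last_executed_query(cursor, 'SELECT %s, %s', (1, 'x'))
    # returns "SELECT 1, x" with the placeholders substituted.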
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Returns the value to use for the LIMIT when we want "LIMIT infinity".
        Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
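    # For example, connection.ops.compiler('SQLInsertCompiler') resolves the
    # class from django.db.models.sql.compiler (or from whichever module a
    # backend names in compiler_module).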
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import force_text
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
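    # For example, prep_for_like_query("100%_done") yields 100\%\_done: the
    # % and _ wildcards are backslash-escaped so LIKE matches them literally.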
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup.
        `value` is an int containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
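    # For example, year_lookup_bounds(2005) returns
    # ['2005-01-01 00:00:00', '2005-12-31 23:59:59.999999'].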
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year lookup.
        `value` is an int containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent type
that is compatible with the field type.
"""
if value is None:
return value
internal_type = field.get_internal_type()
if internal_type == 'FloatField':
return float(value)
elif (internal_type and (internal_type.endswith('IntegerField')
or internal_type == 'AutoField')):
return int(value)
return value
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
        This is used on specific backends to rule out aggregates that are
        known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
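    # For example, combine_expression('+', ['"price"', '"tax"']) returns
    # '"price" + "tax"'.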
def modify_insert_params(self, placeholders, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
        The default table name converter is for case-sensitive comparison.
"""
return name
def table_names(self, cursor=None):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
        do NOT use the database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
if cursor is None:
cursor = self.connection.cursor()
return sorted(self.get_table_list(cursor))
def get_table_list(self, cursor):
"""
Returns an unsorted list of names of all tables that exist in the
database.
"""
raise NotImplementedError
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
tables = list(tables)
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = list(map(self.table_name_converter, tables))
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table.
"""
for column in six.iteritems(self.get_indexes(cursor, table_name)):
if column[1]['primary_key']:
return column[0]
return None
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
| gpl-2.0 |
aglitke/vdsm | doc/conf.py | 2 | 6556 | # -*- coding: utf-8 -*-
#
# VDSM documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 24 14:07:47 2009.
#
# This file is execfile()d with the current directory set to its containing dir
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extension
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'VDSM'
copyright = u'2013, Red Hat'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0'
# The full version, including alpha/beta/rc tags.
release = '3.0beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'vdsmdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [('index', 'Vdsm.tex', u'VDSM Documentation',
u'Red Hat', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
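# With this mapping in place, reST in these docs can cross-reference the
# Python standard library; e.g. a role like :py:func:`time.strftime` would
# resolve to the corresponding page on docs.python.org.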
| gpl-2.0 |
hsum/sqlalchemy | examples/performance/bulk_updates.py | 27 | 1445 | """This series of tests illustrates different ways to UPDATE a large number
of rows in bulk.
"""
from . import Profiler
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, create_engine, bindparam
from sqlalchemy.orm import Session
Base = declarative_base()
engine = None
class Customer(Base):
__tablename__ = "customer"
id = Column(Integer, primary_key=True)
name = Column(String(255))
description = Column(String(255))
Profiler.init("bulk_updates", num=100000)
@Profiler.setup
def setup_database(dburl, echo, num):
global engine
engine = create_engine(dburl, echo=echo)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
s = Session(engine)
for chunk in range(0, num, 10000):
s.bulk_insert_mappings(Customer, [
{
'name': 'customer name %d' % i,
'description': 'customer description %d' % i
} for i in range(chunk, chunk + 10000)
])
s.commit()
@Profiler.profile
def test_orm_flush(n):
"""UPDATE statements via the ORM flush process."""
session = Session(bind=engine)
for chunk in range(0, n, 1000):
customers = session.query(Customer).\
filter(Customer.id.between(chunk, chunk + 1000)).all()
for customer in customers:
customer.description += "updated"
session.flush()
session.commit()
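@Profiler.profile
def test_orm_bulk_update(n):
    """UPDATE statements via the ORM bulk_update_mappings() API.

    A hedged sketch added for contrast with the flush-based variant above;
    it is not part of the original example suite. Each mapping must include
    the primary key so the UPDATE can be targeted.
    """
    session = Session(bind=engine)
    session.bulk_update_mappings(Customer, [
        {'id': i, 'description': 'updated description %d' % i}
        for i in range(1, n + 1)
    ])
    session.commit()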
| mit |
murali-munna/scikit-learn | sklearn/externals/joblib/_multiprocessing_helpers.py | 326 | 1214 | """Helper module to factorize the conditional multiprocessing import logic
We use a distinct module to simplify import statements and avoid introducing
circular dependencies (for instance for the assert_spawning name).
"""
import os
import warnings
# Obtain the configuration from the environment, assuming multiprocessing is
# enabled (1) by default; setting it to 0 disables it (mp becomes None). Any
# non-integer value will fail loudly at the int() conversion below.
mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
if mp:
try:
import multiprocessing as mp
import multiprocessing.pool
except ImportError:
mp = None
# 2nd stage: validate that locking is available on the system and
# issue a warning if not
if mp is not None:
try:
_sem = mp.Semaphore()
del _sem # cleanup
except (ImportError, OSError) as e:
mp = None
warnings.warn('%s. joblib will operate in serial mode' % (e,))
# 3rd stage: backward compat for the assert_spawning helper
if mp is not None:
try:
# Python 3.4+
from multiprocessing.context import assert_spawning
except ImportError:
from multiprocessing.forking import assert_spawning
else:
assert_spawning = None
| bsd-3-clause |
larrybradley/astropy | astropy/extern/_strptime.py | 12 | 22516 | """Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
# -----------------------------------------------------------------------------
# _strptime.py
#
# Licensed under PYTHON SOFTWARE FOUNDATION LICENSE
# See licenses/PYTHON.rst
#
# Copied from https://github.com/python/cpython/blob/3.5/Lib/_strptime.py
# -----------------------------------------------------------------------------
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import (date as datetime_date,
timedelta as datetime_timedelta,
timezone as datetime_timezone)
try:
from _thread import allocate_lock as _thread_allocate_lock
except ImportError:
from _dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
# Figure out what the current language is set to.
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
        The locale language is set at the outset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
        The only other possible issue is if someone changed the timezone and
        did not call time.tzset(). That is an issue for the programmer, though,
        since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
if time.tzname != self.tzname or time.daylight != self.daylight:
raise ValueError("timezone changed during initialization")
def __pad(self, seq, front):
# Add '' to seq to either the front (is True), else the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
        # The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
        # magical; it just happens to be used everywhere else where a
        # static date was needed.
am_pm = []
for hour in (1, 22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
            # If %W is used, then Sunday, 1999-01-03 (the date probed just
            # below) will fall on week 0 since it occurs before the first
            # Monday of the year. Otherwise %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == time.tzname[1]
# and time.daylight; handle that in strptime.
try:
time.tzset()
except AttributeError:
pass
self.tzname = time.tzname
self.daylight = time.daylight
no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()})
if self.daylight:
has_saving = frozenset({self.tzname[1].lower()})
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super()
base.__init__({
# The " \d" part of the regex is to make %c from ANSI C work
'd': r"(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])",
'f': r"(?P<f>[0-9]{1,6})",
'H': r"(?P<H>2[0-3]|[0-1]\d|\d)",
'I': r"(?P<I>1[0-2]|0[1-9]|[1-9])",
'j': r"(?P<j>36[0-6]|3[0-5]\d|[1-2]\d\d|0[1-9]\d|00[1-9]|[1-9]\d|0[1-9]|[1-9])",
'm': r"(?P<m>1[0-2]|0[1-9]|[1-9])",
'M': r"(?P<M>[0-5]\d|\d)",
'S': r"(?P<S>6[0-1]|[0-5]\d|\d)",
'U': r"(?P<U>5[0-3]|[0-4]\d|\d)",
'w': r"(?P<w>[0-6])",
# W is set below by using 'U'
'y': r"(?P<y>\d\d)",
#XXX: Does 'Y' need to worry about having less or more than
# 4 digits?
'Y': r"(?P<Y>\d\d\d\d)",
'z': r"(?P<z>[+-]\d\d[0-5]\d)",
'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone
for tz in tz_names),
'Z'),
'%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
        prevents the possibility of a match occurring for a value that is also
        a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
# The sub() call escapes all characters that might be misconstrued
# as regex syntax. Cannot use re.escape since we have to deal with
# format directives (%m, etc.).
regex_chars = re_compile(r"([\\.^$*+?\(\){}\[\]|])")
format = regex_chars.sub(r"\\\1", format)
whitespace_replacement = re_compile(r'\s+')
format = whitespace_replacement.sub(r'\\s+', format)
while '%' in format:
directive_index = format.index('%')+1
processed_format = "%s%s%s" % (processed_format,
format[:directive_index-1],
self[format[directive_index]])
format = format[directive_index+1:]
return "%s%s" % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
_cache_lock = _thread_allocate_lock()
# DO NOT modify _TimeRE_cache or _regex_cache without acquiring the cache lock
# first!
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5 # Max number of regexes stored in _regex_cache
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
# If we are dealing with the %U directive (week starts on Sunday), it's
# easier to just shift the view to Sunday being the first day of the
# week.
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
# Need to watch out for a week 0 (when the first day of the year is not
# the same as that specified by %U or %W).
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + (7 * (week_of_year - 1))
return 1 + days_to_week + day_of_week
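# Worked example: Jan 1, 1999 was a Friday, so with %W semantics
# (week_starts_Mon=True) week 0 covers Jan 1-3; calling
# _calc_julian_from_U_or_W(1999, week_of_year=1, day_of_week=0,
# week_starts_Mon=True) returns 4, i.e. Monday 1999-01-04.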
def _strptime(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a 2-tuple consisting of a time struct and an int containing
the number of microseconds based on the input string and the
format string."""
for index, arg in enumerate([data_string, format]):
if not isinstance(arg, str):
msg = "strptime() argument {} must be str, not {}"
raise TypeError(msg.format(index, type(arg)))
global _TimeRE_cache, _regex_cache
with _cache_lock:
locale_time = _TimeRE_cache.locale_time
if (_getlang() != locale_time.lang or
time.tzname != locale_time.tzname or
time.daylight != locale_time.daylight):
_TimeRE_cache = TimeRE()
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
# KeyError raised when a bad format is found; can be specified as
# \\, in which case it was a stray % but with a space after it
except KeyError as err:
bad_directive = err.args[0]
if bad_directive == "\\":
bad_directive = "%"
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format)) from None
# IndexError only occurs when the format string is "%"
except IndexError:
raise ValueError("stray %% in format '%s'" % format) from None
_regex_cache[format] = format_regex
found = format_regex.match(data_string)
if not found:
raise ValueError("time data %r does not match format %r" %
(data_string, format))
if len(data_string) != found.end():
raise ValueError("unconverted data remains: %s" %
data_string[found.end():])
year = None
month = day = 1
hour = minute = second = fraction = 0
tz = -1
tzoffset = None
# Default to -1 to signify that values not known; not critical to have,
# though
week_of_year = -1
week_of_year_start = -1
# weekday and julian defaulted to None so as to signal need to calculate
# values
weekday = julian = None
found_dict = found.groupdict()
for group_key in found_dict.keys():
# Directives not explicitly handled below:
# c, x, X
# handled by making out of other directives
# U, W
# worthless without day of the week
if group_key == 'y':
year = int(found_dict['y'])
            # Open Group specification for strptime() states that a %y
            # value in the range of [00, 68] is in the century 2000, while
            # [69, 99] is in the century 1900
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
# If there was no AM/PM indicator, we'll treat this like AM
if ampm in ('', locale_time.am_pm[0]):
# We're in AM so the hour is correct unless we're
# looking at 12 midnight.
# 12 midnight == 12 AM == hour 0
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
# We're in PM so we need to add 12 to the hour unless
# we're looking at 12 noon.
# 12 noon == 12 PM == hour 12
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'f':
s = found_dict['f']
# Pad to always return microseconds.
s += "0" * (6 - len(s))
fraction = int(s)
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
# U starts week on Sunday.
week_of_year_start = 6
else:
# W starts week on Monday.
week_of_year_start = 0
elif group_key == 'z':
z = found_dict['z']
tzoffset = int(z[1:3]) * 60 + int(z[3:5])
if z.startswith("-"):
tzoffset = -tzoffset
elif group_key == 'Z':
# Since -1 is default value only need to worry about setting tz if
# it can be something other than -1.
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
# Deal with bad locale setup where timezone names are the
# same and yet time.daylight is true; too ambiguous to
# be able to tell what timezone has daylight savings
if (time.tzname[0] == time.tzname[1] and
time.daylight and found_zone not in ("utc", "gmt")):
break
else:
tz = value
break
leap_year_fix = False
if year is None and month == 2 and day == 29:
year = 1904 # 1904 is first leap year of 20th century
leap_year_fix = True
elif year is None:
year = 1900
# If we know the week of the year and what day of that week, we can figure
# out the Julian day of the year.
if julian is None and week_of_year != -1 and weekday is not None:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
if julian <= 0:
year -= 1
yday = 366 if calendar.isleap(year) else 365
julian += yday
# Cannot pre-calculate datetime_date() since can change in Julian
# calculation and thus could have different value for the day of the week
# calculation.
if julian is None:
# Need to add 1 to result since first day of the year is 1, not 0.
julian = datetime_date(year, month, day).toordinal() - \
datetime_date(year, 1, 1).toordinal() + 1
else: # Assume that if they bothered to include Julian day it will
# be accurate.
datetime_result = datetime_date.fromordinal((julian - 1) + datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday is None:
weekday = datetime_date(year, month, day).weekday()
# Add timezone info
tzname = found_dict.get("Z")
if tzoffset is not None:
gmtoff = tzoffset * 60
else:
gmtoff = None
if leap_year_fix:
# the caller didn't supply a year but asked for Feb 29th. We couldn't
# use the default of 1900 for computations. We set it back to ensure
# that February 29th is smaller than March 1st.
year = 1900
return (year, month, day,
hour, minute, second,
weekday, julian, tz, tzname, gmtoff), fraction
def _strptime_time(data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a time struct based on the input string and the
format string."""
tt = _strptime(data_string, format)[0]
return time.struct_time(tt[:time._STRUCT_TM_ITEMS])
def _strptime_datetime(cls, data_string, format="%a %b %d %H:%M:%S %Y"):
"""Return a class cls instance based on the input string and the
format string."""
tt, fraction = _strptime(data_string, format)
tzname, gmtoff = tt[-2:]
args = tt[:6] + (fraction,)
if gmtoff is not None:
tzdelta = datetime_timedelta(seconds=gmtoff)
if tzname:
tz = datetime_timezone(tzdelta, tzname)
else:
tz = datetime_timezone(tzdelta)
args += (tz,)
return cls(*args)
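# Usage sketch (this vendored copy mirrors how CPython's datetime.strptime
# drives the same helper):
#     _strptime_datetime(datetime.datetime, "2015-06-01 12:30", "%Y-%m-%d %H:%M")
# returns datetime.datetime(2015, 6, 1, 12, 30).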
| bsd-3-clause |
atsolakid/edx-platform | lms/djangoapps/mobile_api/social_facebook/friends/tests.py | 128 | 14336 | # pylint: disable=E1101
"""
Tests for friends
"""
import json
import httpretty
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.factories import CourseFactory
from ..test_utils import SocialFacebookTestCase
class TestFriends(SocialFacebookTestCase):
"""
Tests for /api/mobile/v0.5/friends/...
"""
def setUp(self):
super(TestFriends, self).setUp()
self.course = CourseFactory.create()
@httpretty.activate
def test_no_friends_enrolled(self):
# User 1 set up
self.user_create_and_signin(1)
# Link user_1's edX account to FB
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], True)
# Set the interceptor
self.set_facebook_interceptor_for_friends(
{
'data':
[
{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']},
{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']},
{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']},
]
}
)
course_id = unicode(self.course.id)
url = reverse('friends-in-course', kwargs={"course_id": course_id})
response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN})
# Assert that no friends are returned
self.assertEqual(response.status_code, 200)
self.assertTrue('friends' in response.data and len(response.data['friends']) == 0)
@httpretty.activate
def test_no_friends_on_facebook(self):
# User 1 set up
self.user_create_and_signin(1)
# Enroll user_1 in the course
self.enroll_in_course(self.users[1], self.course)
self.set_sharing_preferences(self.users[1], True)
# Link user_1's edX account to FB
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
# Set the interceptor
self.set_facebook_interceptor_for_friends({'data': []})
course_id = unicode(self.course.id)
url = reverse('friends-in-course', kwargs={"course_id": course_id})
response = self.client.get(
url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}
)
# Assert that no friends are returned
self.assertEqual(response.status_code, 200)
self.assertTrue('friends' in response.data and len(response.data['friends']) == 0)
@httpretty.activate
def test_no_friends_linked_to_edx(self):
# User 1 set up
self.user_create_and_signin(1)
# Enroll user_1 in the course
self.enroll_in_course(self.users[1], self.course)
self.set_sharing_preferences(self.users[1], True)
# User 2 set up
self.user_create_and_signin(2)
# Enroll user_2 in the course
self.enroll_in_course(self.users[2], self.course)
self.set_sharing_preferences(self.users[2], True)
# User 3 set up
self.user_create_and_signin(3)
# Enroll user_3 in the course
self.enroll_in_course(self.users[3], self.course)
self.set_sharing_preferences(self.users[3], True)
# Set the interceptor
self.set_facebook_interceptor_for_friends(
{
'data':
[
{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']},
{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']},
{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']},
]
}
)
course_id = unicode(self.course.id)
url = reverse('friends-in-course', kwargs={"course_id": course_id})
response = self.client.get(
url,
{'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}
)
# Assert that no friends are returned
self.assertEqual(response.status_code, 200)
self.assertTrue('friends' in response.data and len(response.data['friends']) == 0)
@httpretty.activate
def test_no_friends_share_settings_false(self):
# User 1 set up
self.user_create_and_signin(1)
self.enroll_in_course(self.users[1], self.course)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], False)
self.set_facebook_interceptor_for_friends(
{
'data':
[
{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']},
{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']},
{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']},
]
}
)
url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)})
response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN})
        # Assert that no friends are returned when sharing is disabled
self.assertEqual(response.status_code, 200)
self.assertTrue('friends' in response.data)
self.assertTrue('friends' in response.data and len(response.data['friends']) == 0)
@httpretty.activate
def test_no_friends_no_oauth_token(self):
# User 1 set up
self.user_create_and_signin(1)
self.enroll_in_course(self.users[1], self.course)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], False)
self.set_facebook_interceptor_for_friends(
{
'data':
[
{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']},
{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']},
{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']},
]
}
)
url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)})
response = self.client.get(url, {'format': 'json'})
        # Assert that the request fails without an OAuth token
self.assertEqual(response.status_code, 400)
@httpretty.activate
def test_one_friend_in_course(self):
# User 1 set up
self.user_create_and_signin(1)
self.enroll_in_course(self.users[1], self.course)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], True)
self.set_facebook_interceptor_for_friends(
{
'data':
[
{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']},
{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']},
{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']},
]
}
)
url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)})
response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN})
# Assert that USERNAME_1 is returned
self.assertEqual(response.status_code, 200)
        self.assertIn('friends', response.data)
        self.assertIn('id', response.data['friends'][0])
        self.assertEqual(response.data['friends'][0]['id'], self.USERS[1]['FB_ID'])
        self.assertIn('name', response.data['friends'][0])
        self.assertEqual(response.data['friends'][0]['name'], self.USERS[1]['USERNAME'])
@httpretty.activate
def test_three_friends_in_course(self):
# User 1 set up
self.user_create_and_signin(1)
self.enroll_in_course(self.users[1], self.course)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], True)
# User 2 set up
self.user_create_and_signin(2)
self.enroll_in_course(self.users[2], self.course)
self.link_edx_account_to_social(self.users[2], self.BACKEND, self.USERS[2]['FB_ID'])
self.set_sharing_preferences(self.users[2], True)
# User 3 set up
self.user_create_and_signin(3)
self.enroll_in_course(self.users[3], self.course)
self.link_edx_account_to_social(self.users[3], self.BACKEND, self.USERS[3]['FB_ID'])
self.set_sharing_preferences(self.users[3], True)
self.set_facebook_interceptor_for_friends(
{
'data':
[
{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']},
{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']},
{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']},
]
}
)
url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)})
response = self.client.get(
url,
{'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN}
)
self.assertEqual(response.status_code, 200)
self.assertTrue('friends' in response.data)
# Assert that USERNAME_1 is returned
        self.assertIn('id', response.data['friends'][0])
        self.assertEqual(response.data['friends'][0]['id'], self.USERS[1]['FB_ID'])
        self.assertIn('name', response.data['friends'][0])
        self.assertEqual(response.data['friends'][0]['name'], self.USERS[1]['USERNAME'])
        # Assert that USERNAME_2 is returned
        self.assertIn('id', response.data['friends'][1])
        self.assertEqual(response.data['friends'][1]['id'], self.USERS[2]['FB_ID'])
        self.assertIn('name', response.data['friends'][1])
        self.assertEqual(response.data['friends'][1]['name'], self.USERS[2]['USERNAME'])
        # Assert that USERNAME_3 is returned
        self.assertIn('id', response.data['friends'][2])
        self.assertEqual(response.data['friends'][2]['id'], self.USERS[3]['FB_ID'])
        self.assertIn('name', response.data['friends'][2])
        self.assertEqual(response.data['friends'][2]['name'], self.USERS[3]['USERNAME'])
@httpretty.activate
def test_three_friends_in_paged_response(self):
# User 1 set up
self.user_create_and_signin(1)
self.enroll_in_course(self.users[1], self.course)
self.link_edx_account_to_social(self.users[1], self.BACKEND, self.USERS[1]['FB_ID'])
self.set_sharing_preferences(self.users[1], True)
# User 2 set up
self.user_create_and_signin(2)
self.enroll_in_course(self.users[2], self.course)
self.link_edx_account_to_social(self.users[2], self.BACKEND, self.USERS[2]['FB_ID'])
self.set_sharing_preferences(self.users[2], True)
# User 3 set up
self.user_create_and_signin(3)
self.enroll_in_course(self.users[3], self.course)
self.link_edx_account_to_social(self.users[3], self.BACKEND, self.USERS[3]['FB_ID'])
self.set_sharing_preferences(self.users[3], True)
self.set_facebook_interceptor_for_friends(
{
'data': [{'name': self.USERS[1]['USERNAME'], 'id': self.USERS[1]['FB_ID']}],
"paging": {"next": "https://graph.facebook.com/v2.2/me/friends/next_1"},
"summary": {"total_count": 652}
}
)
# Set the interceptor for the first paged content
httpretty.register_uri(
httpretty.GET,
"https://graph.facebook.com/v2.2/me/friends/next_1",
body=json.dumps(
{
"data": [{'name': self.USERS[2]['USERNAME'], 'id': self.USERS[2]['FB_ID']}],
"paging": {"next": "https://graph.facebook.com/v2.2/me/friends/next_2"},
"summary": {"total_count": 652}
}
),
status=201
)
# Set the interceptor for the last paged content
httpretty.register_uri(
httpretty.GET,
"https://graph.facebook.com/v2.2/me/friends/next_2",
body=json.dumps(
{
"data": [{'name': self.USERS[3]['USERNAME'], 'id': self.USERS[3]['FB_ID']}],
"paging": {
"previous":
"https://graph.facebook.com/v2.2/10154805434030300/friends?limit=25&offset=25"
},
"summary": {"total_count": 652}
}
),
status=201
)
url = reverse('friends-in-course', kwargs={"course_id": unicode(self.course.id)})
response = self.client.get(url, {'format': 'json', 'oauth_token': self._FB_USER_ACCESS_TOKEN})
self.assertEqual(response.status_code, 200)
self.assertTrue('friends' in response.data)
# Assert that USERNAME_1 is returned
        self.assertIn('id', response.data['friends'][0])
        self.assertEqual(response.data['friends'][0]['id'], self.USERS[1]['FB_ID'])
        self.assertIn('name', response.data['friends'][0])
        self.assertEqual(response.data['friends'][0]['name'], self.USERS[1]['USERNAME'])
        # Assert that USERNAME_2 is returned
        self.assertIn('id', response.data['friends'][1])
        self.assertEqual(response.data['friends'][1]['id'], self.USERS[2]['FB_ID'])
        self.assertIn('name', response.data['friends'][1])
        self.assertEqual(response.data['friends'][1]['name'], self.USERS[2]['USERNAME'])
        # Assert that USERNAME_3 is returned
        self.assertIn('id', response.data['friends'][2])
        self.assertEqual(response.data['friends'][2]['id'], self.USERS[3]['FB_ID'])
        self.assertIn('name', response.data['friends'][2])
        self.assertEqual(response.data['friends'][2]['name'], self.USERS[3]['USERNAME'])
| agpl-3.0 |
agreen757/adriangetsawesome | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/styles/autumn.py | 364 | 2144 | # -*- coding: utf-8 -*-
"""
pygments.styles.autumn
~~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class AutumnStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
default_style = ""
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #4c8317',
Comment.Special: 'italic #0000aa',
Keyword: '#0000aa',
Keyword.Type: '#00aaaa',
Operator.Word: '#0000aa',
Name.Builtin: '#00aaaa',
Name.Function: '#00aa00',
Name.Class: 'underline #00aa00',
Name.Namespace: 'underline #00aaaa',
Name.Variable: '#aa0000',
Name.Constant: '#aa0000',
Name.Entity: 'bold #800',
Name.Attribute: '#1e90ff',
Name.Tag: 'bold #1e90ff',
Name.Decorator: '#888888',
String: '#aa5500',
String.Symbol: '#0000aa',
String.Regex: '#009999',
Number: '#009999',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
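
# A minimal usage sketch (illustrative, not part of the original module):
# a Style subclass such as AutumnStyle can be handed directly to a
# Pygments formatter. The sample source string is an assumption chosen
# for demonstration only.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#
#   html = highlight("print('hello')", PythonLexer(),
#                    HtmlFormatter(style=AutumnStyle))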
| mit |
zverevalexei/trex-http-proxy | trex_client/external_libs/pyyaml-3.11/python3/yaml/error.py | 294 | 2533 |
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
class Mark:
def __init__(self, name, index, line, column, buffer, pointer):
self.name = name
self.index = index
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
def get_snippet(self, indent=4, max_length=75):
if self.buffer is None:
return None
head = ''
start = self.pointer
while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
start -= 1
if self.pointer-start > max_length/2-1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
end += 1
if end-self.pointer > max_length/2-1:
tail = ' ... '
end -= 5
break
snippet = self.buffer[start:end]
return ' '*indent + head + snippet + tail + '\n' \
+ ' '*(indent+self.pointer-start+len(head)) + '^'
def __str__(self):
snippet = self.get_snippet()
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line+1, self.column+1)
if snippet is not None:
where += ":\n"+snippet
return where
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None):
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
def __str__(self):
lines = []
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None:
lines.append(self.note)
return '\n'.join(lines)
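
# A minimal usage sketch (illustrative, not part of the original module):
# constructing a Mark by hand shows how the snippet and caret are rendered
# when a MarkedYAMLError is printed. The buffer content is an assumption
# for demonstration only.
#
#   buf = "key: [1, 2\n"
#   mark = Mark("<example>", 10, 0, 10, buf, 10)
#   raise MarkedYAMLError("while parsing a flow sequence", mark,
#                         "expected ',' or ']', but got end of line", mark)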
| mit |
andreadean5/python-hpOneView | examples/metric_streaming.py | 2 | 2754 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from pprint import pprint
from config_loader import try_load_from_file
from hpOneView.oneview_client import OneViewClient
config = {
"ip": "172.16.102.59",
"credentials": {
"userName": "administrator",
"password": ""
}
}
# Try load config from a file (if there is a config file)
config = try_load_from_file(config)
oneview_client = OneViewClient(config)
metrics_configuration = {
"sourceTypeList": [
{
"sourceType": "/rest/power-devices",
"sampleIntervalInSeconds": "300",
"frequencyOfRelayInSeconds": "3600"
},
{
"sourceType": "/rest/enclosures",
"sampleIntervalInSeconds": "600",
"frequencyOfRelayInSeconds": "3600"
},
{
"sourceType": "/rest/server-hardware",
"sampleIntervalInSeconds": "600",
"frequencyOfRelayInSeconds": "1800"
},
]
}
# Configure metric relay for server-hardware, enclosures and power-devices.
print("Configure metric streaming")
updated_metrics_configuration = oneview_client.metric_streaming.update_configuration(metrics_configuration)
pprint(updated_metrics_configuration)
# Get current relay configuration
print("Get current configuration")
current_configuration = oneview_client.metric_streaming.get_configuration()
pprint(current_configuration)
# Gets the list of all supported metrics and resource types.
print("Gets the list of all supported metrics and resource types")
supported_metrics = oneview_client.metric_streaming.get_capability()
pprint(supported_metrics)
| mit |
custode/reviewboard | reviewboard/site/decorators.py | 5 | 2318 | from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template.context import RequestContext
from djblets.util.decorators import simple_decorator
from reviewboard.site.models import LocalSite
@simple_decorator
def check_local_site_access(view_func):
"""Checks if a user has access to a Local Site.
    This checks whether or not the logged-in user is either a member of,
    or otherwise has access to, the given Local Site. If not, this shows
    a permission denied page; anonymous users on private sites are
    redirected to the login page instead.
"""
def _check(request, local_site_name=None, *args, **kwargs):
if local_site_name:
local_site = get_object_or_404(LocalSite, name=local_site_name)
if not local_site.is_accessible_by(request.user):
if local_site.public or request.user.is_authenticated():
response = render_to_response('permission_denied.html',
RequestContext(request))
response.status_code = 403
return response
else:
return HttpResponseRedirect(
'%s?next_page=%s'
% (reverse('login'), request.get_full_path()))
else:
local_site = None
return view_func(request, local_site=local_site, *args, **kwargs)
return _check
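
# A hypothetical usage sketch (the view name below is illustrative, not
# part of this module): the decorator resolves the LocalSite from the URL
# and passes it to the wrapped view as the ``local_site`` keyword argument.
#
#   @check_local_site_access
#   def review_list(request, local_site=None):
#       ...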
@simple_decorator
def check_localsite_admin(view_func):
"""Checks if a user is an admin on a Local Site.
This checks whether or not the logged-in user is marked as an admin for the
given local site. If not, this shows a permission denied page.
"""
def _check(request, local_site_name=None, *args, **kwargs):
if local_site_name:
site = get_object_or_404(LocalSite, name=local_site_name)
if not site.is_mutable_by(request.user):
response = render_to_response('permission_denied.html',
RequestContext(request))
response.status_code = 403
return response
return view_func(request, local_site_name=local_site_name,
*args, **kwargs)
return _check
| mit |
walterst/qiime | qiime/quality_scores_plot.py | 6 | 7014 | #!/usr/bin/env python
# File created Sept 29, 2010
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Walters"
__email__ = "[email protected]"
from matplotlib import use
use('Agg', warn=False)
from skbio.parse.sequences import parse_fasta
from numpy import arange, std, average
from matplotlib.pyplot import (plot, savefig, xlabel, ylabel, text,
hist, figure, legend, title, show,
xlim, ylim, xticks, yticks, scatter,
subplot)
from matplotlib.font_manager import fontManager, FontProperties
from qiime.util import gzip_open
from qiime.parse import parse_qual_score
def bin_qual_scores(qual_scores):
""" Bins qual score according to nucleotide position
qual_scores: Dict of label: numpy array of base scores
"""
qual_bins = []
qual_lens = []
for l in qual_scores.values():
qual_lens.append(len(l))
max_seq_size = max(qual_lens)
for base_position in range(max_seq_size):
qual_bins.append([])
for scores in qual_scores.values():
# Add score if exists in base position, otherwise skip
try:
qual_bins[base_position].append(scores[base_position])
except IndexError:
continue
return qual_bins
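
# Illustrative example (not part of the original module): reads of unequal
# length simply stop contributing to later bins; the order of scores inside
# each bin follows the dict's iteration order.
#
#   bin_qual_scores({'seq1': [30, 28, 25], 'seq2': [32, 27]})
#   # -> [[30, 32], [28, 27], [25]] (bin order fixed, intra-bin order not)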
def get_qual_stats(qual_bins, score_min):
""" Generates bins of averages, std devs, total NT from quality bins"""
ave_bins = []
std_dev_bins = []
total_bases_bins = []
found_first_poor_qual_pos = False
suggested_trunc_pos = None
for base_position in qual_bins:
total_bases_bins.append(len(base_position))
std_dev_bins.append(std(base_position))
ave_bins.append(average(base_position))
if not found_first_poor_qual_pos:
if average(base_position) < score_min:
suggested_trunc_pos = qual_bins.index(base_position)
found_first_poor_qual_pos = True
return ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos
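
# Illustrative example (not part of the original module): with score_min=25,
# the first bin whose average drops below 25 yields the suggested truncation
# position.
#
#   ave, std, totals, trunc = get_qual_stats([[30, 30], [26, 22], [20, 18]], 25)
#   # averages are 30.0, 24.0, 19.0, so trunc == 1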
def plot_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
score_min,
output_dir):
""" Plots, saves graph showing quality score averages, stddev.
Additionally, the total nucleotide count for each position is shown on
a second subplot
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
score_min: lowest value that a given base call can be and still be
    acceptable. Used to generate a dotted line on the graph for easy
    identification of the poor scoring positions.
output_dir: output directory
"""
t = arange(0, len(ave_bins), 1)
std_dev_plus = []
std_dev_minus = []
for n in range(len(ave_bins)):
std_dev_plus.append(ave_bins[n] + std_dev_bins[n])
std_dev_minus.append(ave_bins[n] - std_dev_bins[n])
figure_num = 0
f = figure(figure_num, figsize=(8, 10))
figure_title = "Quality Scores Report"
f.text(.5, .93, figure_title, horizontalalignment='center', size="large")
subplot(2, 1, 1)
plot(t, ave_bins, linewidth=2.0, color="black")
plot(t, std_dev_plus, linewidth=0.5, color="red")
dashed_line = [score_min] * len(ave_bins)
l, = plot(dashed_line, '--', color='gray')
plot(t, std_dev_minus, linewidth=0.5, color="red")
legend(
('Quality Score Average',
'Std Dev',
'Score Threshold'),
loc='lower left')
xlabel("Nucleotide Position")
ylabel("Quality Score")
subplot(2, 1, 2)
plot(t, total_bases_bins, linewidth=2.0, color="blue")
xlabel("Nucleotide Position")
ylabel("Nucleotide Counts")
outfile_name = output_dir + "/quality_scores_plot.pdf"
savefig(outfile_name)
def write_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
output_dir,
suggested_trunc_pos):
""" Writes data in bins to output text file
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
output_dir: output directory
suggested_trunc_pos: Position where average quality score dropped below
the score minimum (25 by default)
"""
outfile_name = output_dir + "/quality_bins.txt"
outfile = open(outfile_name, "w")
outfile.write("# Suggested nucleotide truncation position (None if " +
"quality score average did not drop below the score minimum threshold)" +
": %s\n" % suggested_trunc_pos)
outfile.write("# Average quality score bins\n")
outfile.write(",".join(str("%2.3f" % ave) for ave in ave_bins) + "\n")
outfile.write("# Standard deviation bins\n")
outfile.write(",".join(str("%2.3f" % std) for std in std_dev_bins) + "\n")
outfile.write("# Total bases per nucleotide position bins\n")
outfile.write(",".join(str("%d" %
total_bases) for total_bases in total_bases_bins))
def generate_histogram(qual_fp,
output_dir,
score_min=25,
verbose=True,
qual_parser=parse_qual_score):
""" Main program function for generating quality score histogram
qual_fp: quality score filepath
output_dir: output directory
score_min: minimum score to be considered a reliable base call, used
    to generate a dotted line on the histogram for easy visualization of poor
quality scores.
qual_parser : function to apply to extract quality scores
"""
if qual_fp.endswith('.gz'):
qual_lines = gzip_open(qual_fp)
else:
qual_lines = open(qual_fp, "U")
qual_scores = qual_parser(qual_lines)
# Sort bins according to base position
qual_bins = bin_qual_scores(qual_scores)
# Get average, std dev, and total nucleotide counts for each base position
ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos =\
get_qual_stats(qual_bins, score_min)
plot_qual_report(ave_bins, std_dev_bins, total_bases_bins, score_min,
output_dir)
# Save values to output text file
write_qual_report(ave_bins, std_dev_bins, total_bases_bins, output_dir,
suggested_trunc_pos)
if verbose:
print "Suggested nucleotide truncation position (None if quality " +\
"score average did not fall below the minimum score parameter): %s\n" %\
suggested_trunc_pos
| gpl-2.0 |
lootr/netzob | netzob/src/netzob/Inference/Vocabulary/FormatOperations/FieldSplitDelimiter.py | 2 | 13859 | # -*- coding: utf-8 -*-
# +---------------------------------------------------------------------------+
# | 01001110 01100101 01110100 01111010 01101111 01100010 |
# | |
# | Netzob : Inferring communication protocols |
# +---------------------------------------------------------------------------+
# | Copyright (C) 2011-2017 Georges Bossert and Frédéric Guihéry |
# | This program is free software: you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation, either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program. If not, see <http://www.gnu.org/licenses/>. |
# +---------------------------------------------------------------------------+
# | @url : http://www.netzob.org |
# | @contact : [email protected] |
# | @sponsors : Amossys, http://www.amossys.fr |
# | Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | File contributors : |
# | - Georges Bossert <georges.bossert (a) supelec.fr> |
# | - Frédéric Guihéry <frederic.guihery (a) amossys.fr> |
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | Standard library imports
# +---------------------------------------------------------------------------+
# +---------------------------------------------------------------------------+
# | Local application imports
# +---------------------------------------------------------------------------+
from netzob.Common.Utils.Decorators import typeCheck, NetzobLogger
from netzob.Model.Vocabulary.AbstractField import AbstractField
from netzob.Model.Vocabulary.Types.AbstractType import AbstractType
from netzob.Model.Vocabulary.Domain.DomainFactory import DomainFactory
from netzob.Model.Vocabulary.Field import Field
from netzob.Model.Vocabulary.Domain.Variables.Nodes.Alt import Alt
from netzob.Model.Vocabulary.Types.TypeConverter import TypeConverter
from netzob.Model.Vocabulary.Types.BitArray import BitArray
from netzob.Model.Vocabulary.Types.Raw import Raw
from netzob.Model.Vocabulary.Types.HexaString import HexaString
@NetzobLogger
class FieldSplitDelimiter(object):
# Static method
@staticmethod
@typeCheck(AbstractField, AbstractType)
def split(field, delimiter):
"""Split a field (or symbol) with a specific delimiter. The
        delimiter can be passed either as an ASCII, a Raw, a
        HexaString, or any object that inherits from AbstractType.
>>> from netzob.all import *
>>> samples = [b"aaaaff000000ff10", b"bbff110010ff00000011", b"ccccccccfffe1f000000ff12"]
>>> messages = [RawMessage(data=sample) for sample in samples]
>>> symbol = Symbol(messages=messages[:3])
>>> Format.splitDelimiter(symbol, ASCII("ff"))
>>> print(symbol)
Field-0 | Field-sep-6666 | Field-2 | Field-sep-6666 | Field-4
---------- | -------------- | ------------ | -------------- | ----------
'aaaa' | 'ff' | '000000' | 'ff' | '10'
'bb' | 'ff' | '110010' | 'ff' | '00000011'
'cccccccc' | 'ff' | 'fe1f000000' | 'ff' | '12'
---------- | -------------- | ------------ | -------------- | ----------
>>> samples = [b"434d446964656e74696679230400000066726564", b"5245536964656e74696679230000000000000000", b"434d44696e666f2300000000", b"524553696e666f230000000004000000696e666f", b"434d4473746174732300000000", b"52455373746174732300000000050000007374617473", b"434d4461757468656e7469667923090000006d7950617373776421", b"52455361757468656e74696679230000000000000000", b"434d44656e6372797074230a00000031323334353674657374", b"524553656e637279707423000000000a00000073707176777436273136", b"434d4464656372797074230a00000073707176777436273136", b"5245536465637279707423000000000a00000031323334353674657374", b"434d446279652300000000", b"524553627965230000000000000000", b"434d446964656e746966792307000000526f626572746f", b"5245536964656e74696679230000000000000000", b"434d44696e666f2300000000", b"524553696e666f230000000004000000696e666f", b"434d4473746174732300000000", b"52455373746174732300000000050000007374617473", b"434d4461757468656e74696679230a000000615374726f6e67507764", b"52455361757468656e74696679230000000000000000", b"434d44656e63727970742306000000616263646566", b"524553656e6372797074230000000006000000232021262724", b"434d44646563727970742306000000232021262724", b"52455364656372797074230000000006000000616263646566", b"434d446279652300000000", b"524553627965230000000000000000"]
>>> messages = [RawMessage(data=TypeConverter.convert(sample, HexaString, Raw)) for sample in samples]
>>> symbol = Symbol(messages=messages)
>>> symbol.encodingFunctions.add(TypeEncodingFunction(ASCII)) # Change visualization to hexastring
>>> Format.splitDelimiter(symbol, ASCII("#"))
>>> print(symbol)
Field-0 | Field-sep-23 | Field-2 | Field-sep-23 | Field-4
--------------- | ------------ | -------------------- | ------------ | -------
'CMDidentify' | '#' | '....fred' | '' | ''
'RESidentify' | '#' | '........' | '' | ''
'CMDinfo' | '#' | '....' | '' | ''
'RESinfo' | '#' | '........info' | '' | ''
'CMDstats' | '#' | '....' | '' | ''
'RESstats' | '#' | '........stats' | '' | ''
'CMDauthentify' | '#' | '....myPasswd!' | '' | ''
'RESauthentify' | '#' | '........' | '' | ''
'CMDencrypt' | '#' | '....123456test' | '' | ''
'RESencrypt' | '#' | "........spqvwt6'16" | '' | ''
'CMDdecrypt' | '#' | "....spqvwt6'16" | '' | ''
'RESdecrypt' | '#' | '........123456test' | '' | ''
'CMDbye' | '#' | '....' | '' | ''
'RESbye' | '#' | '........' | '' | ''
'CMDidentify' | '#' | '....Roberto' | '' | ''
'RESidentify' | '#' | '........' | '' | ''
'CMDinfo' | '#' | '....' | '' | ''
'RESinfo' | '#' | '........info' | '' | ''
'CMDstats' | '#' | '....' | '' | ''
'RESstats' | '#' | '........stats' | '' | ''
'CMDauthentify' | '#' | '....aStrongPwd' | '' | ''
'RESauthentify' | '#' | '........' | '' | ''
'CMDencrypt' | '#' | '....abcdef' | '' | ''
'RESencrypt' | '#' | '........' | '#' | " !&'$"
'CMDdecrypt' | '#' | '....' | '#' | " !&'$"
'RESdecrypt' | '#' | '........abcdef' | '' | ''
'CMDbye' | '#' | '....' | '' | ''
'RESbye' | '#' | '........' | '' | ''
--------------- | ------------ | -------------------- | ------------ | -------
>>> print(symbol.fields[0]._str_debug())
Field-0
|-- Alt
|-- Data (Raw=b'CMDidentify' ((0, 88)))
|-- Data (Raw=b'RESidentify' ((0, 88)))
|-- Data (Raw=b'CMDinfo' ((0, 56)))
|-- Data (Raw=b'RESinfo' ((0, 56)))
|-- Data (Raw=b'CMDstats' ((0, 64)))
|-- Data (Raw=b'RESstats' ((0, 64)))
|-- Data (Raw=b'CMDauthentify' ((0, 104)))
|-- Data (Raw=b'RESauthentify' ((0, 104)))
|-- Data (Raw=b'CMDencrypt' ((0, 80)))
|-- Data (Raw=b'RESencrypt' ((0, 80)))
|-- Data (Raw=b'CMDdecrypt' ((0, 80)))
|-- Data (Raw=b'RESdecrypt' ((0, 80)))
|-- Data (Raw=b'CMDbye' ((0, 48)))
|-- Data (Raw=b'RESbye' ((0, 48)))
Below is another example of the FieldSplitDelimiter usage: it splits fields based on a Raw string.
>>> from netzob.all import *
>>> samples = [b"\\x01\\x02\\x03\\xff\\x04\\x05\\xff\\x06\\x07", b"\\x01\\x02\\xff\\x03\\x04\\x05\\x06\\xff\\x07", b"\\x01\\xff\\x02\\x03\\x04\\x05\\x06"]
>>> messages = [RawMessage(data=sample) for sample in samples]
>>> symbol = Symbol(messages=messages)
>>> Format.splitDelimiter(symbol, Raw(b"\\xff"))
>>> print(symbol)
Field-0 | Field-sep-ff | Field-2 | Field-sep-ff | Field-4
-------------- | ------------ | ---------------------- | ------------ | ----------
'\\x01\\x02\\x03' | b'\\xff' | '\\x04\\x05' | b'\\xff' | '\\x06\\x07'
'\\x01\\x02' | b'\\xff' | '\\x03\\x04\\x05\\x06' | b'\\xff' | '\\x07'
'\\x01' | b'\\xff' | '\\x02\\x03\\x04\\x05\\x06' | '' | ''
-------------- | ------------ | ---------------------- | ------------ | ----------
        :param field : the field to consider when splitting
:type: :class:`netzob.Model.Vocabulary.AbstractField.AbstractField`
:param delimiter : the delimiter used to split messages of the field
:type: :class:`netzob.Model.Vocabulary.Types.AbstractType.AbstractType`
"""
if delimiter is None:
raise TypeError("Delimiter cannot be None.")
if field is None:
raise TypeError("Field cannot be None.")
if len(field.messages) < 1:
raise ValueError(
"The associated symbol does not contain any message.")
# Find message substrings after applying delimiter
splittedMessages = []
for cell in field.getValues(encoded=False, styled=False):
splittedMessage = cell.split(delimiter.value.tobytes())
splittedMessages.append(splittedMessage)
import itertools
        # Invert the array, so that columns contain the observed values for each field
splittedMessages = list(itertools.zip_longest(*splittedMessages))
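        # For example, list(zip_longest(*[[b'a', b'b'], [b'c', b'd']])) gives
        # [(b'a', b'c'), (b'b', b'd')]; shorter rows are padded with None.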
        # If the delimiter does not create split fields
if len(splittedMessages) <= 1:
return
# Else, we add (2*len(splittedMessages)-1) fields
newFields = []
iField = -1
for i in range(len(splittedMessages)):
iField += 1
fieldDomain = list()
# temporary set that hosts all the observed values to prevent useless duplicate ones
observedValues = set()
has_inserted_empty_value = False
isEmptyField = True # To avoid adding an empty field
for v in splittedMessages[i]:
if v != "" and v is not None:
isEmptyField = False
if v not in observedValues:
fieldDomain.append(Raw(v))
observedValues.add(v)
else:
if not has_inserted_empty_value:
fieldDomain.append(Raw(nbBytes=0))
has_inserted_empty_value = True
if not isEmptyField:
newField = Field(
domain=DomainFactory.normalizeDomain(fieldDomain),
name="Field-" + str(iField))
newField.encodingFunctions = list(
field.encodingFunctions.values())
newFields.append(newField)
iField += 1
str_delimiter = TypeConverter.convert(delimiter.value, BitArray,
HexaString).decode('utf-8')
fieldName = "Field-sep-{}".format(str_delimiter)
newFields.append(
Field(domain=Alt([delimiter, Raw(nbBytes=0)]), name=fieldName))
newFields.pop()
# Reset the field
from netzob.Inference.Vocabulary.Format import Format
Format.resetFormat(field)
# Create a field for each entry
field.fields = newFields
| gpl-3.0 |
k3nnyfr/s2a_fr-nsis | s2a/Python/Lib/test/script_helper.py | 82 | 5791 | # Common utility functions used by various script execution tests
# e.g. test_cmd_line, test_cmd_line_script and test_runpy
import sys
import os
import re
import os.path
import tempfile
import subprocess
import py_compile
import contextlib
import shutil
try:
import zipfile
except ImportError:
    # If Python is built without Unicode support, importing _io will
    # fail, which, in turn, means that zipfile cannot be imported.
    # Most of this module can then still be used.
pass
from test.test_support import strip_python_stderr
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
cmd_line = [sys.executable]
if not env_vars:
cmd_line.append('-E')
cmd_line.extend(args)
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
env.update(env_vars)
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
try:
out, err = p.communicate()
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
rc = p.returncode
err = strip_python_stderr(err)
if (rc and expected_success) or (not rc and not expected_success):
raise AssertionError(
"Process return code is %d, "
"stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
return rc, out, err
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(True, *args, **env_vars)
def assert_python_failure(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` fails and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(False, *args, **env_vars)
def python_exit_code(*args):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
with open(os.devnull, 'w') as devnull:
return subprocess.call(cmd_line, stdout=devnull,
stderr=subprocess.STDOUT)
def spawn_python(*args, **kwargs):
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
**kwargs)
def kill_python(p):
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
# try to cleanup the child so we don't appear to leak when running
# with regrtest -R.
p.wait()
subprocess._cleanup()
return data
def run_python(*args, **kwargs):
if __debug__:
p = spawn_python(*args, **kwargs)
else:
p = spawn_python('-O', *args, **kwargs)
stdout_data = kill_python(p)
return p.wait(), stdout_data
# Script creation utilities
@contextlib.contextmanager
def temp_dir():
dirname = tempfile.mkdtemp()
dirname = os.path.realpath(dirname)
try:
yield dirname
finally:
shutil.rmtree(dirname)
def make_script(script_dir, script_basename, source):
script_filename = script_basename+os.extsep+'py'
script_name = os.path.join(script_dir, script_filename)
script_file = open(script_name, 'w')
script_file.write(source)
script_file.close()
return script_name
def compile_script(script_name):
py_compile.compile(script_name, doraise=True)
if __debug__:
compiled_name = script_name + 'c'
else:
compiled_name = script_name + 'o'
return compiled_name
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
if name_in_zip is None:
name_in_zip = os.path.basename(script_name)
zip_file.write(script_name, name_in_zip)
zip_file.close()
#if test.test_support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, name_in_zip)
def make_pkg(pkg_dir):
os.mkdir(pkg_dir)
make_script(pkg_dir, '__init__', '')
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source, depth=1, compiled=False):
unlink = []
init_name = make_script(zip_dir, '__init__', '')
unlink.append(init_name)
init_basename = os.path.basename(init_name)
script_name = make_script(zip_dir, script_basename, source)
unlink.append(script_name)
if compiled:
init_name = compile_script(init_name)
script_name = compile_script(script_name)
unlink.extend((init_name, script_name))
pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
for name in pkg_names:
init_name_in_zip = os.path.join(name, init_basename)
zip_file.write(init_name, init_name_in_zip)
zip_file.write(script_name, script_name_in_zip)
zip_file.close()
for name in unlink:
os.unlink(name)
#if test.test_support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, script_name_in_zip)
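
# A minimal usage sketch (illustrative, not part of this module), combining
# the helpers above from within a test:
#
#   with temp_dir() as script_dir:
#       name = make_script(script_dir, 'hello', 'print "hi"')
#       rc, out, err = assert_python_ok(name)
#       assert out.strip() == 'hi'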
| gpl-3.0 |
kenxwagner/PythonPlay | Project/webscrap/websc/Lib/site-packages/pip/_internal/self_outdated_check.py | 9 | 8009 | # The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import datetime
import hashlib
import json
import logging
import os.path
import sys
from pip._vendor import pkg_resources
from pip._vendor.packaging import version as packaging_version
from pip._vendor.six import ensure_binary
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.utils.filesystem import (
adjacent_tmp_file,
check_path_owner,
replace,
)
from pip._internal.utils.misc import (
ensure_dir,
get_installed_version,
redact_auth_from_url,
)
from pip._internal.utils.packaging import get_installer
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
import optparse
from optparse import Values
from typing import Any, Dict, Text, Union
from pip._internal.network.session import PipSession
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
def make_link_collector(
session, # type: PipSession
options, # type: Values
suppress_no_index=False, # type: bool
):
# type: (...) -> LinkCollector
"""
:param session: The Session to use to make requests.
:param suppress_no_index: Whether to ignore the --no-index option
when constructing the SearchScope object.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index and not suppress_no_index:
logger.debug(
'Ignoring indexes: %s',
','.join(redact_auth_from_url(url) for url in index_urls),
)
index_urls = []
# Make sure find_links is a list before passing to create().
find_links = options.find_links or []
search_scope = SearchScope.create(
find_links=find_links, index_urls=index_urls,
)
link_collector = LinkCollector(session=session, search_scope=search_scope)
return link_collector
def _get_statefile_name(key):
# type: (Union[str, Text]) -> str
key_bytes = ensure_binary(key)
name = hashlib.sha224(key_bytes).hexdigest()
return name
class SelfCheckState(object):
def __init__(self, cache_dir):
# type: (str) -> None
self.state = {} # type: Dict[str, Any]
self.statefile_path = None
# Try to load the existing state
if cache_dir:
self.statefile_path = os.path.join(
cache_dir, "selfcheck", _get_statefile_name(self.key)
)
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, ValueError, KeyError):
# Explicitly suppressing exceptions, since we don't want to
# error out if the cache file is invalid.
pass
@property
def key(self):
return sys.prefix
def save(self, pypi_version, current_time):
# type: (str, datetime.datetime) -> None
# If we do not have a path to cache in, don't bother saving.
if not self.statefile_path:
return
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
state = {
# Include the key so it's easy to tell which pip wrote the
# file.
"key": self.key,
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
text = json.dumps(state, sort_keys=True, separators=(",", ":"))
with adjacent_tmp_file(self.statefile_path) as f:
f.write(ensure_binary(text))
try:
# Since we have a prefix-specific state file, we can just
# overwrite whatever is there, no need to check.
replace(f.name, self.statefile_path)
except OSError:
# Best effort.
pass
def was_installed_by_pip(pkg):
# type: (str) -> bool
"""Checks whether pkg was installed by pip
This is used not to display the upgrade message when pip is in fact
installed by system package manager, such as dnf on Fedora.
"""
try:
dist = pkg_resources.get_distribution(pkg)
return "pip" == get_installer(dist)
except pkg_resources.DistributionNotFound:
return False
def pip_self_version_check(session, options):
# type: (PipSession, optparse.Values) -> None
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if not installed_version:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = SelfCheckState(cache_dir=options.cache_dir)
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if (current_time - last_check).total_seconds() < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
# Lets use PackageFinder to see what the latest pip version is
link_collector = make_link_collector(
session,
options=options,
suppress_no_index=True,
)
# Pass allow_yanked=False so we don't suggest upgrading to a
# yanked version.
selection_prefs = SelectionPreferences(
allow_yanked=False,
allow_all_prereleases=False, # Explicitly set to False
)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
best_candidate = finder.find_best_candidate("pip").best_candidate
if best_candidate is None:
return
pypi_version = str(best_candidate.version)
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
local_version_is_older = (
pip_version < remote_version and
pip_version.base_version != remote_version.base_version and
was_installed_by_pip('pip')
)
# Determine if our pypi_version is older
if not local_version_is_older:
return
# We cannot tell how the current pip is available in the current
# command context, so be pragmatic here and suggest the command
# that's always available. This does not accommodate spaces in
# `sys.executable`.
pip_cmd = "{} -m pip".format(sys.executable)
logger.warning(
"You are using pip version %s; however, version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command.",
pip_version, pypi_version, pip_cmd
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
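
# A hypothetical call-site sketch (``options`` would come from pip's own
# command-line parsing; spelling it out here is illustrative only):
#
#   session = PipSession()
#   pip_self_version_check(session, options)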
| mit |
fablabnbg/inkscape-silhouette | silhouette/Strategy.py | 1 | 40922 | # (c) 2013 [email protected]
#
# Strategy.py -- cut strategy algorithms for a Graphtec Silhouette Cameo plotter.
#
# In order to support operation without a cutting mat, a strategic
# rearrangement of cuts is helpful.
# e.g.
# * With a knife, sharp turns are to be avoided. They easily rupture the paper.
# * With some pens, the paper may become unstable if soaked with too much ink.
# Avoid backwards or inwards strokes.
# * In general, cut paper is fragile. Do not move backwards and cut, where other cuts
# were placed. We require (strict) monotonic progression along the sheet with
# minimal backwards movement.
#
# 2013-05-21, jw, V0.1 -- initial draught.
# 2013-05-23, jw, V0.2 -- dedup, subdivide, two options for sharp turn detectors added.
# draft for simple_barrier() added.
# 2013-05-25, jw, V0.3 -- corner_detect.py now jumps when not cutting.
# Strategy.py: new code: unlink_segment(), append_or_extend_simple().
# completed process_simple_barrier(), tested, debugged, verbose level reduced.
# The current slicing and sharp corner strategy appears useful.
# 2013-05-26, jw, V1.0 -- adopted version number from inkscape_silhouette package.
# improved path extension logic in append_or_extend_hard(),
# much better, but still not perfect.
# Verbose printf's to stderr, so that inkscape survives.
# 2013-05-26, jw, V1.1 -- path_overshoot() added, this improves quality
# and the paper now comes apart by itself.
# Added algorithm prose for process_pyramids_barrier()
# 2013-05-31, jw, V1.3 -- renamed sharp_turn() to sharp_turn_90, added sharp_turn_45(), sharp_turn_63()
# Using .x, .y syntax provided by class XY_a() instead of [0], [1] everywhere.
# ccw() and sharp_turn*() now global. No class needed.
#                        Using class Barrier from Geometry in the main loop of pyramids_barrier()
from __future__ import print_function
import copy # deepcopy
import math # sqrt
import sys # maxsize
from silhouette.Geometry import *
presets = {
'default': {
'pyramids_algorithm': False,
'corner_detect_min_jump': 2,
'corner_detect_dup_epsilon': 0.1,
'monotone_back_travel': 10.0,
'sharp_turn_fwd_ratio': 0.0,
'barrier_increment': 10.0,
'overshoot': 0.2, # works well with 80g paper
'tool_pen': False,
'verbose': 1
},
'pyramids': {
'pyramids_algorithm': True,
'monotone_back_travel': 5.0,
'sharp_turn_fwd_ratio': 0.5,
'overshoot': 0.2, # works well with 80g paper
'min_subdivide':0.5,
'tool_pen': False,
'do_slicing': True,
'verbose': 1
},
'nop': {
'do_dedup': False,
'do_subdivide': False,
'do_slicing': False,
'overshoot': 0,
'tool_pen': False,
'verbose': 2
}
}
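
# A minimal usage sketch (illustrative; a full driver would also run the
# processing passes defined below before calling export()):
#
#   mf = MatFree('pyramids', scale=25.4/90.0)   # 90dpi SVG units -> mm
#   mf.load(cut_paths)   # cut_paths: a list of paths, each a list of (x, y)
#   paths_out = mf.export()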
class MatFree:
def __init__(self, preset="default", scale=1.0, pen=None):
"""This initializer defines settings for the apply() method.
A scale factor is applied to convert input data units to mm.
This is needed, as the length units used in presets are mm.
"""
self.verbose = 0
self.do_dedup = True
self.do_subdivide = True
self.do_slicing = True
self.tool_pen = False
self.barrier_increment = 3.0 # used only in simple_barrier()
self.overshoot = 0.0
self.min_subdivide = 0.5 # may subdivide. if needed.
self.min_segmentlen = 0.1 # drop segments shorter than this.
    self.monotone_back_travel = 3.0  # used in both simple_barrier() and pyramids_barrier()
self.sharp_turn_fwd_ratio = 0.99 # 0.5 == 63 deg, 1.0 == 45 deg
self.input_scale = scale
self.pyramids_algorithm = False
self.preset(preset)
if pen is not None:
self.tool_pen = pen
self.min_subdivide_sq = self.min_subdivide * self.min_subdivide
# this avoids a busyloop after hitting Y_bar:
if self.min_segmentlen < 0.001: self.min_segmentlen = 0.001
self.points = []
self.points_dict = {}
self.paths = []
def list_presets(self):
return copy.deepcopy(presets)
def preset(self, pre_name):
if not pre_name in presets:
raise ValueError(pre_name+': no such preset. Try "'+'", "'.join(presets.keys())+'"')
pre = presets[pre_name]
for k in pre.keys():
self.__dict__[k] = pre[k]
def export(self):
"""reverse of load(), except that the nodes are tuples of
[x, y, { ... attrs } ]
Most notable attributes:
- 'sharp', it is present on nodes where the path turns by more
than 90 deg.
"""
cut = []
for path in self.paths:
new_path = []
for pt in path:
new_path.append(self.points[pt])
cut.append(new_path)
return cut
def pt2idx(self, x,y):
"""all points have an index, if the index differs, the point
is at a different locations. All points also have attributes
stored with in the point object itself. Points that appear for the second
time receive an attribute 'dup':1, which is incremented on further reoccurences.
"""
k = str(x)+','+str(y)
if k in self.points_dict:
idx = self.points_dict[k]
if self.verbose:
print("%d found as dup" % idx, file=sys.stderr)
if 'dup' in self.points[idx].attr:
self.points[idx].dup += 1
else:
self.points[idx].dup = 1
else:
idx = len(self.points)
self.points.append(XY_a((x,y)))
self.points_dict[k] = idx
self.points[idx].id = idx
return idx
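  # Illustrative example (not part of the original code): feeding the same
  # coordinates twice returns the same index and bumps the 'dup' counter.
  #
  #   i0 = s.pt2idx(10.0, 5.0)   # new point appended, its index returned
  #   i1 = s.pt2idx(10.0, 5.0)   # key '10.0,5.0' found: i1 == i0, dup == 1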
def load(self, cut):
"""load a sequence of paths.
Nodes are expected as tuples (x, y).
    We extract points into a separate list, with attributes as a third
    element to the tuple. Typical attributes to be added by other methods:
    id, seg[] by method link_points(), sharp by method mark_sharp_segs(),
...
"""
for path in cut:
new_path = []
for point in path:
idx = self.pt2idx(self.input_scale * point[0], self.input_scale * point[1])
if len(new_path) == 0 or new_path[-1] != idx or self.do_dedup == False:
# weed out repeated points
new_path.append(idx)
# self.points[idx].refcount += 1
self.paths.append(new_path)
def link_points(s):
"""add segments (back and forth) between connected points.
"""
for path in s.paths:
A = None
for pt in path:
if A is not None:
if 'seg' in s.points[A].attr:
s.points[A].seg.append(pt)
else:
s.points[A].seg = [ pt ]
if 'seg' in s.points[pt].attr:
s.points[pt].seg.append(A)
else:
s.points[pt].seg = [ A ]
A = pt
def subdivide_segments(s, maxlen):
"""Insert addtional points along the paths, so that
no segment is longer than maxlen
"""
if s.do_subdivide == False:
return
maxlen_sq = maxlen * maxlen
for path_idx in range(len(s.paths)):
path = s.paths[path_idx]
new_path = []
for pt in path:
if len(new_path):
A = new_path[-1]
dist_a_pt_sq = dist_sq(s.points[A], s.points[pt])
if dist_a_pt_sq > maxlen_sq:
dist = math.sqrt(dist_a_pt_sq)
nsub = int(dist/maxlen)
seg_len = dist/float(nsub+1)
dx = (s.points[pt].x - s.points[A].x)/float(nsub+1)
dy = (s.points[pt].y - s.points[A].y)/float(nsub+1)
if s.verbose > 1:
print("pt%d -- pt%d: need nsub=%d, seg_len=%g" % (A,pt,nsub,seg_len), file=sys.stderr)
print("dxdy", dx, dy, "to", (s.points[pt].x, s.points[pt].y), "from", (s.points[A].x,s.points[A].y), file=sys.stderr)
for subdiv in range(nsub):
sub_pt =s.pt2idx(s.points[A].x+dx+subdiv*dx,
s.points[A].y+dy+subdiv*dy)
new_path.append(sub_pt)
s.points[sub_pt].sub = True
if s.verbose > 1:
print(" sub", (s.points[sub_pt].x, s.points[sub_pt].y), file=sys.stderr)
new_path.append(pt)
s.paths[path_idx] = new_path
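  # Worked example (illustrative): a 10mm segment with maxlen=3 yields
  # nsub = int(10/3) = 3 inserted points and seg_len = 10/4 = 2.5, so all
  # four resulting pieces stay below maxlen.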
def mark_sharp_segs(s):
"""walk all the points and check their segments attributes,
to see if there are connections that form a sharp angle.
This needs link_points() to be called earlier.
One sharp turn per point is enough to make us careful.
We don't track which pair of turns actually is a sharp turn, if there
are more than two segs. Those cases are rare enough to allow the inefficiency.
TODO: can honor corner_detect_min_jump? Even if so, what should we do in the case
where multiple points are so close together that the paper is likely to tear?
"""
for pt in s.points:
if 'sharp' in pt.attr:
## shortcut existing flags. One sharp turn per point is enough to make us careful.
## we don't want to track which pair of turns actually is a sharp turn, if there
## are more than two segments per point. Those cases are rare enough
## to handle them inefficiently.
continue
if 'seg' in pt.attr:
ll = len(pt.seg)
# if ll > 4:
# ## You cannot attach 5 lines to a point without creating one sharp angle.
# ## This is true for sharp turn defined as >90 degree.
# pt.sharp = True
# continue
## look at each pair of segments once, check their angle.
for l1 in range(ll):
A = s.points[pt.seg[l1]]
for l2 in range(l1+1, ll):
B = s.points[pt.seg[l2]]
if sharp_turn(A,pt,B, s.sharp_turn_fwd_ratio):
pt.sharp = True
if 'sharp' in pt.attr:
break
else:
print("warning: no segments in point %d. Run link_points() before mark_sharp_segs()" % (pt.id), file=sys.stderr)
def mark_sharp_paths(s):
"""walk through all paths, and add an attribute { 'sharp': True } to the
points that respond true with the sharp_turn() method.
Caution: mark_sharp_paths() walks in the original order, which may be irrelevant
after reordering.
This marks sharp turns only if paths are not intersecting or touching back.
Assuming segment counts <= 2. Use mark_sharp_segs() for the general case.
Downside: mark_sharp_segs() does not honor corner_detect_min_jump.
"""
## Caution: unused code, but some nice ideas here: min_jump and dup_epsilon.
    ## We keep this code around for reference.
min_jump_sq = s.corner_detect_min_jump * s.corner_detect_min_jump
dup_eps_sq = s.corner_detect_dup_epsilon * s.corner_detect_dup_epsilon
idx = 1
A = None
B = None
for path in s.paths:
if B is not None and len(path) and dist_sq(B, s.points[path[0]]) > min_jump_sq:
# disconnect the path, if we jump more than 2mm
A = None
B = None
for iC in path:
C = s.points[iC]
if B is not None and dist_sq(B,C) < dup_eps_sq:
# less than 0.1 mm distance: ignore the point as a duplicate.
continue
if A is not None and sharp_turn(A,B,C, s.sharp_turn_fwd_ratio):
B.sharp = True
A = B
B = C
#
#
def append_or_extend_hard(s, seg):
"""adds a segment to the output list. The segment extends the previous segment,
    if the last point of the previous segment is identical with our first
point. If the segment has no sharp points, we double check if extend
would work with the inverted segment. Optionally also flipping around
the previous segment if it would help. (FIXME: this possibility should
be detected earlier)
Otherwise, the segment is appended as a new path.
"""
if not 'output' in s.__dict__: s.output = []
if len(s.output) and s.verbose > 1:
print("append_or_extend_hard...", s.output[-1][-1], seg, file=sys.stderr)
if (len(s.output) > 0 and len(s.output[-1]) >= 2 and
'sharp' not in s.output[-1][0] and
'sharp' not in s.output[-1][-1]):
# we could flip around the previous segment, if needed:
if (s.output[-1][0].id == seg[0].id or
s.output[-1][0].id == seg[-1].id):
# yes, flipping the previous segment, will help below. do it.
s.output[-1] = list(reversed(s.output[-1]))
if s.verbose:
print("late flip ", len(s.output), len(s.output[-1]), file=sys.stderr)
#
#
if len(s.output) > 0 and s.output[-1][-1].id == seg[0].id:
s.output[-1].extend(seg[1:])
if s.verbose > 1:
print("... extend", file=sys.stderr)
elif len(s.output) > 0 and s.output[-1][-1].id == seg[-1].id:
## check if we can turn it around
if 'sharp' not in s.output[-1][-1].attr and 'sharp' not in seg[-1].attr and 'sharp' not in seg[0].attr:
s.output[-1].extend(list(reversed(seg))[1:])
if s.verbose > 1:
print("... extend reveresed", file=sys.stderr)
else:
s.output.append(seg)
if s.verbose > 1:
print("... append", file=sys.stderr)
#
else:
s.output.append(seg)
if s.verbose > 1:
print("... append", file=sys.stderr)
#
def append_or_extend_simple(s, seg):
"""adds a segment to the output list. The segment extends the previous segment,
    if the last point of the previous segment is identical with our first
point.
Otherwise, the segment is appended as a new path.
"""
if not 'output' in s.__dict__: s.output = []
if len(s.output) and s.verbose > 2:
print("append_or_extend_simple...", s.output[-1][-1], seg, file=sys.stderr)
if len(s.output) > 0 and s.output[-1][-1].id == seg[0].id:
s.output[-1].extend(seg[1:])
if s.verbose > 1:
print("... extend", file=sys.stderr)
else:
s.output.append(seg)
if s.verbose > 1:
print("... append", file=sys.stderr)
#
def unlink_segment(s, A, B):
"""Remove the segment [AB] from the s.points list.
    The segment is removed by replacing its slot with a negative number.
The endpoints are marked with seen=True so that in case of a sharp turn,
we know we can no longer start there.
If now A or B are without other active segments, A and/or B are dropped
entirely from s.points .
process_simple_barrier() and process_pyramids_barrier() ignore points and segments
that have already been done. This asserts progress in the algorithms.
"""
A.seen = True
B.seen = True
iA = A.id
iB = B.id
a_seg_todo = False
b_seg_todo = False
for iS in range(len(A.seg)):
if A.seg[iS] == iB: A.seg[iS] = -iB or -sys.maxsize
if A.seg[iS] >= 0: a_seg_todo = True
for iS in range(len(B.seg)):
if B.seg[iS] == iA: B.seg[iS] = -iA or -sys.maxsize
if B.seg[iS] >= 0: b_seg_todo = True
# CAUTION: is this really helpful?:
    ## it prevents points from a slice from going into process_simple_barrier()'s segment list,
## but it also hides information....
if not a_seg_todo: s.points[iA] = None
if not b_seg_todo: s.points[iB] = None
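  # Illustrative example (not part of the original code): with A.id == 3 and
  # B.id == 7, unlinking stores -7 in A.seg and -3 in B.seg. The
  # "-iB or -sys.maxsize" idiom keeps point id 0 markable, since -0 would be
  # indistinguishable from an active segment slot.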
def shortcut_segment(self, A, B, C):
""" Asuming [AC],[CB] are segments (in A.seg, B.seg, C.seg)
we remove C as the intermediate link and direcly connect [AB]
This removes C from self.points (by replacing with None) if
C has no other segments.
This is the opposite of subdivide_segment()
"""
a_seg_idx = None
for n in range(0,len(A.seg)):
if A.seg[n] == C.id:
a_seg_idx = n
break
b_seg_idx = None
for n in range(0,len(B.seg)):
if B.seg[n] == C.id:
b_seg_idx = n
break
c_a_seg_idx = None
for n in range(0,len(C.seg)):
if C.seg[n] == A.id:
c_a_seg_idx = n
break
c_b_seg_idx = None
for n in range(0,len(C.seg)):
if C.seg[n] == B.id:
c_b_seg_idx = n
break
if None in (a_seg_idx, b_seg_idx, c_a_seg_idx, c_b_seg_idx):
raise ValueError("shortcut_segment cannot find [AC] and [CB] seg.", a_seg_idx, b_seg_idx, c_a_seg_idx, c_b_seg_idx)
A.seg[a_seg_idx] = B.id
B.seg[b_seg_idx] = A.id
C.seg[c_a_seg_idx] = -A.id or -sys.maxsize
C.seg[c_b_seg_idx] = -B.id or -sys.maxsize
if len(C.seg) == 2:
C.attr['obsolete'] = True
self.points[C.id] = None
print("shortcut_segment: point C obsoleted. A,B,C:", A, B, C, C.att(), file=sys.stderr)
def subdivide_segment(self, A, B, C):
""" Asuming [AB] is a segment (A.seg has B and B.seg has A),
we insert C as an intermediate link [AC],[CB].
This also adds C to self.points .
Returns True, if subdivision was done.
Returns False, if [AB] was shorter than min_subdivide.
"""
print("subdivide_segment A,B,C: ", A,A.att(), B,B.att(), C,C.att(), file=sys.stderr)
if dist_sq(A, B) < self.min_subdivide_sq:
print(" ---- too short, nothing done.", file=sys.stderr)
# should be caught earlier!
sys.exit(0)
a_seg_idx = None
for n in range(0,len(A.seg)):
if A.seg[n] == B.id:
a_seg_idx = n
break
b_seg_idx = None
for n in range(0,len(B.seg)):
if B.seg[n] == A.id:
b_seg_idx = n
break
if b_seg_idx is None or a_seg_idx is None:
raise ValueError("A,B not linked???")
C.sub = True
C.id = len(self.points)
self.points.append(C)
A.seg[a_seg_idx] = C.id
B.seg[b_seg_idx] = C.id
C.seg = [A.id,B.id]
return True
def output_add(s, A, B, cut=False):
"""If cut=True, output the segment [AB] as a cut.
Otherwise jump to B, starting a new path there.
A is passed so that we can check that this is really the last point
we have visited. If not, a jump to A is inserted, before the cut.
If cut is False, we can directly jump the output list to B.
This is a simpler version of append_or_extend_simple(), which recombines
segments into paths. We don't.
Caller is responsible for postprocessing afterwards:
* remove pathological short segments.
* flip segments so that we can
* recombine segments into paths.
"""
if 'output' not in s.__dict__: s.output = []
if s.verbose >= 1:
if len(s.output):
print("output_add", s.output[-1][-1], A, B, file=sys.stderr)
else:
print("output_add", None, A, B, file=sys.stderr)
#
print("\t....................................", file=sys.stderr)
if len(s.output) > 30:
sys.exit(2)
if cut:
s.output.append([A,B])
else:
s.output.append([A]) # quite useless....
s.output.append([B])
def _dump_all(s):
""" dump all points in a readable way.
"""
for iP in range(0,len(s.points)):
pt = s.points[iP]
if pt is None: continue
a = pt.att() if pt else None
print(iP, ": ", pt, a)
def process_pyramids_barrier(s, y_slice, max_y, left2right=True):
""" finding the next point involves overshadowing other points.
Our assumption is, that it is save to cut the paper at point A,
whenever there is a triangle sitting on the baseline (where the
transport rollers are) with 2x 45 degree coming from both sides,
meeting at 90 degrees at point A, so that the inside of the
triangle is free of any cuts.
We prefer to cut away from the rollers, if possible, but that is a
subordinate rule -- applicable whenever the cut direction can
be freely chosen. If point A is already part of a cut, then we cut the
path A-B always towards A, never starting at A.
A horizontal barrier Y_bar exists that limits our downward movement temporarily.
We expect to be called again with a lowered Y_bar (increased max_y; it counts downwards).
Another barrier Xf_bar is a forward slanted 45 degree barrier that is swept sideways.
Points become eligible, if they are above Y_bar and behind Xf_bar.
We start with the sideways barrier from left to right aka increasing x.
In this case 'behind' means to the left of Xf_bar. (Every second sweep
will be the opposite direction, but below only left to right is
discussed).
The very first point that is behind Xf_bar is the starting point A. Then we iterate:
From any previous point A, we prefer to follow a line segment to reach
the next point B. Segments are eligible, if their B is rightward from A,
(B.x greater than or equal to A.x). We choose the segment with the lowest B.y coordinate
if there is any choice and check the following conditions:
a) B is below Y_bar.
Compute point C as the intersection of Y_bar with [AB]. Replace
the segment [AB] by segments [AC], [CB]. Let B and C swap names.
b) B is 45 degrees or more downwards from A (B.x-A.x < B.y-A.y)
We make an extra check to see if B would overshadow any point in the other
direction. Temporarily apply a backwards slanted barrier Xb_bar in A.
While moving the barrier to B, stop at the first point D to the left of AB
(i.e. ccw(A,B,D) == True) it hits, if any.
If so, position Xb_bar in D, compute point E as the intersection of Xb_bar
with A-B. Replace the segment [AB] by segments [AE], [EB].
If we have a point C remembered from a), then replace segments [EB], [BC] with [EC]
and garbage collect point B and swap back roles B and C.
Let B and E swap names.
c) B is more than 45 degrees upwards from A. This overshadows A. But we ignore
that A may have more segments to be done. We keep that B and consider
the issue with A unsolvable.
Note that 'advancing' from A to B in this case is actually backwards in
Xf_bar's view.
If we now have no B, then we simply move the sideways barrier to reveal our
next A -- very likely a jump rather than a cut. If no todo segments are left in
the old A, drop that old A. Iterate.
But if we have a B and it is not case c), then we tentatively advance Xf_bar
from A to B and record all new points F[] in the order we pass them. We
don't care about them, if they are all 'below' (on the right hand side
of) segment [AB]. For the first point F, that has ccw(A,B,F) == True,
we position Xf_bar in F, if any. If so, we compute point G as the
intersection of Xf_bar with [AB]. Replace the segment [AB] by segments
[AG], [GB]. We cut segment [AG]. We make F our next A - very likely a
jump. If no todo segments are left in the old A, drop that old A.
Iterate.
Exception for all subdivide actions above: if the segment is shorter than
self.min_subdivide, then just keep it as is.
If subdividing produces segments shorter than self.min_segmentlen, then we later
garbage collect such segments. Overshoot should be more than min_segmentlen to
compensate for this.
If iteration exhausts, we are done with this processing sweep and
report back the lowest remaining min_y coordinate of all points we left
behind with segments todo. The next sweep will go the other direction.
Caller should call us again with direction toggled the other way, and
possibly advancing max_y = min_y + monotone_back_travel. The
variable barrier_increment is not used here, as we compute the
increment.
In the above context, 'cutting' a segment means to add it to the output
list and to deactivate its seg[] entries in the endpoints. Endpoints
without active segments do not contribute to the min_y computation
above, they are dropped.
When all points are dropped, we did our final sweep and return min_y =
None. It is the caller's responsibility to check the direction of each cut
in the s.output list with regard to sharp points and cutting-towards-the-rollers.
Assert that we cut at least one segment per sweep or drop at least one
point per sweep. Also the number of added segments and points should
be less than what we eventually output and drop.
If not, the above algorithm may never end.
"""
Xf_bar = Barrier(y_slice, key=lambda a: a[0]+a[1] if a else 0) # forward: / moving ->
Xb_bar = Barrier(y_slice, key=lambda a: -a[0]+a[1] if a else 0) # backwards: \ moving <-
if not left2right: # forward: \ moving <-
Xf_bar,Xb_bar = Xb_bar,Xf_bar # backwards: / moving ->
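# Illustration of the keys (assumed 2D points): sorting by x+y sweeps a
# forward-slanted 45-degree line '/' across the plane, sorting by y-x
# sweeps the mirrored '\'. E.g. for points (0,2), (1,0), (2,1) the
# forward order is (1,0), (0,2), (2,1).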
A = Xf_bar.point()
while True:
if A is None:
Ai = Xf_bar.next()
if Ai is None: break
A = Xf_bar.point()
continue
print("process_pyramids_barrier", left2right, A, A.att(), file=sys.stderr)
B = None
a_todo = 0
for Bi in A.seg:
if Bi >= 0: # segment still to do
a_todo += 1
pt = s.points[Bi]
if A.y+s.min_segmentlen >= max_y and pt.y > A.y:
continue # Do not look downward when close to max_y.
# This avoids a busyloop after hitting Y_bar:
# ... subdiv, advance, retry, hit, subdiv, ...
if left2right:
if pt.x >= A.x:
if B is None or not ccw(A,B,pt): # find the leftmost of all [AB]
B = pt
else: # not left2right
if pt.x <= A.x:
if B is None or ccw(A,B,pt): # find the rightmost of all [AB]
B = pt
if B is None:
print("no more forward segments", A, a_todo)
Xb_bar.find(A, start=0)
if a_todo == 0:
s.points[A.id] = None # drop A
while True:
Ai = Xf_bar.next()
A = None
print("xx next Ai candidate", Ai, file=sys.stderr)
if Ai is None: break
A = Xf_bar.point()
print("xx next A candidate", A, file=sys.stderr)
if A is None: break
if not Xb_bar.ahead(A):
break
else:
print("process_pyramids_barrier jump: Ignored A, ahead of Xb_bar", A, file=sys.stderr)
print("process_pyramids_barrier jump to next A", A, file=sys.stderr)
continue # just advance Xf_bar: jump
print("segment to check a), b)", A, B)
if False: # fake to trigger check a)
b_id = B.id
B = XY_a((1.3,20-2.1))
B.id = b_id
B.seg = [1,2,A.id,3,4]
print("faked segment to check a), b)", A, B)
#
subdividable_ab = bool(dist_sq(A,B) > s.min_subdivide_sq)
C = None
E = None
if subdividable_ab and B.y > max_y: # check a)
C = XY_a((intersect_y(A,B, max_y), max_y))
## same, but more expensive:
# C2 = intersect_lines(A,B,XY_a((0,max_y)),XY_a((.5,max_y)))
print("B below barrier, C=", C)
s.subdivide_segment(A,B,C)
Xf_bar.insert(C)
Xb_bar.insert(C)
B,C = C,B
# print(A.seg, B.seg, C.seg)
#
# All of the following shortens [AB] sufficiently, so that B does not
# cast shadow upwards on any other point: \B/
# Such a point would become our B.
# This asserts that there is no other point, whose pyramid would bury B.
# check b)
if (subdividable_ab and (
(left2right and B.x-A.x < B.y-A.y) or (not left2right and A.x-B.x < B.y-A.y))):
Xb_a_idx = Xb_bar.find(A, start=0) # could also use lookup() here. It does not matter.
Xb_b_idx = Xb_bar.find(B) # could also use lookup() here. It does not matter.
print("check b), moving Xb_bar from A to B", A, B, Xb_a_idx, Xb_b_idx, Xb_bar.key(A), Xb_bar.key(B))
D = None
for n in range(Xb_a_idx, Xb_b_idx+1): # sweep from A to B
pt = Xb_bar.point(n)
if pt.id != A.id and pt.id != B.id and ccw(A,B,pt) == True:
D = pt # found a D that is clearly left of AB.
break
else:
print("backsweep ignoring pt", pt, Xb_bar.key(pt))
#
if D is not None: # compute intersection of Xb_bar with [AB]
E = intersect_lines(D,XY_a((D.x+1,D.y+1)),A,B,limit2=True)
if E is None: raise ValueError("finding a shadowed D failed:", A, B, D)
E = XY_a(E)
s.subdivide_segment(A,B,E)
Xf_bar.insert(E)
Xb_bar.insert(E)
if C is not None:
s.shortcut_segment(E,C,B) # replace segments [EB], [BC] with [EC]
B,C = C,B
B,E = E,B
# tentatively advance Xf_bar from A to B
Xf_a_idx = Xf_bar.pos() # unused, we never move back to A.
Xf_b_idx = Xf_bar.lookup(lambda b: b.id==B.id if b else False)
if Xf_b_idx is None: # Should never happen!
print("Xf_bar.lookup(B)=None. B=",B) # Okayish fallback, but find() may return
Xf_b_idx = Xf_bar.find(B) # a different point with the same key().
print("line A,B:", A, B, Xf_a_idx, Xf_b_idx, Xf_bar.point(Xf_b_idx))
F = None
Xf_f_idx = None
for n in range(Xf_a_idx, Xf_b_idx+1): # sweep from A to B (inclusive)
pt = Xf_bar.point(n)
if pt is None: continue
if subdividable_ab and pt.id != A.id and pt.id != B.id and ccw(A,B,pt) == (not left2right):
F = pt # found an F that is clearly right of AB.
Xf_f_idx = n
break
else:
print("forward sweep ignoring pt", n, pt, pt.id, Xf_bar.key(pt))
#
if F is not None: # compute intersection of Xf_bar with [AB]
_F_back = (F.x-1,F.y+1) if left2right else (F.x+1,F.y+1)
G = intersect_lines(F,XY_a(_F_back),A,B,limit2=True)
if G is None: raise ValueError("finding a shadowed G failed:", A, B, F, _F_back)
G = XY_a(G)
s.subdivide_segment(A,B,G)
Xf_bar.insert(G)
Xb_bar.insert(G)
if E is not None:
pass
## FIXME: should s.shortcut_segment(...E) something here.
s.output_add(A,G,cut=True)
s.unlink_segment(A,G)
Xf_bar.pos(Xf_f_idx) # advance to F, further up on the same barrier as G
A = Xf_bar.point()
#
else:
s.output_add(A,B,cut=True)
s.unlink_segment(A,B)
Xf_bar.pos(Xf_b_idx) # advance
A = Xf_bar.point()
print("advanced A to", A, file=sys.stderr)
## barrier has moved all the way to the other end.
print("barrier moved all the way", Xf_bar.points, max_y, A.att() if A else None, A.seg if A else None, file=sys.stderr)
def process_simple_barrier(s, y_slice, max_y, last_x=0.0):
"""process all lines that segment points in y_slice.
the slice is examined using a scan-strategy. Either left to right or
right to left. last_x is used to deceide if the the left or
right end of the slice is nearest. We start at the nearer end, and
work our way to the farther end.
All line segments that are below max_y are promoted into the output list,
with a carefully chosen ordering and direction. append_or_extend_hard()
is used to merge segments into longer paths where possible.
The final x-coordinate is returned, so that the caller can provide us
with its value on the next call.
"""
if s.verbose:
print("process_simple_barrier limit=%g, points=%d, %s" % (max_y, len(y_slice), last_x), file=sys.stderr)
print(" max_y=%g" % (y_slice[-1].y), file=sys.stderr)
min_x = None
max_x = None
segments = []
for pt in y_slice:
if pt is None: # all segments to that point are done.
continue
if 'seg' not in pt.attr: # shit happens
continue
for iC in pt.seg:
if iC < 0: # this segment is done.
continue
C = s.points[iC]
if C is not None and C.y <= max_y:
if s.verbose > 1:
print(" segments.append", C, pt, file=sys.stderr)
segments.append((C,pt))
if min_x is None or min_x > C.x: min_x = C.x
if min_x is None or min_x > pt.x: min_x = pt.x
if max_x is None or max_x < C.x: max_x = C.x
if max_x is None or max_x < pt.x: max_x = pt.x
s.unlink_segment(C,pt)
#
#
#
left2right = s.decide_left2right(min_x, max_x, last_x)
xsign = -1.0
if left2right: xsign = 1.0
def dovetail_both_key(a):
return a[0].y+a[1].y+xsign*(a[0].x+a[1].x)
segments.sort(key=dovetail_both_key)
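# Illustrative reading of the sort key: segments are ordered by ascending
# combined y plus (for left2right) ascending combined x, so cuts dovetail
# diagonally across the sheet rather than marching strictly column by column.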
for segment in segments:
## Flip the orientation of each line segment according to this strategy:
## check 'sharp' at both ends. ('sharp' is irrelevant without 'seen')
## if one has 'sharp' (and 'seen'), the other not, then cut towards the 'sharp' end.
## if none has that, cut according to decide_left2right()
## if both have it, we must subdivide the line segment, and cut from the
## midpoint to each end, in the order indicated by decide_left2right().
A = segment[0]
B = segment[1]
if 'sharp' in A.attr and 'seen' in A.attr:
if 'sharp' in B.attr and 'seen' in B.attr: # both sharp
iM = s.pt2idx((A.x+B.x)*.5, (A.y+B.y)*.5 )
M = s.points[iM]
if xsign*A.x <= xsign*B.x:
s.append_or_extend_hard([M, A])
s.append_or_extend_hard([M, B])
else:
s.append_or_extend_hard([M, B])
s.append_or_extend_hard([M, A])
else: # only A sharp
s.append_or_extend_hard([B, A])
else:
if 'sharp' in B.attr and 'seen' in B.attr: # only B sharp
s.append_or_extend_hard([A, B])
else: # none sharp
if xsign*A.x <= xsign*B.x:
s.append_or_extend_hard([A, B])
else:
s.append_or_extend_hard([B, A])
#
#
#
# return the last x coordinate of the last stroke
if 'output' not in s.__dict__: return 0
return s.output[-1][-1].x
def decide_left2right(s, min_x, max_x, last_x=0.0):
"""given the current x coordinate of the cutting head and
the min and max coordinates we need to go through, compute the best scan direction,
so that we minimize idle movements.
Returns True, if we should jump to the left end (aka min_x), then work our way to the right.
Returns False, if we should jump to the right end (aka max_x), then work our way to the left.
Caller ensures that max_x is >= min_x. ("The right end is to the right of the left end")
"""
if min_x >= last_x: return True # easy: all is to the right
if max_x <= last_x: return False # easy: all is to the left.
if abs(last_x - min_x) < abs(max_x - last_x):
return True # the left edge (aka min_x) is nearer
else:
return False # the right edge (aka max_x) is nearer
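# For example (hypothetical values): s.decide_left2right(10, 50, last_x=15)
# returns True (distance 5 to the left edge beats 35 to the right), while
# s.decide_left2right(10, 50, last_x=48) returns False.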
def pyramids_barrier(s):
"""Move a barrier in ascending y direction.
For each barrier position, find connected segments that are as high above the barrier
as possible. A pyramidal shadow (opening 45 deg in each direction) is cast upward
to see if a point is acceptable for the next line segment. If the shadow touches other points
that still have line segments not yet done, we must choose one of these points first.
While obeying this shadow rule, we also sweep left and right through the data, similar to the
simple_barrier() algorithm below.
"""
s.output = []
if not s.do_slicing:
for path in s.paths:
s.output.append([])
for idx in path:
s.output[-1].append(s.points[idx])
# if idx == 33: print(s.points[idx].att())
#
#
return
dir_toggle = True
old_min_y = -1e10
old_len_output = len(s.output)
while True:
## always recreate the barrier, so that newly added subdivision points are seen.
Y_bar = Barrier(s.points, key=lambda a: a[1] if a else 0)
while Y_bar.point() is None: # skip forward dropped points
# print("Y_bar skipping idx", Y_bar.pos(), file=sys.stderr)
if Y_bar.next() is None: # next() returns an idx, except when hitting the end.
break
min_y = Y_bar.point().y
barrier_y = min_y + s.monotone_back_travel
print("\t>>>>>>>>>>>>>>> new Y-slice between", min_y, barrier_y, file=sys.stderr)
y_max_idx = Y_bar.find((0, barrier_y))
s.process_pyramids_barrier(Y_bar.pslice(), barrier_y, left2right=dir_toggle)
# print("process_pyramids_barrier returns", len(s.output), y_max_idx, len(s.points), file=sys.stderr)
# print("output so far: ", s.output, file=sys.stderr)
if old_len_output == len(s.output) and old_min_y == min_y:
print("pyramids_barrier aborted: no progress, stuck at min_y=", min_y, file=sys.stderr)
break
old_len_output = len(s.output)
old_min_y = min_y
dir_toggle = not dir_toggle
#
def simple_barrier(s):
"""move a barrier in ascending y direction.
For each barrier position, only try to cut lines that are above the barrier.
Flip the sign for all segment ends that were cut to negative. This flags them as done.
Add a 'seen' attribute to all nodes that have been visited once.
When no more cuts are possible, then move the barrier, try again.
A point that has all segments with negative signs is removed.
Input is read from s.paths[] -- having lists of point indices.
The output is placed into s.output[] as lists of XY_a() objects
by calling process_simple_barrier() and friends.
"""
if not s.do_slicing:
s.output = []
for path in s.paths:
s.output.append([])
for idx in path:
s.output[-1].append(s.points[idx])
#
#
return
## first step sort the points into an additional list by ascending y.
def by_y_key(a):
return a.y
sy = sorted(s.points, key=by_y_key)
barrier_y = s.barrier_increment
barrier_idx = 0 # pointing to the first element that is beyond.
last_x = 0.0 # we start at home.
while True:
old_idx = barrier_idx
while sy[barrier_idx].y < barrier_y:
barrier_idx += 1
if barrier_idx >= len(sy):
break
if barrier_idx > old_idx:
last_x = s.process_simple_barrier(sy[0:barrier_idx], barrier_y, last_x=last_x)
if barrier_idx >= len(sy):
break
barrier_y += s.barrier_increment
#
def apply_overshoot(s, paths, start_travel, end_travel):
"""Extrapolate path in the output list by the give travel at start and/or end
Paths are extended linear, curves are not taken into accound.
The intended effect is that interrupted cuts actually overlap at the
split point. The knive may otherwise leave some uncut material around
the split point.
"""
def extend_b(A,B,travel):
d = math.sqrt(dist_sq(A,B))
if d < 0.000001: return B # cannot extrapolate if A == B
ratio = travel/d
dx = B.x-A.x
dy = B.y-A.y
C = XY_a((B.x+dx*ratio, B.y+dy*ratio))
if 'sharp' in B.attr: C.sharp = True
return C
for path in paths:
if start_travel > 0.0:
path[0] = extend_b(path[1],path[0], start_travel)
if end_travel > 0.0:
path[-1] = extend_b(path[-2],path[-1], end_travel)
return paths
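# Numeric sketch of extend_b (illustrative values): conceptually, with
# A=(0,0), B=(3,4) and travel=1, we get d=5 and ratio=0.2, so C=(3.6, 4.8)
# -- B pushed one unit further along the AB direction.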
def apply(self, cut):
self.load(cut)
if self.pyramids_algorithm:
self.link_points()
self.mark_sharp_segs()
self.pyramids_barrier()
else:
self.subdivide_segments(self.monotone_back_travel)
self.link_points()
self.mark_sharp_segs()
self.simple_barrier()
if self.tool_pen == False and self.overshoot > 0.0:
self.output = self.apply_overshoot(self.output, self.overshoot, self.overshoot)
return self.output
| gpl-2.0 |
radicalbit/ambari | ambari-common/src/main/python/resource_management/libraries/functions/list_ambari_managed_repos.py | 3 | 1906 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
__all__ = ["list_ambari_managed_repos"]
import os
import glob
from ambari_commons.os_check import OSCheck
from resource_management.core.exceptions import Fail
def list_ambari_managed_repos(stack_name):
"""
Lists all repositories that are present on the host.
"""
stack_name = stack_name.upper()
# TODO : get it dynamically from the server
repository_names = [stack_name, stack_name + "-UTILS" ]
if OSCheck.is_ubuntu_family():
repo_dir = '/etc/apt/sources.list.d/'
elif OSCheck.is_redhat_family(): # Centos/RHEL 5/6
repo_dir = '/etc/yum.repos.d/'
elif OSCheck.is_suse_family():
repo_dir = '/etc/zypp/repos.d/'
else:
raise Fail('Cannot determine repo dir')
repos = []
for name in repository_names:
# List all files that match pattern
files = glob.glob(os.path.join(repo_dir, name) + '*')
for f in files:
filename = os.path.basename(f)
# leave out extension
reponame = os.path.splitext(filename)[0]
repos.append(reponame)
# deduplicate, preserving first-seen order
seen = set()
uniq = [s for s in repos if not (s in seen or seen.add(s))]
return uniq
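# Usage sketch (hypothetical filenames): list_ambari_managed_repos('HDP')
# could return ['HDP', 'HDP-UTILS-1.1.0.21'] on a RHEL host carrying
# HDP.repo and HDP-UTILS-1.1.0.21.repo under /etc/yum.repos.d/.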
| apache-2.0 |
fujunwei/chromium-crosswalk | build/android/pylib/base/base_test_runner.py | 14 | 5084 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Base class for running tests on a single device."""
# TODO(jbudorick) Deprecate and remove this class and all subclasses after
# any relevant parts have been ported to the new environment + test instance
# model.
import logging
from pylib import ports
from pylib.device import device_utils
from pylib.forwarder import Forwarder
from pylib.valgrind_tools import CreateTool
# TODO(frankf): Move this to pylib/utils
import lighttpd_server
# A file on device to store ports of net test server. The format of the file is
# test-spawner-server-port:test-server-port
NET_TEST_SERVER_PORT_INFO_FILE = 'net-test-server-ports'
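# e.g. a file body of '8001:8002' (illustrative ports) would mean the
# spawner listens on 8001 and the test server on 8002.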
class BaseTestRunner(object):
"""Base class for running tests on a single device."""
def __init__(self, device_serial, tool, cleanup_test_files=False):
"""
Args:
device_serial: Serial number of the device tests will run on.
tool: Name of the Valgrind tool.
cleanup_test_files: Whether or not to clean up test files on device.
"""
self.device_serial = device_serial
self.device = device_utils.DeviceUtils(device_serial)
self.tool = CreateTool(tool, self.device)
self._http_server = None
self._forwarder_device_port = 8000
self.forwarder_base_url = ('http://localhost:%d' %
self._forwarder_device_port)
# We will allocate port for test server spawner when calling method
# LaunchChromeTestServerSpawner and allocate port for test server when
# starting it in TestServerThread.
self.test_server_spawner_port = 0
self.test_server_port = 0
self._cleanup_test_files = cleanup_test_files
def _PushTestServerPortInfoToDevice(self):
"""Pushes the latest port information to device."""
self.device.WriteFile(
self.device.GetExternalStoragePath() + '/' +
NET_TEST_SERVER_PORT_INFO_FILE,
'%d:%d' % (self.test_server_spawner_port, self.test_server_port))
def RunTest(self, test):
"""Runs a test. Needs to be overridden.
Args:
test: A test to run.
Returns:
Tuple containing:
(base_test_result.TestRunResults, tests to rerun or None)
"""
raise NotImplementedError
def InstallTestPackage(self):
"""Installs the test package once before all tests are run."""
pass
def SetUp(self):
"""Run once before all tests are run."""
self.InstallTestPackage()
def TearDown(self):
"""Run once after all tests are run."""
self.ShutdownHelperToolsForTestSuite()
if self._cleanup_test_files:
self.device.old_interface.RemovePushedFiles()
def LaunchTestHttpServer(self, document_root, port=None,
extra_config_contents=None):
"""Launches an HTTP server to serve HTTP tests.
Args:
document_root: Document root of the HTTP server.
port: port on which we want the http server to bind.
extra_config_contents: Extra config contents for the HTTP server.
"""
self._http_server = lighttpd_server.LighttpdServer(
document_root, port=port, extra_config_contents=extra_config_contents)
if self._http_server.StartupHttpServer():
logging.info('http server started: http://localhost:%s',
self._http_server.port)
else:
logging.critical('Failed to start http server')
self._ForwardPortsForHttpServer()
return (self._forwarder_device_port, self._http_server.port)
def _ForwardPorts(self, port_pairs):
"""Forwards a port."""
Forwarder.Map(port_pairs, self.device, self.tool)
def _UnmapPorts(self, port_pairs):
"""Unmap previously forwarded ports."""
for (device_port, _) in port_pairs:
Forwarder.UnmapDevicePort(device_port, self.device)
# Deprecated: Use ForwardPorts instead.
def StartForwarder(self, port_pairs):
"""Starts TCP traffic forwarding for the given |port_pairs|.
Args:
port_pairs: A list of (device_port, local_port) tuples to forward.
"""
self._ForwardPorts(port_pairs)
def _ForwardPortsForHttpServer(self):
"""Starts a forwarder for the HTTP server.
The forwarder forwards HTTP requests and responses between host and device.
"""
self._ForwardPorts([(self._forwarder_device_port, self._http_server.port)])
def _RestartHttpServerForwarderIfNecessary(self):
"""Restarts the forwarder if it's not open."""
# Checks to see if the http server port is being used. If not forwards the
# request.
# TODO(dtrainor): This is not always reliable because sometimes the port
# will be left open even after the forwarder has been killed.
if not ports.IsDevicePortUsed(self.device, self._forwarder_device_port):
self._ForwardPortsForHttpServer()
def ShutdownHelperToolsForTestSuite(self):
"""Shuts down the server and the forwarder."""
if self._http_server:
self._UnmapPorts([(self._forwarder_device_port, self._http_server.port)])
self._http_server.ShutdownHttpServer()
| bsd-3-clause |
smtchahal/cs-cz-map-installer | cs_cz_map_installer/mainwindow.py | 1 | 9909 | """
This module contains the MainWindow class responsible for rendering
the main window of the application.
"""
import sys
import os
import logging
from PySide import QtGui
from PySide.QtGui import QMessageBox
from .dialogs import ErrorDialog
from . import mapinstaller
LOGGING_FORMAT = ("[%(asctime)s] %(levelname)s "
"[%(name)s.%(funcName)s:%(lineno)d] %(message)s")
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
logger = logging.getLogger(__name__)
class MainWindow(QtGui.QMainWindow):
"""
The MainWindow class, responsible for rendering the main window of the
application
"""
def __init__(self):
"""Initialize class"""
super().__init__()
self.appname = QtGui.QApplication.applicationName()
self.initUI()
def initUI(self):
"""Set up the window"""
self.createMenus()
centralWidget = QtGui.QWidget()
mapPath = QtGui.QLabel('Map folder:')
self.mapPathEdit = QtGui.QLineEdit()
mapPathButton = QtGui.QPushButton('Select')
mapPathButton.clicked.connect(self.mapPathSelect)
gamePath = QtGui.QLabel('Game path:')
self.gamePathEdit = QtGui.QLineEdit()
gamePathButton = QtGui.QPushButton('Select')
gamePathButton.clicked.connect(self.gamePathSelect)
game = QtGui.QLabel('Game:')
self.gameDropDown = QtGui.QComboBox()
self.gameDropDown.addItem('Condition Zero')
self.gameDropDown.addItem('Counter Strike')
installButton = QtGui.QPushButton('&Install map')
installButton.clicked.connect(self.installAction)
layout = QtGui.QGridLayout()
layout.setSpacing(10)
layout.addWidget(mapPath, 0, 0)
layout.addWidget(self.mapPathEdit, 0, 1)
layout.addWidget(mapPathButton, 0, 2)
layout.addWidget(gamePath, 1, 0)
layout.addWidget(self.gamePathEdit, 1, 1)
layout.addWidget(gamePathButton, 1, 2)
layout.addWidget(game, 2, 0)
layout.addWidget(self.gameDropDown, 2, 1)
layout.addWidget(installButton, 2, 2)
centralWidget.setLayout(layout)
self.prefillPaths()
self.setCentralWidget(centralWidget)
self.setFixedSize(500, 0)
self.setWindowTitle(self.appname)
self.show()
def installMapProgress(self, mapPath, gamePath, gameType, replace=False):
"""Install map, showing a dialog box when finished"""
try:
mapinstaller.install_map(mapPath, gamePath, gameType, replace=replace)
self.dialog = QMessageBox()
self.dialog.setIcon(QMessageBox.Information)
self.dialog.setWindowTitle('Success')
self.dialog.setText('Installing map finished successfully.')
self.dialog.exec_()
except mapinstaller.SameDirectoryError:
self.dialog = ErrorDialog('Entered map path and game path refer'
' to the same directory.')
self.dialog.exec_()
except mapinstaller.InvalidGameDirectoryError:
self.dialog = ErrorDialog(('Given game directory is not a valid {0}'
' installation ("{0}" not found).').format(gameType))
self.dialog.exec_()
except mapinstaller.InvalidMapDirectoryError:
self.dialog = ErrorDialog('Invalid map directory.')
self.dialog.exec_()
except PermissionError:
self.dialog = ErrorDialog('No permission to write to {} directory,'
' did you run the application as administrator?'.format(gameType))
self.dialog.exec_()
except Exception as e:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(e).__name__, e.args)
logging.exception('Uncaught exception occured')
self.dialog = ErrorDialog(message)
self.dialog.exec_()
def installAction(self):
"""The handler for the "Install Map" click button"""
gamePath = self.gamePathEdit.text()
mapPath = self.mapPathEdit.text()
game = self.gameDropDown.currentText()
gameIndex = self.gameDropDown.currentIndex()
gameType = 'czero'
if gameIndex == 0:
gameType = 'czero'
elif gameIndex == 1:
gameType = 'cstrike'
if not os.path.isdir(gamePath) or not os.path.isdir(mapPath):
self.dialog = ErrorDialog('Please enter a valid directory path')
self.dialog.exec_()
return
try:
comparison = mapinstaller.compare_dirs(mapPath, gamePath, gameType)
if comparison is not None:
file1 = comparison[0]
file2 = comparison[1]
self.dialog = QMessageBox()
replaceButton = self.dialog.addButton('Replace',
QMessageBox.YesRole)
skipButton = self.dialog.addButton('Skip',
QMessageBox.NoRole)
cancelButton = self.dialog.addButton(QMessageBox.Cancel)
self.dialog.setIcon(QMessageBox.Question)
self.dialog.setWindowTitle('Replace files?')
text = ('Some files in {0} overlap with files in {1}'
'\nDo you want to replace these files in {0}'
' or skip them?')
self.dialog.setText(text.format(gamePath, mapPath))
self.dialog.exec_()
clicked = self.dialog.clickedButton()
if clicked == replaceButton:
self.installMapProgress(mapPath, gamePath, gameType,
replace=True)
elif clicked == skipButton:
self.installMapProgress(mapPath, gamePath, gameType)
elif clicked == cancelButton:
self.dialog = QMessageBox()
self.dialog.setIcon(QMessageBox.Warning)
self.dialog.setWindowTitle('Canceled')
self.dialog.setText('Operation canceled')
self.dialog.exec_()
return
else:
self.installMapProgress(mapPath, gamePath, gameType)
except mapinstaller.SameDirectoryError:
self.dialog = ErrorDialog('Entered map path and game path refer'
' to the same directory.')
self.dialog.exec_()
def prefillPaths(self):
"""
Pre-fill gamePath and mapPath with values found from
mapinstaller.get_game_path()
"""
if sys.platform.startswith('linux') or sys.platform == 'darwin':
# Linux and OS/X
home = os.path.expanduser('~')
paths = (home + '/.wine/drive_c/Program Files (x86)',
home + '/.wine/drive_c/Program Files',
home)
self.mapPathEdit.setText(home)
self.gamePathEdit.setText(mapinstaller.get_game_path(paths) or '')
elif sys.platform == 'win32':
# Windows
drives = mapinstaller.get_win_drives()
paths = []
for drive in drives:
paths.append(drive + r'\Program Files (x86)')
paths.append(drive + r'\Program Files')
paths.append(drive)
self.mapPathEdit.setText(drives[0] + '\\')
self.gamePathEdit.setText(mapinstaller.get_game_path(paths) or '')
def mapPathSelect(self):
"""Handle click on Select Map Path button"""
directoryPath = QtGui.QFileDialog.getExistingDirectory(self,
'Select map directory', self.mapPathEdit.text())
if directoryPath:
self.mapPathEdit.setText(directoryPath)
def gamePathSelect(self):
"""Handle click on Select Game Path button"""
directoryPath = QtGui.QFileDialog.getExistingDirectory(self,
'Select game directory', self.gamePathEdit.text())
if directoryPath:
self.gamePathEdit.setText(directoryPath)
def createMenus(self):
"""Create menus in MainWindow"""
# Create menubar
menubar = self.menuBar()
# Actions
exitAction = QtGui.QAction('&Exit', self)
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(self.close)
aboutAction = QtGui.QAction('&About ' + self.appname, self)
aboutAction.setStatusTip('About ' + self.appname)
aboutAction.triggered.connect(self.launchAboutDialog)
# Menus
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(exitAction)
helpMenu = menubar.addMenu('&Help')
helpMenu.addAction(aboutAction)
def launchAboutDialog(self):
"""Launch the About dialog"""
appName = QtGui.QApplication.applicationName()
appVersion = QtGui.QApplication.applicationVersion()
title = 'About {}'.format(appName)
text = '''<p style="font-size: 16px; font-weight: bold;">
{0}
</p>
<p>Version {1}</p>
<p>
{0} was made by
<a href="https://github.com/smtchahal">Sumit Chahal</a>.
It is available under the MIT License (see
<a
href="https://github.com/smtchahal/cs-cz-map-installer/blob/master/LICENSE">full
licensing terms</a>). Source code is available on
<a
href="https://github.com/smtchahal/cs-cz-map-installer">GitHub</a>.
</p>
<p>
{0} uses <a
href="https://pypi.python.org/pypi/PySide/1.2.4">PySide
1.2.4</a>.
'''.format(appName, appVersion)
QMessageBox.about(self, title, text)
#self.dialog = AboutDialog()
#self.dialog.exec_()
| mit |
simonwydooghe/ansible | lib/ansible/modules/network/f5/bigip_file_copy.py | 19 | 21688 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_file_copy
short_description: Manage files in datastores on a BIG-IP
description:
- Manages files on a variety of datastores on a BIG-IP.
version_added: 2.8
options:
name:
description:
- The name of the file as it should reside on the BIG-IP.
- If this is not specified, then the filename provided in the C(source)
parameter is used instead.
type: str
source:
description:
- Specifies the path of the file to upload.
- This parameter is required if C(state) is C(present).
type: path
aliases:
- src
datastore:
description:
- Specifies the datastore to put the file in.
- There are several different datastores and each of them allows files
to be exposed in different ways.
- When C(external-monitor), the specified file will be stored as
an external monitor file and be available for use in external monitors.
- When C(ifile), the specified file will be stored as an iFile.
- When C(lw4o6-table), the specified file will be stored as a Lightweight 4
over 6 (lw4o6) tunnel binding table, which includes an IPv6 address for the
lwB4, a public IPv4 address, and a restricted port set.
type: str
choices:
- external-monitor
- ifile
- lw4o6-table
default: ifile
force:
description:
- Force overwrite a file.
- By default, files will only be overwritten if the SHA of the file is different
for the given filename. This parameter can be used to force overwrite the file
even if it already exists and its SHA matches.
- The C(lw4o6-table) datastore does not keep checksums of its files. Therefore, you
would need to provide this argument to update any of these files.
type: bool
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Upload a file as an iFile
bigip_file_copy:
name: foo
source: /path/to/file.txt
datastore: ifile
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
# Upload a directory of files
- name: Recursively upload web related files in /var/tmp/project
find:
paths: /var/tmp/project
patterns: "^.*?\\.(?:html|?:css|?:js)$"
use_regex: yes
register: f
- name: Upload a directory of files as a set of iFiles
bigip_file_copy:
source: "{{ item.path }}"
datastore: ifile
provider:
password: secret
server: lb.mydomain.com
user: admin
loop: "{{ f.files }}"
delegate_to: localhost
# End upload a directory of files
- name: Upload a file to use in an external monitor
bigip_file_copy:
source: /path/to/files/external.sh
datastore: external-monitor
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import hashlib
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.icontrol import upload_file
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import upload_file
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
]
returnables = [
]
updatables = [
'checksum',
]
class ApiParameters(Parameters):
@property
def checksum(self):
"""Returns a plain checksum value without the leading extra characters
Values are stored in the REST as the following.
``"checksum": "SHA1:77002:b84015799949ac4acad87b81691455242a31e894"``
Returns:
string: The parsed SHA1 checksum.
"""
if self._values['checksum'] is None:
return None
return str(self._values['checksum'].split(':')[2])
class ModuleParameters(Parameters):
@property
def checksum(self):
"""Return SHA1 checksum of the file on disk
Returns:
string: The SHA1 checksum of the file.
References:
- https://stackoverflow.com/a/22058673/661215
"""
if self._values['datastore'] == 'lw4o6-table':
return None
sha1 = hashlib.sha1()
with open(self._values['source'], 'rb') as f:
while True:
data = f.read(4096)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
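# Note: for small files this is equivalent to the one-shot
# hashlib.sha1(fh.read()).hexdigest(); the chunked loop above merely
# bounds memory use for large uploads.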
@property
def name(self):
if self._values['name'] is not None:
return self._values['name']
if self._values['source'] is None:
return None
return os.path.basename(self._values['source'])
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update() and not self.want.force:
return False
if self.module.check_mode:
return True
self.remove_from_device()
self.upload_to_device()
self.create_on_device()
self.remove_uploaded_file_from_device(self.want.name)
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
if self.module.check_mode:
return True
self.upload_to_device()
self.create_on_device()
self.remove_uploaded_file_from_device(self.want.name)
return True
def absent(self):
if self.exists():
return self.remove()
return False
def upload_to_device(self):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, self.want.source, self.want.name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def remove_uploaded_file_from_device(self, name):
filepath = '/var/config/rest/downloads/{0}'.format(name)
params = {
"command": "run",
"utilCmdArgs": filepath
}
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
class IFileManager(BaseManager):
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['source-path'] = 'file:/var/config/rest/downloads/{0}'.format(self.want.name)
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/file/ifile/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/ifile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/ifile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/ifile/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ExternalMonitorManager(BaseManager):
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['source-path'] = 'file:/var/config/rest/downloads/{0}'.format(self.want.name)
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/file/external-monitor/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/external-monitor/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/external-monitor/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/external-monitor/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class Lw4o6Manager(BaseManager):
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['source-path'] = 'file:/var/config/rest/downloads/{0}'.format(self.want.name)
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/file/lwtunneltbl/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/lwtunneltbl/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/lwtunneltbl/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/file/lwtunneltbl/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if self.module.params['datastore'] == 'ifile':
manager = self.get_manager('v1')
elif self.module.params['datastore'] == 'external-monitor':
manager = self.get_manager('v2')
elif self.module.params['datastore'] == 'lw4o6-table':
manager = self.get_manager('v3')
else:
raise F5ModuleError(
"Unknown datastore specified."
)
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return IFileManager(**self.kwargs)
elif type == 'v2':
return ExternalMonitorManager(**self.kwargs)
elif type == 'v3':
return Lw4o6Manager(**self.kwargs)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(),
source=dict(
type='path',
aliases=['src'],
),
datastore=dict(
choices=[
'external-monitor',
'ifile',
'lw4o6-table',
],
default='ifile'
),
force=dict(type='bool', default='no'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.required_if = [
['state', 'present', ['source']]
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
required_if=spec.required_if,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
40223227/2015cdbg6w0622-40223227- | static/Brython3.1.1-20150328-091302/Lib/ui/dialog.py | 607 | 4994 | from . import widget
from browser import html, document
class Dialog(widget.DraggableWidget):
def __init__(self, id=None):
self._div_shell=html.DIV(
Class="ui-dialog ui-widget ui-widget-content ui-corner-all ui-front ui-draggable ui-resizable",
style={'position': 'absolute', 'height': 'auto', 'width': '300px',
'top': '98px', 'left': '140px', 'display': 'block'})
widget.DraggableWidget.__init__(self, self._div_shell, 'dialog', id)
_div_titlebar=html.DIV(Id="titlebar",
Class="ui-dialog-titlebar ui-widget-header ui-corner-all ui-helper-clearfix")
self._div_shell <= _div_titlebar
self._div_title=html.SPAN(Id="title", Class="ui-dialog-title")
_div_titlebar <= self._div_title
self._title_button=html.BUTTON(Title="close",
Class="ui-button ui-widget ui-state-default ui-corner-all ui-button-icon-only ui-dialog-titlebar-close")
def dialog_close(e):
#del document[self._div_shell.id]
del document[self._div_shell.id]
self._title_button.bind('click', dialog_close)
_span=html.SPAN(Class="ui-button-icon-primary ui-icon ui-icon-closethick")
self._title_button <= _span
_span=html.SPAN('close', Class="ui-button-text")
self._title_button <= _span
_div_titlebar <= self._title_button
self._div_dialog=html.DIV(Class="ui-dialog-content ui-widget-content",
style={'width': 'auto', 'min-height': '105px',
'max-height': 'none', 'height': 'auto'})
self._div_shell <= self._div_dialog
for _i in ['n', 'e', 's', 'w', 'se', 'sw', 'ne', 'nw']:
if _i == 'se':
_class="ui-resizable-handle ui-resizable-%s ui-icon ui-icon-gripsmall-diagonal-%s" % (_i, _i)
else:
_class="ui-resizable-handle ui-resizable-%s" % _i
self._div_shell <= html.DIV(Class=_class, style={'z-index': '90'})
document <= self._div_shell
def set_title(self, title):
self._div_title.set_text(title)
def set_body(self, body):
self._div_dialog.set_html(body)
class EntryDialog(Dialog):
def __init__(self, title, prompt, action, _id=None):
Dialog.__init__(self, _id)
self.set_title(title)
self.action = action
d_prompt = html.DIV(prompt, Class="ui-widget",
style=dict(float="left",paddingRight="10px"))
self.entry = html.INPUT()
body = html.DIV(d_prompt+self.entry,
style={'padding':'15px'})
b_ok = html.BUTTON("Ok")
b_ok.bind('click', self.ok)
b_cancel = html.BUTTON("Cancel")
b_cancel.bind('click', self.cancel)
body += html.DIV(b_ok+b_cancel, style={'padding':'15px'})
self._div_dialog <= body
def ok(self, ev):
self.result = self._div_shell.get(selector='INPUT')[0].value
self.action(self.result)
document.remove(self._div_shell)
def cancel(self, ev):
document.remove(self._div_shell)
class SelectDialog(Dialog):
def __init__(self, title, prompt, options, action, _id=None):
Dialog.__init__(self, _id)
self.set_title(title)
self.options = options
self.action = action
d_prompt = html.DIV(prompt, Class="ui-widget",
style=dict(float="left",paddingRight="10px"))
self.select = html.SELECT()
for option in options:
self.select <= html.OPTION(option)
body = html.DIV(d_prompt+self.select,
style={'padding':'15px'})
b_ok = html.BUTTON("Ok")
b_ok.bind('click', self.ok)
b_cancel = html.BUTTON("Cancel")
b_cancel.bind('click', self.cancel)
body += html.DIV(b_ok+b_cancel, style={'padding':'15px'})
self._div_dialog <= body
def ok(self, ev):
ix = self._div_shell.get(selector='SELECT')[0].selectedIndex
document.remove(self._div_shell)
self.action(self.options[ix])
def cancel(self, ev):
document.remove(self._div_shell)
class YesNoDialog(Dialog):
def __init__(self, title, prompt, action_if_yes, action_if_no, _id=None):
Dialog.__init__(self, _id)
self.set_title(title)
self.action_if_yes = action_if_yes
self.action_if_no = action_if_no
d_prompt = html.DIV(prompt, Class="ui-widget",
style=dict(float="left",paddingRight="10px"))
body = html.DIV(d_prompt, style={'padding':'15px'})
b_ok = html.BUTTON("Yes")
b_ok.bind('click', self.yes)
b_cancel = html.BUTTON("No")
b_cancel.bind('click', self.no)
body += html.DIV(b_ok+b_cancel, style={'padding':'15px'})
self._div_dialog <= body
def yes(self, ev):
document.remove(self._div_shell)
self.action_if_yes(self)
def no(self, ev):
document.remove(self._div_shell)
if self.action_if_no is not None:
self.action_if_no(self)
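# Usage sketch (assuming a handler like `def on_yes(dlg): ...`):
# YesNoDialog('Confirm', 'Really delete?', on_yes, None)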
| gpl-3.0 |
ddurando/pox.carp | pox/lib/packet/rip.py | 47 | 6250 | # Copyright 2012 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
#
# RIP Message Format
#
# 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Command | Version | Zero |
# +---------------+---------------+-------------------------------+
# | |
# / RIP Entry (20 bytes) /
# / /
# +---------------------------------------------------------------+
#
#
# RIP Entry
#
# 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Address Family | Route Tag * |
# +-------------------------------+-------------------------------+
# | IP Address |
# +---------------------------------------------------------------+
# | Subnet Mask * |
# +---------------------------------------------------------------+
# | Next Hop * |
# +---------------------------------------------------------------+
# | Metric |
# +---------------------------------------------------------------+
#
# * RIP v2 only -- all zeros in RIP v1
#
#======================================================================
import struct
from packet_utils import *
from packet_base import packet_base
from pox.lib.addresses import *
# RIP v2 multicast address
RIP2_ADDRESS = IPAddr("224.0.0.9")
# RIP v1/v2 UDP port
RIP_PORT = 520
RIP_REQUEST = 1
RIP_RESPONSE = 2
class rip (packet_base):
"""
RIP Message
"""
MIN_LEN = 24
RIP_PORT = RIP_PORT
RIP2_ADDRESS = RIP2_ADDRESS
def __init__(self, raw=None, prev=None, **kw):
packet_base.__init__(self)
self.prev = prev
self.entries = []
self.command = 0
self.version = 0
if raw is not None:
self.parse(raw)
self._init(kw)
def hdr (self, payload):
s = struct.pack("!BBH", self.command, self.version, 0)
for e in self.entries:
s += e.pack()
return s
def parse (self, raw):
assert isinstance(raw, bytes)
self.raw = raw
dlen = len(raw)
if dlen < self.MIN_LEN:
self.msg('RIP packet data too short to parse')
return None
self.command, self.version, z = struct.unpack("!BBH", raw[:4])
if z != 0:
self.err("Zero field in RIP message not zero!")
return None
self.entries = []
raw = raw[4:]
while len(raw) >= 20:
try:
self.entries.append(RIPEntry(raw=raw[0:20]))
except Exception, e:
self.err('Exception parsing RIP entries: ' + str(e))
return None
raw = raw[20:]
if len(raw) != 0:
self.err('RIP had partial entry? %s bytes left' % (len(raw),))
self.parsed = True
def __str__ (self):
cmd = {RIP_REQUEST:"REQ",RIP_RESPONSE:"RESP"}.get(self.command,
str(self.command))
s = "[RIP ver:%i cmd:%s num:%i|" % (self.version,
cmd, len(self.entries))
for e in self.entries:
s += str(e) + "|"
s = s[:-1] + "]"
return s
RIPMessage = rip
class RIPEntry (packet_base):
def __init__ (self, raw=None, prev=None, **kw):
#TODO: netmask initializer?
packet_base.__init__(self)
self.address_family = 0
self.route_tag = 0
self.ip = None # IPAddr; bad default is to force setting
self._netmask = 0 # An IPAddr, but netmask property lets you assign a
# dotquad string or an integer number of bits.
self.next_hop = IP_ANY
self.metric = 0
if raw is not None:
self.parse(raw)
self._init(kw)
@property
def netmask (self):
return self._netmask
@netmask.setter
def netmask (self, netmask):
if isinstance(netmask, int):
netmask = cidr_to_netmask(netmask)
elif not isinstance(netmask, IPAddr):
netmask = IPAddr(netmask)
self._netmask = netmask
@property
def network_bits (self):
"""
Returns the number of network bits. May raise an exception
if the netmask is not CIDR-compatible.
"""
return netmask_to_cidr(self._netmask)
@network_bits.setter
def network_bits (self, bits):
self._netmask = cidr_to_netmask(bits)
def hdr (self, payload):
s = struct.pack("!HHiiii", self.address_family, self.route_tag,
self.ip.toSigned(networkOrder=False),
self.netmask.toSigned(networkOrder=False),
self.next_hop.toSigned(networkOrder=False),
self.metric)
return s
def parse (self, raw):
self.address_family, self.route_tag, ip, netmask, next_hop, self.metric \
= struct.unpack("!HHiiii", raw)
self.ip = IPAddr(ip, networkOrder = False)
self._netmask = IPAddr(netmask, networkOrder = False)
self.next_hop = IPAddr(next_hop, networkOrder = False)
def __str__ (self):
s = "tag:%s ip:%s/%s nh:%s m:%s" % (self.route_tag, self.ip,
self._netmask, self.next_hop, self.metric)
return s
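# A minimal build-and-pack sketch (assumptions: Python 2, run inside a POX
# tree so the pox.lib imports above resolve, and packet_base provides the
# pack() used for entries in rip.hdr):
#
#   msg = rip()
#   msg.command = RIP_RESPONSE
#   msg.version = 2
#   entry = RIPEntry()
#   entry.address_family = 2            # AF_INET
#   entry.ip = IPAddr("10.0.0.0")
#   entry.network_bits = 8              # fills in the netmask
#   entry.metric = 1
#   msg.entries.append(entry)
#   wire = msg.hdr('')                  # 4-byte header + one 20-byte entry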
| apache-2.0 |
dxmgame/dxm-cocos | src/oslibs/cocos/cocos-src/tools/bindings-generator/backup/clang-llvm-3.3-pybinding/enumerations.py | 307 | 1077 | #===- enumerations.py - Python Enumerations ------------------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
Clang Enumerations
==================
This module provides static definitions of enumerations that exist in libclang.
Enumerations are typically defined as a list of tuples. The exported values are
typically munged into other types or classes at module load time.
All enumerations are centrally defined in this file so they are all grouped
together and easier to audit. And, maybe even one day this file will be
automatically generated by scanning the libclang headers!
"""
# Maps to CXTokenKind. Note that libclang maintains a separate set of token
# enumerations from the C++ API.
TokenKinds = [
('PUNCTUATION', 0),
('KEYWORD', 1),
('IDENTIFIER', 2),
('LITERAL', 3),
('COMMENT', 4),
]
__all__ = ['TokenKinds']
| mit |
modelrockettier/wget | testenv/conf/expected_files.py | 6 | 2239 | from difflib import unified_diff
import os
import sys
from conf import hook
from exc.test_failed import TestFailed
""" Post-Test Hook: ExpectedFiles
This is a Post-Test hook that checks the test directory for the files it
contains. A dictionary object is passed to it, which contains a mapping of
filenames and contents of all the files that the directory is expected to
contain.
Raises a TestFailed exception if the expected files are not found or if extra
files are found, else returns gracefully.
"""
@hook()
class ExpectedFiles:
def __init__(self, expected_fs):
self.expected_fs = expected_fs
@staticmethod
def gen_local_fs_snapshot():
snapshot = {}
for parent, dirs, files in os.walk('.'):
for name in files:
# pubring.kbx will be created by libgpgme if $HOME doesn't contain the .gnupg directory.
# setting $HOME to CWD (in base_test.py) breaks two Metalink tests, so we skip this file here.
if name == 'pubring.kbx':
continue
f = {'content': ''}
file_path = os.path.join(parent, name)
with open(file_path) as fp:
f['content'] = fp.read()
snapshot[file_path[2:]] = f
return snapshot
def __call__(self, test_obj):
local_fs = self.gen_local_fs_snapshot()
for file in self.expected_fs:
if file.name in local_fs:
local_file = local_fs.pop(file.name)
formatted_content = test_obj._replace_substring(file.content)
if formatted_content != local_file['content']:
for line in unified_diff(local_file['content'],
formatted_content,
fromfile='Actual',
tofile='Expected'):
print(line, file=sys.stderr)
raise TestFailed('Contents of %s do not match' % file.name)
else:
raise TestFailed('Expected file %s not found.' % file.name)
if local_fs:
print(local_fs)
raise TestFailed('Extra files downloaded.')
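# A minimal usage sketch (assumptions: WgetFile from this test environment
# exposes the .name/.content attributes accessed above, and the hook is
# attached to a test through its conf name like the other hooks):
#
#   ExpectedFiles([WgetFile('index.html', 'Hello, World!')])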
| gpl-3.0 |
tmhm/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions,
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
indrajitr/ansible | lib/ansible/modules/import_role.py | 7 | 2718 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
author: Ansible Core Team (@ansible)
module: import_role
short_description: Import a role into a play
description:
- Much like the C(roles:) keyword, this task loads a role, but it allows you to control when the role tasks run in
between other tasks of the play.
- Most keywords, loops and conditionals will only be applied to the imported tasks, not to this statement itself. If
you want the opposite behavior, use M(include_role) instead.
version_added: '2.4'
options:
name:
description:
- The name of the role to be executed.
type: str
required: true
tasks_from:
description:
- File to load from a role's C(tasks/) directory.
type: str
default: main
vars_from:
description:
- File to load from a role's C(vars/) directory.
type: str
default: main
defaults_from:
description:
- File to load from a role's C(defaults/) directory.
type: str
default: main
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
type: bool
default: yes
handlers_from:
description:
- File to load from a role's C(handlers/) directory.
type: str
default: main
version_added: '2.8'
notes:
- Handlers are made available to the whole play.
- Since Ansible 2.7 variables defined in C(vars) and C(defaults) for the role are exposed at playbook parsing time.
Due to this, these variables will be accessible to roles and tasks executed before the location of the
M(import_role) task.
- Unlike M(include_role) variable exposure is not configurable, and will always be exposed.
seealso:
- module: import_playbook
- module: import_tasks
- module: include_role
- module: include_tasks
- ref: playbooks_reuse_includes
description: More information related to including and importing playbooks, roles and tasks.
'''
EXAMPLES = r'''
- hosts: all
tasks:
- import_role:
name: myrole
- name: Run tasks/other.yaml instead of 'main'
import_role:
name: myrole
tasks_from: other
- name: Pass variables to role
import_role:
name: myrole
vars:
rolevar1: value from task
- name: Apply condition to each task in role
import_role:
name: myrole
when: not idontwanttorun
'''
RETURN = r'''
# This module does not return anything except tasks to execute.
'''
| gpl-3.0 |
hhbyyh/spark | examples/src/main/python/ml/als_example.py | 63 | 3026 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
if sys.version >= '3':
long = int
from pyspark.sql import SparkSession
# $example on$
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
from pyspark.sql import Row
# $example off$
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("ALSExample")\
.getOrCreate()
# $example on$
lines = spark.read.text("data/mllib/als/sample_movielens_ratings.txt").rdd
parts = lines.map(lambda row: row.value.split("::"))
ratingsRDD = parts.map(lambda p: Row(userId=int(p[0]), movieId=int(p[1]),
rating=float(p[2]), timestamp=long(p[3])))
ratings = spark.createDataFrame(ratingsRDD)
(training, test) = ratings.randomSplit([0.8, 0.2])
# Build the recommendation model using ALS on the training data
# Note we set cold start strategy to 'drop' to ensure we don't get NaN evaluation metrics
als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="movieId", ratingCol="rating",
coldStartStrategy="drop")
model = als.fit(training)
# Evaluate the model by computing the RMSE on the test data
predictions = model.transform(test)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
print("Root-mean-square error = " + str(rmse))
# Generate top 10 movie recommendations for each user
userRecs = model.recommendForAllUsers(10)
# Generate top 10 user recommendations for each movie
movieRecs = model.recommendForAllItems(10)
# Generate top 10 movie recommendations for a specified set of users
users = ratings.select(als.getUserCol()).distinct().limit(3)
userSubsetRecs = model.recommendForUserSubset(users, 10)
# Generate top 10 user recommendations for a specified set of movies
movies = ratings.select(als.getItemCol()).distinct().limit(3)
movieSubSetRecs = model.recommendForItemSubset(movies, 10)
# $example off$
userRecs.show()
movieRecs.show()
userSubsetRecs.show()
movieSubSetRecs.show()
spark.stop()
| apache-2.0 |
eallovon/xivo-provd-plugins | plugins/xivo-cisco-sccp/common/common.py | 3 | 12217 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2014 Avencall
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import logging
import os
import re
from provd import synchronize
from provd import tzinform
from provd.devices.config import RawConfigError
from provd.devices.pgasso import BasePgAssociator, IMPROBABLE_SUPPORT, \
NO_SUPPORT, COMPLETE_SUPPORT, PROBABLE_SUPPORT
from provd.plugins import StandardPlugin, FetchfwPluginHelper,\
TemplatePluginHelper
from provd.servers.tftp.service import TFTPFileService
from provd.util import norm_mac, format_mac
from twisted.internet import defer, threads
logger = logging.getLogger('plugin.xivo-cisco')
class BaseCiscoPgAssociator(BasePgAssociator):
def __init__(self, models):
self._models = models
def _do_associate(self, vendor, model, version):
if vendor == u'Cisco':
if model is None:
# when model is None, give a score slightly higher than
# xivo-cisco-spa plugins
return PROBABLE_SUPPORT + 10
if model.startswith(u'SPA'):
return NO_SUPPORT
if model in self._models:
return COMPLETE_SUPPORT
return PROBABLE_SUPPORT
return IMPROBABLE_SUPPORT
class BaseCiscoDHCPDeviceInfoExtractor(object):
def extract(self, request, request_type):
return defer.succeed(self._do_extract(request))
_VDI_REGEX = re.compile(r'\bPhone (?:79(\d\d)|CP-79(\d\d)G|CP-(\d\d\d\d))')
def _do_extract(self, request):
options = request[u'options']
if 60 in options:
return self._extract_from_vdi(options[60])
def _extract_from_vdi(self, vdi):
# Vendor class identifier:
# "Cisco Systems, Inc." (Cisco 6901 9.1.2/9.2.1)
# "Cisco Systems, Inc. IP Phone 7912" (Cisco 7912 9.0.3)
# "Cisco Systems, Inc. IP Phone CP-7940G\x00" (Cisco 7940 8.1.2)
# "Cisco Systems, Inc. IP Phone CP-7941G\x00" (Cisco 7941 9.0.3)
# "Cisco Systems, Inc. IP Phone CP-7960G\x00" (Cisco 7960 8.1.2)
# "Cisco Systems, Inc. IP Phone CP-8961\x00" (Cisco 8961 9.1.2)
# "Cisco Systems, Inc. IP Phone CP-9951\x00" (Cisco 9951 9.1.2)
# "Cisco Systems Inc. Wireless Phone 7921"
if vdi.startswith('Cisco Systems'):
dev_info = {u'vendor': u'Cisco'}
m = self._VDI_REGEX.search(vdi)
if m:
_7900_modelnum = m.group(1) or m.group(2)
if _7900_modelnum:
if _7900_modelnum == '20':
fmt = u'79%s'
else:
fmt = u'79%sG'
dev_info[u'model'] = fmt % _7900_modelnum
else:
model_num = m.group(3)
dev_info[u'model'] = model_num.decode('ascii')
return dev_info
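# Illustration of the VDI parsing above (the option 60 payload is one of
# the samples listed in the comment):
#
#   BaseCiscoDHCPDeviceInfoExtractor()._extract_from_vdi(
#       'Cisco Systems, Inc. IP Phone CP-7941G\x00')
#   # -> {u'vendor': u'Cisco', u'model': u'7941G'}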
class BaseCiscoTFTPDeviceInfoExtractor(object):
_CIPC_REGEX = re.compile(r'^Communicator[/\\]')
_FILENAME_REGEXES = [
re.compile(r'^SEP([\dA-F]{12})\.cnf\.xml$'),
re.compile(r'^CTLSEP([\dA-F]{12})\.tlv$'),
re.compile(r'^ITLSEP([\dA-F]{12})\.tlv$'),
re.compile(r'^ITLFile\.tlv$'),
]
def extract(self, request, request_type):
return defer.succeed(self._do_extract(request))
def _do_extract(self, request):
packet = request['packet']
filename = packet['filename']
if self._CIPC_REGEX.match(filename):
return {u'vendor': u'Cisco', u'model': u'CIPC'}
for regex in self._FILENAME_REGEXES:
m = regex.match(filename)
if m:
dev_info = {u'vendor': u'Cisco'}
if m.lastindex == 1:
try:
dev_info[u'mac'] = norm_mac(m.group(1).decode('ascii'))
except ValueError, e:
logger.warning('Could not normalize MAC address: %s', e)
return dev_info
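# Illustration of the filename matching above (assumption: norm_mac returns
# provd's canonical lowercase colon-separated form):
#
#   req = {'packet': {'filename': 'SEP001122AABBCC.cnf.xml'}}
#   BaseCiscoTFTPDeviceInfoExtractor()._do_extract(req)
#   # -> {u'vendor': u'Cisco', u'mac': u'00:11:22:aa:bb:cc'}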
_ZONE_MAP = {
'Etc/GMT+12': u'Dateline Standard Time',
'Pacific/Samoa': u'Samoa Standard Time ',
'US/Hawaii': u'Hawaiian Standard Time ',
'US/Alaska': u'Alaskan Standard/Daylight Time',
'US/Pacific': u'Pacific Standard/Daylight Time',
'US/Mountain': u'Mountain Standard/Daylight Time',
'Etc/GMT+7': u'US Mountain Standard Time',
'US/Central': u'Central Standard/Daylight Time',
'America/Mexico_City': u'Mexico Standard/Daylight Time',
# '': u'Canada Central Standard Time',
# '': u'SA Pacific Standard Time',
'US/Eastern': u'Eastern Standard/Daylight Time',
'Etc/GMT+5': u'US Eastern Standard Time',
'Canada/Atlantic': u'Atlantic Standard/Daylight Time',
'Etc/GMT+4': u'SA Western Standard Time',
'Canada/Newfoundland': u'Newfoundland Standard/Daylight Time',
'America/Sao_Paulo': u'South America Standard/Daylight Time',
'Etc/GMT+3': u'SA Eastern Standard Time',
'Etc/GMT+2': u'Mid-Atlantic Standard/Daylight Time',
'Atlantic/Azores': u'Azores Standard/Daylight Time',
'Europe/London': u'GMT Standard/Daylight Time',
'Etc/GMT': u'Greenwich Standard Time',
# 'Europe/Belfast': u'W. Europe Standard/Daylight Time',
# '': u'GTB Standard/Daylight Time',
'Egypt': u'Egypt Standard/Daylight Time',
'Europe/Athens': u'E. Europe Standard/Daylight Time',
# 'Europe/Rome': u'Romance Standard/Daylight Time',
'Europe/Paris': u'Central Europe Standard/Daylight Time',
'Africa/Johannesburg': u'South Africa Standard Time ',
'Asia/Jerusalem': u'Jerusalem Standard/Daylight Time',
'Asia/Riyadh': u'Saudi Arabia Standard Time',
'Europe/Moscow': u'Russian Standard/Daylight Time', # Russia covers 8 time zones.
'Iran': u'Iran Standard/Daylight Time',
# '': u'Caucasus Standard/Daylight Time',
'Etc/GMT-4': u'Arabian Standard Time',
'Asia/Kabul': u'Afghanistan Standard Time ',
'Etc/GMT-5': u'West Asia Standard Time',
# '': u'Ekaterinburg Standard Time',
'Asia/Calcutta': u'India Standard Time',
'Etc/GMT-6': u'Central Asia Standard Time ',
'Etc/GMT-7': u'SE Asia Standard Time',
# '': u'China Standard/Daylight Time', # China hasn't observed DST since 1991
'Asia/Taipei': u'Taipei Standard Time',
'Asia/Tokyo': u'Tokyo Standard Time',
'Australia/ACT': u'Cen. Australia Standard/Daylight Time',
'Australia/Brisbane': u'AUS Central Standard Time',
# '': u'E. Australia Standard Time',
# '': u'AUS Eastern Standard/Daylight Time',
'Etc/GMT-10': u'West Pacific Standard Time',
'Australia/Tasmania': u'Tasmania Standard/Daylight Time',
'Etc/GMT-11': u'Central Pacific Standard Time',
'Etc/GMT-12': u'Fiji Standard Time',
# '': u'New Zealand Standard/Daylight Time',
}
def _gen_tz_map():
result = {}
for tz_name, param_value in _ZONE_MAP.iteritems():
tzinfo = tzinform.get_timezone_info(tz_name)
inner_dict = result.setdefault(tzinfo['utcoffset'].as_minutes, {})
if not tzinfo['dst']:
inner_dict[None] = param_value
else:
inner_dict[tzinfo['dst']['as_string']] = param_value
return result
class BaseCiscoSccpPlugin(StandardPlugin):
# XXX actually, we didn't find out which encoding Cisco SCCP phones use
_ENCODING = 'UTF-8'
_TZ_MAP = _gen_tz_map()
_TZ_VALUE_DEF = u'Eastern Standard/Daylight Time'
_LOCALE = {
# <locale>: (<name>, <lang code>, <network locale>)
u'de_DE': (u'german_germany', u'de', u'germany'),
u'en_US': (u'english_united_states', u'en', u'united_states'),
u'es_ES': (u'spanish_spain', u'es', u'spain'),
u'fr_FR': (u'french_france', u'fr', u'france'),
u'fr_CA': (u'french_france', u'fr', u'canada')
}
def __init__(self, app, plugin_dir, gen_cfg, spec_cfg):
StandardPlugin.__init__(self, app, plugin_dir, gen_cfg, spec_cfg)
self._tpl_helper = TemplatePluginHelper(plugin_dir)
downloaders = FetchfwPluginHelper.new_downloaders(gen_cfg.get('proxies'))
fetchfw_helper = FetchfwPluginHelper(plugin_dir, downloaders)
self.services = fetchfw_helper.services()
self.tftp_service = TFTPFileService(self._tftpboot_dir)
dhcp_dev_info_extractor = BaseCiscoDHCPDeviceInfoExtractor()
tftp_dev_info_extractor = BaseCiscoTFTPDeviceInfoExtractor()
def _add_locale(self, raw_config):
locale = raw_config.get(u'locale')
if locale in self._LOCALE:
raw_config[u'XX_locale'] = self._LOCALE[locale]
def _tzinfo_to_value(self, tzinfo):
utcoffset_m = tzinfo['utcoffset'].as_minutes
if utcoffset_m not in self._TZ_MAP:
# No UTC offset matching. Let's try finding one relatively close...
for supp_offset in [30, -30, 60, -60]:
if utcoffset_m + supp_offset in self._TZ_MAP:
utcoffset_m += supp_offset
break
else:
return self._TZ_VALUE_DEF
dst_map = self._TZ_MAP[utcoffset_m]
if tzinfo['dst']:
dst_key = tzinfo['dst']['as_string']
else:
dst_key = None
if dst_key not in dst_map:
# No DST rule matching. Fall back on all-standard time or, as a
# last resort, an arbitrary DST rule...
if None in dst_map:
dst_key = None
else:
dst_key = dst_map.keys()[0]
return dst_map[dst_key]
def _add_timezone(self, raw_config):
raw_config[u'XX_timezone'] = self._TZ_VALUE_DEF
if u'timezone' in raw_config:
try:
tzinfo = tzinform.get_timezone_info(raw_config[u'timezone'])
except tzinform.TimezoneNotFoundError, e:
logger.info('Unknown timezone: %s', e)
else:
raw_config[u'XX_timezone'] = self._tzinfo_to_value(tzinfo)
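# A minimal illustration (assumptions: tzinform knows 'Europe/Paris' and
# reports it as UTC+60 minutes with the DST rule listed in _ZONE_MAP):
#
#   raw_config = {u'timezone': u'Europe/Paris'}
#   self._add_timezone(raw_config)
#   # raw_config[u'XX_timezone'] == u'Central Europe Standard/Daylight Time'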
def _update_call_managers(self, raw_config):
for priority, call_manager in raw_config[u'sccp_call_managers'].iteritems():
call_manager[u'XX_priority'] = unicode(int(priority) - 1)
def _dev_specific_filename(self, device):
# Return the device specific filename (not pathname) of device
fmted_mac = format_mac(device[u'mac'], separator='', uppercase=True)
return 'SEP%s.cnf.xml' % fmted_mac
def _check_config(self, raw_config):
if u'tftp_port' not in raw_config:
raise RawConfigError('only support configuration via TFTP')
def _check_device(self, device):
if u'mac' not in device:
raise Exception('MAC address needed for device configuration')
def configure(self, device, raw_config):
self._check_config(raw_config)
self._check_device(device)
filename = self._dev_specific_filename(device)
tpl = self._tpl_helper.get_dev_template(filename, device)
# TODO check support for addons, and test what addOnModules is
# really doing...
raw_config[u'XX_addons'] = ''
self._add_locale(raw_config)
self._add_timezone(raw_config)
self._update_call_managers(raw_config)
path = os.path.join(self._tftpboot_dir, filename)
self._tpl_helper.dump(tpl, raw_config, path, self._ENCODING)
def deconfigure(self, device):
path = os.path.join(self._tftpboot_dir, self._dev_specific_filename(device))
try:
os.remove(path)
except OSError, e:
# ignore
logger.info('error while removing file: %s', e)
def synchronize(self, device, raw_config):
return defer.fail(Exception('operation not supported'))
| gpl-3.0 |
40223250/40223250test | static/Brython3.1.1-20150328-091302/Lib/_collections.py | 603 | 19111 | # "High performance data structures
# "
# copied from pypy repo
#
# Copied and completed from the sandbox of CPython
# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger)
#
# edited for Brython line 558 : catch ImportError instead of AttributeError
import operator
#try:
# from thread import get_ident as _thread_ident
#except ImportError:
def _thread_ident():
return -1
n = 30
LFTLNK = n
RGTLNK = n+1
BLOCKSIZ = n+2
# The deque's size limit is d.maxlen. The limit can be zero or positive, or
# None. After an item is added to a deque, we check to see if the size has
# grown past the limit. If it has, we get the size back down to the limit by
# popping an item off of the opposite end. The methods that can trigger this
# are append(), appendleft(), extend(), and extendleft().
#class deque(object):
class deque:
def __new__(cls, iterable=(), *args, **kw):
#fixme: Brython could not run super(deque, cls).__new__ here
#self = super(deque, cls).__new__(cls, *args, **kw)
self = object.__new__(cls)  # object.__new__ takes no extra arguments
self.clear()
return self
def __init__(self, iterable=(), maxlen=None):
object.__init__(self)
self.clear()
if maxlen is not None:
if maxlen < 0:
raise ValueError("maxlen must be non-negative")
self._maxlen = maxlen
add = self.append
for elem in iterable:
add(elem)
@property
def maxlen(self):
return self._maxlen
def clear(self):
self.right = self.left = [None] * BLOCKSIZ
self.rightndx = n//2 # points to last written element
self.leftndx = n//2+1
self.length = 0
self.state = 0
def append(self, x):
self.state += 1
self.rightndx += 1
if self.rightndx == n:
newblock = [None] * BLOCKSIZ
self.right[RGTLNK] = newblock
newblock[LFTLNK] = self.right
self.right = newblock
self.rightndx = 0
self.length += 1
self.right[self.rightndx] = x
if self.maxlen is not None and self.length > self.maxlen:
self.popleft()
def appendleft(self, x):
self.state += 1
self.leftndx -= 1
if self.leftndx == -1:
newblock = [None] * BLOCKSIZ
self.left[LFTLNK] = newblock
newblock[RGTLNK] = self.left
self.left = newblock
self.leftndx = n-1
self.length += 1
self.left[self.leftndx] = x
if self.maxlen is not None and self.length > self.maxlen:
self.pop()
def extend(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.append(elem)
def extendleft(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.appendleft(elem)
def pop(self):
if self.left is self.right and self.leftndx > self.rightndx:
#raise IndexError, "pop from an empty deque" # does not work in brython
raise IndexError("pop from an empty deque")
x = self.right[self.rightndx]
self.right[self.rightndx] = None
self.length -= 1
self.rightndx -= 1
self.state += 1
if self.rightndx == -1:
prevblock = self.right[LFTLNK]
if prevblock is None:
# the deque has become empty; recenter instead of freeing block
self.rightndx = n//2
self.leftndx = n//2+1
else:
prevblock[RGTLNK] = None
self.right[LFTLNK] = None
self.right = prevblock
self.rightndx = n-1
return x
def popleft(self):
if self.left is self.right and self.leftndx > self.rightndx:
#raise IndexError, "pop from an empty deque"
raise IndexError("pop from an empty deque")
x = self.left[self.leftndx]
self.left[self.leftndx] = None
self.length -= 1
self.leftndx += 1
self.state += 1
if self.leftndx == n:
prevblock = self.left[RGTLNK]
if prevblock is None:
# the deque has become empty; recenter instead of freeing block
self.rightndx = n//2
self.leftndx = n//2+1
else:
prevblock[LFTLNK] = None
self.left[RGTLNK] = None
self.left = prevblock
self.leftndx = 0
return x
def count(self, value):
c = 0
for item in self:
if item == value:
c += 1
return c
def remove(self, value):
# Need to be defensive for mutating comparisons
for i in range(len(self)):
if self[i] == value:
del self[i]
return
raise ValueError("deque.remove(x): x not in deque")
def rotate(self, n=1):
length = len(self)
if length == 0:
return
halflen = (length+1) >> 1
if n > halflen or n < -halflen:
n %= length
if n > halflen:
n -= length
elif n < -halflen:
n += length
while n > 0:
self.appendleft(self.pop())
n -= 1
while n < 0:
self.append(self.popleft())
n += 1
def reverse(self):
"reverse *IN PLACE*"
leftblock = self.left
rightblock = self.right
leftindex = self.leftndx
rightindex = self.rightndx
for i in range(self.length // 2):
# Validate that pointers haven't met in the middle
assert leftblock != rightblock or leftindex < rightindex
# Swap
(rightblock[rightindex], leftblock[leftindex]) = (
leftblock[leftindex], rightblock[rightindex])
# Advance left block/index pair
leftindex += 1
if leftindex == n:
leftblock = leftblock[RGTLNK]
assert leftblock is not None
leftindex = 0
# Step backwards with the right block/index pair
rightindex -= 1
if rightindex == -1:
rightblock = rightblock[LFTLNK]
assert rightblock is not None
rightindex = n - 1
def __repr__(self):
threadlocalattr = '__repr' + str(_thread_ident())
if threadlocalattr in self.__dict__:
return 'deque([...])'
else:
self.__dict__[threadlocalattr] = True
try:
if self.maxlen is not None:
return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen)
else:
return 'deque(%r)' % (list(self),)
finally:
del self.__dict__[threadlocalattr]
def __iter__(self):
return deque_iterator(self, self._iter_impl)
def _iter_impl(self, original_state, giveup):
if self.state != original_state:
giveup()
block = self.left
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
for elem in block[l:r]:
yield elem
if self.state != original_state:
giveup()
block = block[RGTLNK]
def __reversed__(self):
return deque_iterator(self, self._reversed_impl)
def _reversed_impl(self, original_state, giveup):
if self.state != original_state:
giveup()
block = self.right
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
for elem in reversed(block[l:r]):
yield elem
if self.state != original_state:
giveup()
block = block[LFTLNK]
def __len__(self):
#sum = 0
#block = self.left
#while block:
# sum += n
# block = block[RGTLNK]
#return sum + self.rightndx - self.leftndx + 1 - n
return self.length
def __getref(self, index):
if index >= 0:
block = self.left
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
span = r-l
if index < span:
return block, l+index
index -= span
block = block[RGTLNK]
else:
block = self.right
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
negative_span = l-r
if index >= negative_span:
return block, r+index
index -= negative_span
block = block[LFTLNK]
raise IndexError("deque index out of range")
def __getitem__(self, index):
block, index = self.__getref(index)
return block[index]
def __setitem__(self, index, value):
block, index = self.__getref(index)
block[index] = value
def __delitem__(self, index):
length = len(self)
if index >= 0:
if index >= length:
raise IndexError("deque index out of range")
self.rotate(-index)
self.popleft()
self.rotate(index)
else:
# ~index rewritten without bitwise operators (Brython lacked them here)
index = -index - 1
if index >= length:
raise IndexError("deque index out of range")
self.rotate(index)
self.pop()
self.rotate(-index)
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
def __hash__(self):
#raise TypeError, "deque objects are unhashable"
raise TypeError("deque objects are unhashable")
def __copy__(self):
return self.__class__(self, self.maxlen)
# XXX make comparison more efficient
def __eq__(self, other):
if isinstance(other, deque):
return list(self) == list(other)
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, deque):
return list(self) != list(other)
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, deque):
return list(self) < list(other)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, deque):
return list(self) <= list(other)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, deque):
return list(self) > list(other)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, deque):
return list(self) >= list(other)
else:
return NotImplemented
def __iadd__(self, other):
self.extend(other)
return self
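# A minimal usage sketch of this pure-Python deque (assumption: it mirrors
# the semantics of collections.deque, which it reimplements):
#
#   d = deque([1, 2, 3], maxlen=3)
#   d.append(4)        # evicts 1 from the left -> contents [2, 3, 4]
#   d.rotate(1)        # -> contents [4, 2, 3]
#   d.popleft()        # -> 4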
class deque_iterator(object):
def __init__(self, deq, itergen):
self.counter = len(deq)
def giveup():
self.counter = 0
#raise RuntimeError, "deque mutated during iteration"
raise RuntimeError("deque mutated during iteration")
self._gen = itergen(deq.state, giveup)
def __next__(self):
res = next(self._gen)
self.counter -= 1
return res
def __iter__(self):
return self
class defaultdict(dict):
def __init__(self, *args, **kwds):
if len(args) > 0:
default_factory = args[0]
args = args[1:]
if not callable(default_factory) and default_factory is not None:
raise TypeError("first argument must be callable")
else:
default_factory = None
dict.__init__(self, *args, **kwds)
self.default_factory = default_factory
self.update(*args, **kwds)
#super(defaultdict, self).__init__(*args, **kwds)
#fixme.. had to add this function to get defaultdict working with brython correctly
def __getitem__(self, key):
if self.__contains__(key):
return dict.__getitem__(self,key)
return self.__missing__(key)
def __missing__(self, key):
# from defaultdict docs
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __repr__(self, recurse=set()):
if id(self) in recurse:
return "defaultdict(...)"
try:
recurse.add(id(self))
return "defaultdict(%s, %s)" % (repr(self.default_factory), super(defaultdict, self).__repr__())
finally:
recurse.remove(id(self))
def copy(self):
return type(self)(self.default_factory, self)
def __copy__(self):
return self.copy()
def __reduce__(self):
#
#__reduce__ must return a 5-tuple as follows:
#
# - factory function
# - tuple of args for the factory function
# - additional state (here None)
# - sequence iterator (here None)
# - dictionary iterator (yielding successive (key, value) pairs
# This API is used by pickle.py and copy.py.
#
return (type(self), (self.default_factory,), None, None, iter(self.items()))
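# A minimal usage sketch (assumption: behavior tracks the stdlib class this
# shim stands in for under Brython):
#
#   counts = defaultdict(int)
#   for word in "a b a c a b".split():
#       counts[word] += 1
#   # counts -> {'a': 3, 'b': 2, 'c': 1}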
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not name or not min(c.isalnum() or c=='_' for c in name)
or _iskeyword(name) or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not min(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new dict which maps field names to their values'
return dict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
if verbose:
print(template)
# Execute the template string in a temporary namespace
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec(template,namespace)
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
if __name__ == '__main__':
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0]+p[1])
x,y=p
print(x,y)
print(p.x+p.y)
print(p)
| gpl-3.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/ctypes/test/test_funcptr.py | 32 | 3898 | import os, unittest
from ctypes import *
try:
WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
WINFUNCTYPE = CFUNCTYPE
import _ctypes_test
lib = CDLL(_ctypes_test.__file__)
class CFuncPtrTestCase(unittest.TestCase):
def test_basic(self):
X = WINFUNCTYPE(c_int, c_int, c_int)
def func(*args):
return len(args)
x = X(func)
self.assertEqual(x.restype, c_int)
self.assertEqual(x.argtypes, (c_int, c_int))
self.assertEqual(sizeof(x), sizeof(c_voidp))
self.assertEqual(sizeof(X), sizeof(c_voidp))
def test_first(self):
StdCallback = WINFUNCTYPE(c_int, c_int, c_int)
CdeclCallback = CFUNCTYPE(c_int, c_int, c_int)
def func(a, b):
return a + b
s = StdCallback(func)
c = CdeclCallback(func)
self.assertEqual(s(1, 2), 3)
self.assertEqual(c(1, 2), 3)
# The following no longer raises a TypeError - it is now
# possible, as in C, to call cdecl functions with more parameters.
#self.assertRaises(TypeError, c, 1, 2, 3)
self.assertEqual(c(1, 2, 3, 4, 5, 6), 3)
if not WINFUNCTYPE is CFUNCTYPE and os.name != "ce":
self.assertRaises(TypeError, s, 1, 2, 3)
def test_structures(self):
WNDPROC = WINFUNCTYPE(c_long, c_int, c_int, c_int, c_int)
def wndproc(hwnd, msg, wParam, lParam):
return hwnd + msg + wParam + lParam
HINSTANCE = c_int
HICON = c_int
HCURSOR = c_int
LPCTSTR = c_char_p
class WNDCLASS(Structure):
_fields_ = [("style", c_uint),
("lpfnWndProc", WNDPROC),
("cbClsExtra", c_int),
("cbWndExtra", c_int),
("hInstance", HINSTANCE),
("hIcon", HICON),
("hCursor", HCURSOR),
("lpszMenuName", LPCTSTR),
("lpszClassName", LPCTSTR)]
wndclass = WNDCLASS()
wndclass.lpfnWndProc = WNDPROC(wndproc)
WNDPROC_2 = WINFUNCTYPE(c_long, c_int, c_int, c_int, c_int)
# This is no longer true, now that WINFUNCTYPE caches created types internally.
## # CFuncPtr subclasses are compared by identity, so this raises a TypeError:
## self.assertRaises(TypeError, setattr, wndclass,
## "lpfnWndProc", WNDPROC_2(wndproc))
# instead:
self.assertIs(WNDPROC, WNDPROC_2)
# 'wndclass.lpfnWndProc' leaks 94 references. Why?
self.assertEqual(wndclass.lpfnWndProc(1, 2, 3, 4), 10)
f = wndclass.lpfnWndProc
del wndclass
del wndproc
self.assertEqual(f(10, 11, 12, 13), 46)
def test_dllfunctions(self):
def NoNullHandle(value):
if not value:
raise WinError()
return value
strchr = lib.my_strchr
strchr.restype = c_char_p
strchr.argtypes = (c_char_p, c_char)
self.assertEqual(strchr("abcdefghi", "b"), "bcdefghi")
self.assertEqual(strchr("abcdefghi", "x"), None)
strtok = lib.my_strtok
strtok.restype = c_char_p
# Neither of these works: strtok modifies the buffer it is passed
## strtok.argtypes = (c_char_p, c_char_p)
## strtok.argtypes = (c_string, c_char_p)
def c_string(init):
size = len(init) + 1
return (c_char*size)(*init)
s = "a\nb\nc"
b = c_string(s)
## b = (c_char * (len(s)+1))()
## b.value = s
## b = c_string(s)
self.assertEqual(strtok(b, "\n"), "a")
self.assertEqual(strtok(None, "\n"), "b")
self.assertEqual(strtok(None, "\n"), "c")
self.assertEqual(strtok(None, "\n"), None)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
Tejal011089/med2-app | startup/webutils.py | 30 | 1121 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
from webnotes.utils import cint
def get_website_settings(context):
post_login = []
cart_enabled = cint(webnotes.conn.get_default("shopping_cart_enabled"))
if cart_enabled:
post_login += [{"label": "Cart", "url": "cart", "icon": "icon-shopping-cart", "class": "cart-count"},
{"class": "divider"}]
post_login += [
{"label": "Profile", "url": "profile", "icon": "icon-user"},
{"label": "Addresses", "url": "addresses", "icon": "icon-map-marker"},
{"label": "My Orders", "url": "orders", "icon": "icon-list"},
{"label": "My Tickets", "url": "tickets", "icon": "icon-tags"},
{"label": "Invoices", "url": "invoices", "icon": "icon-file-text"},
{"label": "Shipments", "url": "shipments", "icon": "icon-truck"},
{"class": "divider"}
]
context.update({
"shopping_cart_enabled": cart_enabled,
"post_login": post_login + context.get("post_login", [])
})
if not context.get("favicon"):
context["favicon"] = "app/images/favicon.ico" | agpl-3.0 |
lyndsysimon/osf.io | api/base/middleware.py | 13 | 2315 | from pymongo.errors import OperationFailure
from raven.contrib.django.raven_compat.models import sentry_exception_handler
from framework.transactions import commands, messages, utils
from .api_globals import api_globals
# TODO: Verify that a transaction is being created for every
# individual request.
class TokuTransactionsMiddleware(object):
"""TokuMX transaction middleware."""
def process_request(self, request):
"""Begin a transaction if one doesn't already exist."""
try:
commands.begin()
except OperationFailure as err:
message = utils.get_error_message(err)
if messages.TRANSACTION_EXISTS_ERROR not in message:
raise err
def process_exception(self, request, exception):
"""If an exception occurs, rollback the current transaction
if it exists.
"""
sentry_exception_handler(request=request)
try:
commands.rollback()
except OperationFailure as err:
message = utils.get_error_message(err)
if messages.NO_TRANSACTION_ERROR not in message:
raise
commands.disconnect()
return None
def process_response(self, request, response):
"""Commit transaction if it exists, rolling back in an
exception occurs.
"""
try:
commands.commit()
except OperationFailure as err:
message = utils.get_error_message(err)
if messages.NO_TRANSACTION_TO_COMMIT_ERROR not in message:
raise err
except Exception as err:
try:
commands.rollback()
except OperationFailure:
pass
else:
raise err
commands.disconnect()
return response
class DjangoGlobalMiddleware(object):
"""
Store request object on a thread-local variable for use in database caching mechanism.
"""
def process_request(self, request):
api_globals.request = request
def process_exception(self, request, exception):
sentry_exception_handler(request=request)
api_globals.request = None
return None
def process_response(self, request, response):
api_globals.request = None
return response
| apache-2.0 |
bpgc-cte/python2017 | Week 7/django/lib/python3.6/site-packages/django/utils/regex_helper.py | 45 | 12911 | """
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLS, however.
"""
from __future__ import unicode_literals
import warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.six.moves import zip
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
"A": None,
"b": None,
"B": None,
"d": "0",
"D": "x",
"s": " ",
"S": "x",
"w": "x",
"W": "!",
"Z": None,
}
class Choice(list):
"""
Used to represent multiple possibilities at this point in a pattern string.
We use a distinguished type, rather than a list, so that the usage in the
code is clear.
"""
class Group(list):
"""
Used to represent a capturing group in the pattern string.
"""
class NonCapture(list):
"""
Used to represent a non-capturing group in the pattern string.
"""
def normalize(pattern):
r"""
Given a reg-exp pattern, normalizes it to an iterable of forms that
suffice for reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(4) Ignore look-ahead and look-behind assertions.
(5) Raise an error on any disjunctive ('|') constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = next(pattern_iter)
except StopIteration:
return [('', [])]
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(".")
elif ch == '|':
# FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError('Awaiting Implementation')
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = next(pattern_iter)
result.append(ch)
ch, escaped = next(pattern_iter)
while escaped or ch != ']':
ch, escaped = next(pattern_iter)
elif ch == '(':
# Some kind of group.
ch, escaped = next(pattern_iter)
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group((("%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = next(pattern_iter)
if ch in '!=<':
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch in 'iLmsu#':
warnings.warn(
'Using (?%s) in url() patterns is deprecated.' % ch,
RemovedInDjango21Warning
)
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = next(pattern_iter)
if ch not in ('<', '='):
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
# We are in a named capturing group. Extract the name and
# then skip to the end.
if ch == '<':
terminal_char = '>'
# We are in a named backreference.
else:
terminal_char = ')'
name = []
ch, escaped = next(pattern_iter)
while ch != terminal_char:
name.append(ch)
ch, escaped = next(pattern_iter)
param = ''.join(name)
# Named backreferences have already consumed the
# parenthesis.
if terminal_char != ')':
result.append(Group((("%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
else:
result.append(Group((("%%(%s)s" % param), None)))
elif ch in "*?+{":
# Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
# We had to look ahead, but it wasn't needed to compute the
# quantifier, so use this character next time around the
# main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = next(pattern_iter)
else:
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return [('', [])]
return list(zip(*flatten_result(result)))
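# A minimal illustration of normalize() on a typical Django URL pattern
# (an informal sketch, not taken from the upstream test suite):
#
#   normalize(r'^articles/(?P<year>[0-9]{4})/$')
#   # -> [('articles/%(year)s/', ['year'])]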
def next_char(input_iter):
r"""
An iterator that yields the next character from "pattern_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yields the next character, along with a boolean indicating whether it is a
raw (unescaped) character or not.
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = next(input_iter)
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. We want to walk to the
close of this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Returns the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = next(input_iter)
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = next(input_iter)
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = next(input_iter)
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
def contains(source, inst):
"""
Returns True if the "source" contains an instance of "inst". False,
otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turns the given source sequence into a list of reg-exp possibilities and
their arguments. Returns a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = ['']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, six.string_types):
continue
piece = ''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = ''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
| mit |
crosick/zhishu | ENV/lib/python2.7/encodings/cp1026.py | 593 | 13369 | """ Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1026',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
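# A minimal round-trip sketch (assumption: the module is registered as
# CPython's 'cp1026' codec; Python 2 str/unicode types as in this file):
#
#   u'\xdc'.encode('cp1026')    # -> '\x7f'
#   '\x7f'.decode('cp1026')    # -> u'\xdc' (LATIN CAPITAL LETTER U WITH DIAERESIS)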
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'{' # 0x48 -> LEFT CURLY BRACKET
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'[' # 0x68 -> LEFT SQUARE BRACKET
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
u':' # 0x7A -> COLON
u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'}' # 0x8C -> RIGHT CURLY BRACKET
u'`' # 0x8D -> GRAVE ACCENT
u'\xa6' # 0x8E -> BROKEN BAR
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u']' # 0xAC -> RIGHT SQUARE BRACKET
u'$' # 0xAD -> DOLLAR SIGN
u'@' # 0xAE -> COMMERCIAL AT
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'~' # 0xCC -> TILDE
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\\' # 0xDC -> REVERSE SOLIDUS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'#' # 0xEC -> NUMBER SIGN
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'"' # 0xFC -> QUOTATION MARK
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
| mit |
chase-kernan/lsc-seis-gcm | gcm/web/coinc.py | 1 | 2126 |
import bottle
from gcm.data import channels as chn, coinc as co, raw
from gcm.web.utils import *
import numpy as np
@bottle.get('/coinc/group/<group_id:int>')
@bottle.view('coincs.html')
def get_group(group_id):
return {'root': WEB_ROOT, 'group': _get_group(group_id)}
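# Bottle registers each route at decoration time, so re-using the function
# name `get_group` for the handlers below is harmless.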
@bottle.get('/coinc/cross/group/<group_id:int>')
@bottle.view('coinc_cross.html')
def get_group(group_id):
return {'root': WEB_ROOT, 'group': _get_group(group_id)}
@bottle.get('/coinc/group/<group_id:int>/all')
@succeed_or_fail
def get_all_coinc(group_id):
group = chn.get_group(group_id)
coincs = co.coincs_to_list(group)
limit = int(bottle.request.query.limit or len(coincs))
return {'coincs': coincs[:limit]}
@bottle.get('/coinc/group/<group_id:int>/time-series/<coinc_id:int>')
@bottle.view('coinc_time_series.html')
def get_time_series(group_id, coinc_id):
return {'root': WEB_ROOT,
'group': _get_group(group_id),
'coinc': _get_coinc(chn.get_group(group_id), coinc_id),
'sampling_rate': raw.SAMPLING_RATE,
'sample_buffer': raw.SAMPLE_BUFFER,
'sample_duration': raw.SAMPLE_DURATION}
@bottle.get('/coinc/group/<group_id:int>/time-series/<coinc_id:int>/all')
@succeed_or_fail
def get_time_series_data(group_id, coinc_id):
group = chn.get_group(group_id)
with co.open_coincs(group, mode='r') as coincs:
coinc = coincs[coinc_id]
raw_series, bandpassed = raw.get_raw_coinc(group, coinc)
return {'raw': [s.tolist() for s in raw_series],
'bandpassed': [s.tolist() for s in bandpassed]}
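# NOTE: the view below carries no @bottle.get(...) decorator, so no URL is
# routed to it as written; it also rebinds `get_group` once more, which does
# not affect the routes already registered above.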
@bottle.view('coinc_windows.html')
def get_group(group_id):
return {'root': WEB_ROOT, 'group': _get_group(group_id)}
def _get_group(group_id):
    try:
        group = chn.get_group(group_id)
    except Exception:
        # `group` is unbound if the lookup fails, so report the requested id.
        bottle.abort(404, "No coincidences for group {0}".format(group_id))
group_dict = group._asdict()
group_dict['channels'] = [c._asdict() for c in group.channels]
return group_dict
def _get_coinc(group, coinc_id):
with co.open_coincs(group, mode='r') as coincs:
return co.coinc_to_dict(coincs[coinc_id])
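# A hedged launch sketch (assumption: the handlers above are registered on
# bottle's default app, which is what the plain @bottle.get decorators imply):
#
#     if __name__ == '__main__':
#         bottle.run(host='localhost', port=8080, debug=True)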
| gpl-2.0 |
ray-project/ray | python/ray/tune/examples/horovod_simple.py | 1 | 4364 | import torch
import numpy as np
import ray
from ray import tune
from ray.tune.integration.horovod import DistributedTrainableCreator
import time
def sq(x):
m2 = 1.
m1 = -20.
m0 = 50.
return m2 * x * x + m1 * x + m0
def qu(x):
m3 = 10.
m2 = 5.
m1 = -20.
m0 = -5.
return m3 * x * x * x + m2 * x * x + m1 * x + m0
class Net(torch.nn.Module):
    def __init__(self, mode="square"):
super(Net, self).__init__()
if mode == "square":
self.mode = 0
self.param = torch.nn.Parameter(torch.FloatTensor([1., -1.]))
else:
self.mode = 1
self.param = torch.nn.Parameter(torch.FloatTensor([1., -1., 1.]))
def forward(self, x):
        if self.mode == 0:  # square polynomial; mode 1 falls through to cubic
return x * x + self.param[0] * x + self.param[1]
else:
return_val = 10 * x * x * x
return_val += self.param[0] * x * x
return_val += self.param[1] * x + self.param[2]
return return_val
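# A quick sanity check of Net (hypothetical snippet, not part of the example):
#
#     >>> net = Net(mode="square")
#     >>> net(torch.tensor(2.0))   # 2*2 + 1.0*2 + (-1.0) with initial params
#     # -> tensor(5., ...)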
def train(config):
import torch
import horovod.torch as hvd
hvd.init()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mode = config["mode"]
net = Net(mode).to(device)
optimizer = torch.optim.SGD(
net.parameters(),
lr=config["lr"],
)
optimizer = hvd.DistributedOptimizer(optimizer)
num_steps = 5
print(hvd.size())
np.random.seed(1 + hvd.rank())
torch.manual_seed(1234)
    # To ensure consistent initialization across slots, broadcast the initial
    # model parameters and optimizer state from rank 0 to all workers.
hvd.broadcast_parameters(net.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
start = time.time()
x_max = config["x_max"]
for step in range(1, num_steps + 1):
features = torch.Tensor(np.random.rand(1) * 2 * x_max -
x_max).to(device)
if mode == "square":
labels = sq(features)
else:
labels = qu(features)
optimizer.zero_grad()
outputs = net(features)
loss = torch.nn.MSELoss()(outputs, labels)
loss.backward()
optimizer.step()
time.sleep(0.1)
tune.report(loss=loss.item())
total = time.time() - start
print(f"Took {total:0.3f} s. Avg: {total / num_steps:0.3f} s.")
def tune_horovod(hosts_per_trial,
slots_per_host,
num_samples,
use_gpu,
mode="square",
x_max=1.):
horovod_trainable = DistributedTrainableCreator(
train,
use_gpu=use_gpu,
num_hosts=hosts_per_trial,
num_slots=slots_per_host,
replicate_pem=False)
analysis = tune.run(
horovod_trainable,
metric="loss",
mode="min",
config={
"lr": tune.uniform(0.1, 1),
"mode": mode,
"x_max": x_max
},
num_samples=num_samples,
fail_fast=True)
print("Best hyperparameters found were: ", analysis.best_config)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode", type=str, default="square", choices=["square", "cubic"])
parser.add_argument(
"--learning_rate", type=float, default=0.1, dest="learning_rate")
parser.add_argument("--x_max", type=float, default=1., dest="x_max")
parser.add_argument("--gpu", action="store_true")
parser.add_argument(
"--smoke-test",
action="store_true",
help=("Finish quickly for testing."))
parser.add_argument("--hosts-per-trial", type=int, default=1)
parser.add_argument("--slots-per-host", type=int, default=2)
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using "
"Ray Client.")
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=2)
elif args.server_address:
ray.util.connect(args.server_address)
# import ray
# ray.init(address="auto") # assumes ray is started with ray up
tune_horovod(
hosts_per_trial=args.hosts_per_trial,
slots_per_host=args.slots_per_host,
num_samples=2 if args.smoke_test else 10,
use_gpu=args.gpu,
mode=args.mode,
x_max=args.x_max)
| apache-2.0 |
adityacs/ansible | lib/ansible/modules/cloud/rackspace/rax_keypair.py | 50 | 5128 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rax_keypair
short_description: Create a keypair for use with Rackspace Cloud Servers
description:
- Create a keypair for use with Rackspace Cloud Servers
version_added: 1.5
options:
name:
description:
- Name of keypair
required: true
public_key:
description:
- Public Key string to upload. Can be a file path or string
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
author: "Matt Martz (@sivel)"
notes:
- Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate.
- The ability to specify a file path for the public key was added in 1.7
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
region: DFW
register: keypair
- name: Create local public key
local_action:
module: copy
content: "{{ keypair.keypair.public_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}.pub"
- name: Create local private key
local_action:
module: copy
content: "{{ keypair.keypair.private_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
- name: Create a keypair
hosts: localhost
gather_facts: False
tasks:
- name: keypair request
local_action:
module: rax_keypair
credentials: ~/.raxpub
name: my_keypair
public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
region: DFW
register: keypair
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_keypair(module, name, public_key, state):
changed = False
cs = pyrax.cloudservers
if cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
keypair = {}
if state == 'present':
if public_key and os.path.isfile(public_key):
            try:
                with open(public_key) as f:
                    public_key = f.read()
            except Exception:
                module.fail_json(msg='Failed to load %s' % public_key)
try:
keypair = cs.keypairs.find(name=name)
except cs.exceptions.NotFound:
try:
keypair = cs.keypairs.create(name, public_key)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
except Exception as e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
keypair = cs.keypairs.find(name=name)
        except Exception:
pass
if keypair:
try:
keypair.delete()
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, keypair=rax_to_dict(keypair))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
public_key=dict(),
state=dict(default='present', choices=['absent', 'present']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
name = module.params.get('name')
public_key = module.params.get('public_key')
state = module.params.get('state')
setup_rax_module(module, pyrax)
rax_keypair(module, name, public_key, state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
if __name__ == '__main__':
main()
| gpl-3.0 |
eckucukoglu/arm-linux-gnueabihf | arm-linux-gnueabihf/libc/usr/lib/python2.7/encodings/cp737.py | 593 | 34937 | """ Python Character Mapping Codec cp737 generated from 'VENDORS/MICSFT/PC/CP737.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp737',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
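# A minimal usage sketch (assumption: the codec is importable by name via the
# standard `encodings` search path):
#
#     >>> '\x80'.decode('cp737')       # -> GREEK CAPITAL LETTER ALPHA
#     u'\u0391'
#     >>> u'\u2248'.encode('cp737')    # ALMOST EQUAL TO
#     '\xf7'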
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x008b: 0x039c, # GREEK CAPITAL LETTER MU
0x008c: 0x039d, # GREEK CAPITAL LETTER NU
0x008d: 0x039e, # GREEK CAPITAL LETTER XI
0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
0x0099: 0x03b2, # GREEK SMALL LETTER BETA
0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
0x009e: 0x03b7, # GREEK SMALL LETTER ETA
0x009f: 0x03b8, # GREEK SMALL LETTER THETA
0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00a3: 0x03bc, # GREEK SMALL LETTER MU
0x00a4: 0x03bd, # GREEK SMALL LETTER NU
0x00a5: 0x03be, # GREEK SMALL LETTER XI
0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00a7: 0x03c0, # GREEK SMALL LETTER PI
0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
0x00af: 0x03c8, # GREEK SMALL LETTER PSI
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0391' # 0x0080 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0x0081 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0x0082 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0x0083 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0x0084 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0x0085 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0x0086 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0x0087 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0x0088 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0x0089 -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0x008a -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0x008b -> GREEK CAPITAL LETTER MU
u'\u039d' # 0x008c -> GREEK CAPITAL LETTER NU
u'\u039e' # 0x008d -> GREEK CAPITAL LETTER XI
u'\u039f' # 0x008e -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0x008f -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0x0090 -> GREEK CAPITAL LETTER RHO
u'\u03a3' # 0x0091 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0x0092 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0x0093 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0x0094 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0x0095 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0x0096 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0x0097 -> GREEK CAPITAL LETTER OMEGA
u'\u03b1' # 0x0098 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0x0099 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0x009a -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0x009b -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0x009c -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0x009d -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0x009e -> GREEK SMALL LETTER ETA
u'\u03b8' # 0x009f -> GREEK SMALL LETTER THETA
u'\u03b9' # 0x00a0 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0x00a1 -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0x00a2 -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0x00a3 -> GREEK SMALL LETTER MU
u'\u03bd' # 0x00a4 -> GREEK SMALL LETTER NU
u'\u03be' # 0x00a5 -> GREEK SMALL LETTER XI
u'\u03bf' # 0x00a6 -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0x00a7 -> GREEK SMALL LETTER PI
u'\u03c1' # 0x00a8 -> GREEK SMALL LETTER RHO
u'\u03c3' # 0x00a9 -> GREEK SMALL LETTER SIGMA
u'\u03c2' # 0x00aa -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c4' # 0x00ab -> GREEK SMALL LETTER TAU
u'\u03c5' # 0x00ac -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0x00ad -> GREEK SMALL LETTER PHI
u'\u03c7' # 0x00ae -> GREEK SMALL LETTER CHI
u'\u03c8' # 0x00af -> GREEK SMALL LETTER PSI
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
u'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
u'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
u'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
u'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
u'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
u'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
u'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\u03c9' # 0x00e0 -> GREEK SMALL LETTER OMEGA
u'\u03ac' # 0x00e1 -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0x00e2 -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0x00e3 -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03ca' # 0x00e4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03af' # 0x00e5 -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03cc' # 0x00e6 -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0x00e7 -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03cb' # 0x00e8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03ce' # 0x00e9 -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\u0386' # 0x00ea -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\u0388' # 0x00eb -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0x00ec -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0x00ed -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\u038c' # 0x00ee -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\u038e' # 0x00ef -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0x00f0 -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
u'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
u'\u03aa' # 0x00f4 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0x00f5 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u2248' # 0x00f7 -> ALMOST EQUAL TO
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\u221a' # 0x00fb -> SQUARE ROOT
u'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b7: 0x00fa, # MIDDLE DOT
0x00f7: 0x00f6, # DIVISION SIGN
0x0386: 0x00ea, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x0388: 0x00eb, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x0389: 0x00ec, # GREEK CAPITAL LETTER ETA WITH TONOS
0x038a: 0x00ed, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x038c: 0x00ee, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x038e: 0x00ef, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x038f: 0x00f0, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x0391: 0x0080, # GREEK CAPITAL LETTER ALPHA
0x0392: 0x0081, # GREEK CAPITAL LETTER BETA
0x0393: 0x0082, # GREEK CAPITAL LETTER GAMMA
0x0394: 0x0083, # GREEK CAPITAL LETTER DELTA
0x0395: 0x0084, # GREEK CAPITAL LETTER EPSILON
0x0396: 0x0085, # GREEK CAPITAL LETTER ZETA
0x0397: 0x0086, # GREEK CAPITAL LETTER ETA
0x0398: 0x0087, # GREEK CAPITAL LETTER THETA
0x0399: 0x0088, # GREEK CAPITAL LETTER IOTA
0x039a: 0x0089, # GREEK CAPITAL LETTER KAPPA
0x039b: 0x008a, # GREEK CAPITAL LETTER LAMDA
0x039c: 0x008b, # GREEK CAPITAL LETTER MU
0x039d: 0x008c, # GREEK CAPITAL LETTER NU
0x039e: 0x008d, # GREEK CAPITAL LETTER XI
0x039f: 0x008e, # GREEK CAPITAL LETTER OMICRON
0x03a0: 0x008f, # GREEK CAPITAL LETTER PI
0x03a1: 0x0090, # GREEK CAPITAL LETTER RHO
0x03a3: 0x0091, # GREEK CAPITAL LETTER SIGMA
0x03a4: 0x0092, # GREEK CAPITAL LETTER TAU
0x03a5: 0x0093, # GREEK CAPITAL LETTER UPSILON
0x03a6: 0x0094, # GREEK CAPITAL LETTER PHI
0x03a7: 0x0095, # GREEK CAPITAL LETTER CHI
0x03a8: 0x0096, # GREEK CAPITAL LETTER PSI
0x03a9: 0x0097, # GREEK CAPITAL LETTER OMEGA
0x03aa: 0x00f4, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x03ab: 0x00f5, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x03ac: 0x00e1, # GREEK SMALL LETTER ALPHA WITH TONOS
0x03ad: 0x00e2, # GREEK SMALL LETTER EPSILON WITH TONOS
0x03ae: 0x00e3, # GREEK SMALL LETTER ETA WITH TONOS
0x03af: 0x00e5, # GREEK SMALL LETTER IOTA WITH TONOS
0x03b1: 0x0098, # GREEK SMALL LETTER ALPHA
0x03b2: 0x0099, # GREEK SMALL LETTER BETA
0x03b3: 0x009a, # GREEK SMALL LETTER GAMMA
0x03b4: 0x009b, # GREEK SMALL LETTER DELTA
0x03b5: 0x009c, # GREEK SMALL LETTER EPSILON
0x03b6: 0x009d, # GREEK SMALL LETTER ZETA
0x03b7: 0x009e, # GREEK SMALL LETTER ETA
0x03b8: 0x009f, # GREEK SMALL LETTER THETA
0x03b9: 0x00a0, # GREEK SMALL LETTER IOTA
0x03ba: 0x00a1, # GREEK SMALL LETTER KAPPA
0x03bb: 0x00a2, # GREEK SMALL LETTER LAMDA
0x03bc: 0x00a3, # GREEK SMALL LETTER MU
0x03bd: 0x00a4, # GREEK SMALL LETTER NU
0x03be: 0x00a5, # GREEK SMALL LETTER XI
0x03bf: 0x00a6, # GREEK SMALL LETTER OMICRON
0x03c0: 0x00a7, # GREEK SMALL LETTER PI
0x03c1: 0x00a8, # GREEK SMALL LETTER RHO
0x03c2: 0x00aa, # GREEK SMALL LETTER FINAL SIGMA
0x03c3: 0x00a9, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00ab, # GREEK SMALL LETTER TAU
0x03c5: 0x00ac, # GREEK SMALL LETTER UPSILON
0x03c6: 0x00ad, # GREEK SMALL LETTER PHI
0x03c7: 0x00ae, # GREEK SMALL LETTER CHI
0x03c8: 0x00af, # GREEK SMALL LETTER PSI
0x03c9: 0x00e0, # GREEK SMALL LETTER OMEGA
0x03ca: 0x00e4, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x03cb: 0x00e8, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x03cc: 0x00e6, # GREEK SMALL LETTER OMICRON WITH TONOS
0x03cd: 0x00e7, # GREEK SMALL LETTER UPSILON WITH TONOS
0x03ce: 0x00e9, # GREEK SMALL LETTER OMEGA WITH TONOS
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-2.0 |
taedori81/e-commerce-template | saleor/dashboard/product/views.py | 7 | 10930 | from __future__ import unicode_literals
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.http import is_safe_url
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_http_methods
from ...product.models import Product, ProductImage, Stock, ProductAttribute, \
ProductVariant
from ..utils import paginate
from ..views import staff_member_required
from . import forms
@staff_member_required
def product_list(request):
products = Product.objects.prefetch_related('images').select_subclasses()
form = forms.ProductClassForm(request.POST or None)
if form.is_valid():
return redirect('dashboard:product-add')
products, paginator = paginate(products, 30, request.GET.get('page'))
ctx = {'form': form, 'products': products, 'paginator': paginator}
return TemplateResponse(request, 'dashboard/product/list.html', ctx)
@staff_member_required
def product_create(request):
product = Product()
form = forms.ProductForm(request.POST or None, instance=product)
if form.is_valid():
product = form.save()
msg = _('Added product %s') % product
messages.success(request, msg)
return redirect('dashboard:variant-add', product_pk=product.pk)
ctx = {'product_form': form, 'product': product}
return TemplateResponse(request, 'dashboard/product/product_form.html', ctx)
@staff_member_required
def product_edit(request, pk):
product = get_object_or_404(
Product.objects.select_subclasses().prefetch_related('images',
'variants'), pk=pk)
attributes = product.attributes.prefetch_related('values')
images = product.images.all()
variants = product.variants.select_subclasses()
stock_items = Stock.objects.filter(variant__in=variants)
form = forms.ProductForm(request.POST or None, instance=product)
variants_delete_form = forms.VariantBulkDeleteForm()
stock_delete_form = forms.StockBulkDeleteForm()
if form.is_valid():
product = form.save()
msg = _('Updated product %s') % product
messages.success(request, msg)
return redirect('dashboard:product-update', pk=product.pk)
ctx = {'attributes': attributes, 'images': images, 'product_form': form,
'product': product, 'stock_delete_form': stock_delete_form,
'stock_items': stock_items, 'variants': variants,
'variants_delete_form': variants_delete_form}
return TemplateResponse(request, 'dashboard/product/product_form.html', ctx)
@staff_member_required
def product_delete(request, pk):
product = get_object_or_404(Product, pk=pk)
if request.method == 'POST':
product.delete()
messages.success(request, _('Deleted product %s') % product)
return redirect('dashboard:product-list')
return TemplateResponse(
request, 'dashboard/product/modal_product_confirm_delete.html',
{'product': product})
@staff_member_required
def stock_edit(request, product_pk, stock_pk=None):
product = get_object_or_404(Product, pk=product_pk)
if stock_pk:
stock = get_object_or_404(Stock, pk=stock_pk)
else:
stock = Stock()
form = forms.StockForm(request.POST or None, instance=stock,
product=product)
if form.is_valid():
form.save()
messages.success(request, _('Saved stock'))
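        # success_url comes straight from the POSTed form, so it must be
        # checked against the current host before redirecting (an open-redirect
        # guard); the same pattern recurs in the views below.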
success_url = request.POST['success_url']
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
ctx = {'form': form, 'product': product, 'stock': stock}
return TemplateResponse(request, 'dashboard/product/stock_form.html', ctx)
@staff_member_required
def stock_delete(request, product_pk, stock_pk):
product = get_object_or_404(Product, pk=product_pk)
stock = get_object_or_404(Stock, pk=stock_pk)
if request.method == 'POST':
stock.delete()
messages.success(request, _('Deleted stock'))
success_url = request.POST['success_url']
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
ctx = {'product': product, 'stock': stock}
return TemplateResponse(
request, 'dashboard/product/stock_confirm_delete.html', ctx)
@staff_member_required
@require_http_methods(['POST'])
def stock_bulk_delete(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
form = forms.StockBulkDeleteForm(request.POST)
if form.is_valid():
form.delete()
success_url = request.POST['success_url']
messages.success(request, _('Deleted stock'))
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
return redirect('dashboard:product-update', pk=product.pk)
@staff_member_required
def product_image_edit(request, product_pk, img_pk=None):
product = get_object_or_404(Product, pk=product_pk)
if img_pk:
product_image = get_object_or_404(product.images, pk=img_pk)
else:
product_image = ProductImage(product=product)
form = forms.ProductImageForm(request.POST or None, request.FILES or None,
instance=product_image)
if form.is_valid():
product_image = form.save()
if img_pk:
msg = _('Updated image %s') % product_image.image.name
else:
msg = _('Added image %s') % product_image.image.name
messages.success(request, msg)
success_url = request.POST['success_url']
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
ctx = {'form': form, 'product': product, 'product_image': product_image}
return TemplateResponse(
request, 'dashboard/product/product_image_form.html', ctx)
@staff_member_required
def product_image_delete(request, product_pk, img_pk):
product = get_object_or_404(Product, pk=product_pk)
product_image = get_object_or_404(product.images, pk=img_pk)
if request.method == 'POST':
product_image.delete()
messages.success(
request, _('Deleted image %s') % product_image.image.name)
success_url = request.POST['success_url']
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
ctx = {'product': product, 'product_image': product_image}
return TemplateResponse(
request,
'dashboard/product/modal_product_image_confirm_delete.html', ctx)
@staff_member_required
def variant_edit(request, product_pk, variant_pk=None):
product = get_object_or_404(Product.objects.select_subclasses(),
pk=product_pk)
form_initial = {}
if variant_pk:
variant = get_object_or_404(product.variants.select_subclasses(),
pk=variant_pk)
else:
variant = ProductVariant(product=product)
form = forms.ProductVariantForm(request.POST or None, instance=variant,
initial=form_initial)
attribute_form = forms.VariantAttributeForm(request.POST or None,
instance=variant)
if all([form.is_valid(), attribute_form.is_valid()]):
form.save()
attribute_form.save()
if variant_pk:
msg = _('Updated variant %s') % variant.name
else:
msg = _('Added variant %s') % variant.name
messages.success(request, msg)
success_url = request.POST['success_url']
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
ctx = {'attribute_form': attribute_form, 'form': form, 'product': product,
'variant': variant}
return TemplateResponse(request, 'dashboard/product/variant_form.html', ctx)
@staff_member_required
def variant_delete(request, product_pk, variant_pk):
product = get_object_or_404(Product, pk=product_pk)
variant = get_object_or_404(product.variants, pk=variant_pk)
is_only_variant = product.variants.count() == 1
if request.method == 'POST':
variant.delete()
messages.success(request, _('Deleted variant %s') % variant.name)
success_url = request.POST['success_url']
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
ctx = {'is_only_variant': is_only_variant, 'product': product,
'variant': variant}
return TemplateResponse(
request,
'dashboard/product/modal_product_variant_confirm_delete.html', ctx)
@staff_member_required
@require_http_methods(['POST'])
def variants_bulk_delete(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
form = forms.VariantBulkDeleteForm(request.POST)
if form.is_valid():
form.delete()
success_url = request.POST['success_url']
messages.success(request, _('Deleted variants'))
if is_safe_url(success_url, request.get_host()):
return redirect(success_url)
return redirect('dashboard:product-update', pk=product.pk)
@staff_member_required
def attribute_list(request):
attributes = ProductAttribute.objects.prefetch_related('values')
ctx = {'attributes': attributes}
return TemplateResponse(request, 'dashboard/product/attributes/list.html',
ctx)
@staff_member_required
def attribute_edit(request, pk=None):
if pk:
attribute = get_object_or_404(ProductAttribute, pk=pk)
else:
attribute = ProductAttribute()
form = forms.ProductAttributeForm(request.POST or None, instance=attribute)
formset = forms.AttributeChoiceValueFormset(request.POST or None,
request.FILES or None,
instance=attribute)
if all([form.is_valid(), formset.is_valid()]):
attribute = form.save()
formset.save()
msg = _('Updated attribute') if pk else _('Added attribute')
messages.success(request, msg)
return redirect('dashboard:product-attribute-update', pk=attribute.pk)
ctx = {'attribute': attribute, 'form': form, 'formset': formset}
return TemplateResponse(request, 'dashboard/product/attributes/form.html',
ctx)
@staff_member_required
def attribute_delete(request, pk):
attribute = get_object_or_404(ProductAttribute, pk=pk)
if request.method == 'POST':
attribute.delete()
        messages.success(request, _('Deleted attribute %s') % attribute.display)
return redirect('dashboard:product-attributes')
ctx = {'attribute': attribute}
return TemplateResponse(
request, 'dashboard/product/attributes/modal_confirm_delete.html', ctx)
| bsd-3-clause |
lucidbard/NewsBlur | apps/reader/migrations/0008_oldest_unread_story_date.py | 18 | 12369 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserSubscription.oldest_unread_story_date'
db.add_column('reader_usersubscription', 'oldest_unread_story_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserSubscription.oldest_unread_story_date'
db.delete_column('reader_usersubscription', 'oldest_unread_story_date')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reader.feature': {
'Meta': {'ordering': "['-date']", 'object_name': 'Feature'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'reader.userstory': {
'Meta': {'unique_together': "(('user', 'feed', 'story'),)", 'object_name': 'UserStory'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'read_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Story']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'reader.usersubscription': {
'Meta': {'unique_together': "(('user', 'feed'),)", 'object_name': 'UserSubscription'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscribers'", 'to': "orm['rss_feeds.Feed']"}),
'feed_opens': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trained': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_read_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 1, 1, 23, 38, 35, 930483)'}),
'mark_read_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 1, 1, 23, 38, 35, 930483)'}),
'needs_unread_recalc': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'oldest_unread_story_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'unread_count_negative': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_neutral': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_positive': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unread_count_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'to': "orm['auth.User']"}),
'user_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'reader.usersubscriptionfolders': {
'Meta': {'object_name': 'UserSubscriptionFolders'},
'folders': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.story': {
'Meta': {'ordering': "['-story_date']", 'unique_together': "(('story_feed', 'story_guid_hash'),)", 'object_name': 'Story', 'db_table': "'stories'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
'story_author_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'story_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'story_date': ('django.db.models.fields.DateTimeField', [], {}),
'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'story_original_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'rss_feeds.storyauthor': {
'Meta': {'object_name': 'StoryAuthor'},
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['reader']
| mit |
UFTS-Device/NanoCoin | qa/rpc-tests/test_framework/bignum.py | 123 | 1929 | #!/usr/bin/env python3
#
# bignum.py
#
# This file is copied from python-bitcoinlib.
#
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# bitcoin-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
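if __name__ == '__main__':
    # Illustrative self-test (not part of the original module): round-trip a
    # value through the MPI and vch encodings defined above.
    n = 0x1234
    assert mpi2bn(bn2mpi(n)) == n
    assert vch2bn(bn2vch(n)) == n
    # negative numbers carry their sign in the top bit of the MPI body
    assert mpi2bn(bn2mpi(-n)) == -n
    print("bignum round-trip OK")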
| mit |
CnrLwlss/HTSauto | HTSscripts/C2FindBarcode.py | 1 | 7171 | # Finds images from ID in our archive and dumps file locations to .json file
# Only images where a specific treatment and medium were applied, captured before a cutoff period after inoculation, are considered
# Optionally copies symlinks to images or image files themselves to pdump directory for inspection/download
# This should read in arguments from the command line
# First argument: experiment ID (e.g. QFA0060)
# Second (optional) argument: cutoff time after inoculation (days)
# If a cutoff time is not specified, include all images
import sys
import argparse
import os
import pandas
import colonyzer2.functions as c2
from datetime import datetime
import json
import shutil
import string
def toDelete(filename):
'''Generate list of output files which should be deleted'''
base=os.path.basename(filename).split(".")[0]
path=os.path.dirname(filename)
candidates=[
# Archival format (original Colonyzer)
os.path.join(path,base+"GriddedCorrected.png"),
os.path.join(path,base+"GriddedThresh.png"),
os.path.join(path,base+"Histogram.png"),
os.path.join(path,base+"OUT.dat"),
# Colonyzer2 format
os.path.join(path,"Output_Data",base+".dat"),
os.path.join(path,"Output_Data",base+".out"),
os.path.join(path,"Output_Images",base+".png")
]
return(candidates)
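# Illustrative example (not in the original source): for an image such as
# "/data/EXP/plate1_001.jpg", toDelete() returns both the archival outputs
# ("/data/EXP/plate1_001GriddedCorrected.png", ...OUT.dat, etc.) and the
# Colonyzer2 outputs ("/data/EXP/Output_Data/plate1_001.dat", ...).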
def parseArgs():
parser=argparse.ArgumentParser(description="Build .json file describing location of timecourse images for a particularly QFA experiment. Also build directory of symlinks/shortcuts to images, or image files from QFA file archive. Execute from LOGS3 directory.")
parser.add_argument("ID", type=str, help="QFA experiment ID (e.g. QFA00001).")
parser.add_argument("-c","--cutoff",type=float, help="Maximum number of days after inoculation, beyond which images are ignored (e.g. 4.0).")
parser.add_argument("-t","--treatment",type=str, help="Only return images of plates from experiment to which treatment was applied (e.g. 30).=")
parser.add_argument("-m","--medium",type=str, help="Only return images of plates from experiment which contained this medium (e.g. CSM).")
parser.add_argument("-p","--photos",action='store_true', help="Copy symlinks to images to ../pdump directory.")
args = parser.parse_args()
return(args)
if __name__ == '__main__':
#sys.argv=['test', 'QFA0018']
args=parseArgs()
# Should execute this script from LOGS3 directory
rootDir=os.getcwd()
expt=str(args.ID)
copyphotos=args.photos
exptType=expt[0:-4]
dataDir=os.path.join(rootDir,exptType+"_EXPERIMENTS")
expDescFile=os.path.join(dataDir,expt,"AUXILIARY","ExptDescription.txt")
metaDF=pandas.read_csv(expDescFile,sep="\t")
print("Possible treatments:")
trts=metaDF["Treatment"].unique()
print(string.join([str(trt) for trt in trts],sep="\t"))
print("Possible media:")
meds=metaDF["Medium"].unique()
print(string.join([str(med) for med in meds],sep="\t"))
# Default behaviour for missing optional arguments
if args.cutoff is not None:
cutoff=float(args.cutoff)
else:
cutoff=999999999.0
if args.treatment is not None:
treatment=str(args.treatment)
metaDF=metaDF[metaDF["Treatment"].astype(str)==treatment]
print("Only take treatment "+treatment+" for "+expt)
if args.medium is not None:
medium=str(args.medium)
metaDF=metaDF[metaDF["Medium"].astype(str)==medium]
print("Only take medium "+medium+" for "+expt)
# Strip rows that have nan in barcode column (e.g. QFA0132)
#metaDF=metaDF[pandas.notnull(metaDF["Barcode"])]
# Search in some directories for images that can be analysed
List_96=["/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_96","/home/yeastimages/CAPTURED_IMAGES_STANDALONE_96","/home/yeastimages/CAPTURED_IMAGES_WARMROOM_96"]
List_384=["/home/yeastimages/CAPTURED_IMAGES_CYTOMAT","/home/yeastimages/CAPTURED_IMAGES_STANDALONE","/home/yeastimages/CAPTURED_IMAGES_WARMROOM"]
List_768=["/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_768","/home/yeastimages/CAPTURED_IMAGES_STANDALONE_768","/home/yeastimages/CAPTURED_IMAGES_WARMROOM_768"]
List_1536=["/home/yeastimages/CAPTURED_IMAGES_CYTOMAT_1536","/home/yeastimages/CAPTURED_IMAGES_STANDALONE_1536","/home/yeastimages/CAPTURED_IMAGES_WARMROOM_1536"]
Archive_384=["/home/yeastimages/ARCHIVE_IMAGES"]
# Should 768 be included in QFA (e.g. Pombe QFA)? What about 96-format?
searchOptions={"QFA":List_384+Archive_384,"MQFQFA":List_384,"SGA":List_1536+List_768,"MQFSGA":List_1536}
searchDirs=searchOptions[exptType]
barcLen=len(metaDF["Barcode"].iloc[0])
bdictfname=exptType+"_file_locations.json"
if not os.path.isfile(bdictfname):
# Assume that all barcodes have the same format as the first Barcode in metaDF
barcDict=c2.merge_lodols([c2.getBarcodes(directory,barcRange=(0,barcLen),checkDone=False) for directory in searchDirs])
with open(bdictfname, 'wb') as f:
json.dump(barcDict, f)
else:
with open(bdictfname) as f:
barcDict=json.load(f)
    # Check that all the barcodes in metaDF appear in the list of files, otherwise throw an error
if not set(metaDF["Barcode"])<set(barcDict.keys()):
print(set(metaDF["Barcode"]))
raise Exception("There are barcodes in the ExptDescription.txt file for which I cannot find any images!")
barcDict={x:barcDict[x] for x in metaDF["Barcode"]}
barcFiles=[item for sublist in barcDict.values() for item in sublist]
print("Deleting any pre-existing output files")
cdelete=0
for f in barcFiles:
print "Deleting analysis files for: "+f
candidates=toDelete(f)
for c in candidates:
if os.path.exists(c):
os.remove(c)
cdelete=cdelete+1
    print(str(cdelete) + " analysis files (.png, .out & .dat files) deleted...")
print("Filtering images beyond cutoff from list to be analysed")
for i in xrange(0,len(metaDF["Barcode"])):
barc=metaDF["Barcode"].iloc[i]
inoc=metaDF["Start.Time"].iloc[i]
flist=barcDict[barc]
nimages=len(barcDict[barc])
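        # days elapsed between each image capture and inoculation; images
        # taken more than `cutoff` days after inoculation are dropped below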
dates=[(c2.getDate(f)-datetime.strptime(inoc,"%Y-%m-%d_%H-%M-%S")).total_seconds()/(24*60*60) for f in flist]
barcDict[barc]=[f for ind,f in enumerate(flist) if dates[ind]<=cutoff]
print("Will ignore last "+str(nimages-len(barcDict[barc]))+" images from "+barc)
dictOut=os.path.join(dataDir,expt,"AUXILIARY",expt+'_C2.json')
print("Writing dictionary of images for analysis to file: "+dictOut)
with open(dictOut, 'wb') as fp:
json.dump(barcDict, fp)
if (copyphotos):
dirname="../pdump"
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.mkdir(dirname)
for barc in barcDict.keys():
for f in barcDict[barc]:
fname=os.path.basename(f)
targ=os.path.join(dirname,fname)
print("Copying "+f+" to "+targ)
os.symlink(f,targ)
| gpl-2.0 |
jusdng/odoo | addons/website/tests/test_crawl.py | 251 | 3415 | # -*- coding: utf-8 -*-
import logging
import urlparse
import time
import lxml.html
import openerp
import re
_logger = logging.getLogger(__name__)
class Crawler(openerp.tests.HttpCase):
""" Test suite crawling an openerp CMS instance and checking that all
internal links lead to a 200 response.
If a username and a password are provided, authenticates the user before
starting the crawl
"""
at_install = False
post_install = True
def crawl(self, url, seen=None, msg=''):
        if seen is None:
seen = set()
url_slug = re.sub(r"[/](([^/=?&]+-)?[0-9]+)([/]|$)", '/<slug>/', url)
url_slug = re.sub(r"([^/=?&]+)=[^/=?&]+", '\g<1>=param', url_slug)
if url_slug in seen:
return seen
else:
seen.add(url_slug)
_logger.info("%s %s", msg, url)
r = self.url_open(url)
code = r.getcode()
self.assertIn( code, xrange(200, 300), "%s Fetching %s returned error response (%d)" % (msg, url, code))
if r.info().gettype() == 'text/html':
doc = lxml.html.fromstring(r.read())
for link in doc.xpath('//a[@href]'):
href = link.get('href')
parts = urlparse.urlsplit(href)
# href with any fragment removed
href = urlparse.urlunsplit((
parts.scheme,
parts.netloc,
parts.path,
parts.query,
''
))
# FIXME: handle relative link (not parts.path.startswith /)
if parts.netloc or \
not parts.path.startswith('/') or \
parts.path == '/web' or\
parts.path.startswith('/web/') or \
parts.path.startswith('/en_US/') or \
(parts.scheme and parts.scheme not in ('http', 'https')):
continue
self.crawl(href, seen, msg)
return seen
def test_10_crawl_public(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
seen = self.crawl('/', msg='Anonymous Coward')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "public crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request, ", count, duration, sql, duration/count, float(sql)/count)
def test_20_crawl_demo(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
self.authenticate('demo', 'demo')
seen = self.crawl('/', msg='demo')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "demo crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request", count, duration, sql, duration/count, float(sql)/count)
def test_30_crawl_admin(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
self.authenticate('admin', 'admin')
seen = self.crawl('/', msg='admin')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "admin crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request", count, duration, sql, duration/count, float(sql)/count)
| agpl-3.0 |
Bytewerk/uClinux-ipcam | user/samba/source/python/examples/tdbpack/oldtdbutil.py | 55 | 4116 | #!/usr/bin/python
#############################################################
# tdbutil
#
# Purpose:
# Contains functions that are used to pack and unpack data
# from Samba's tdb databases. Samba sometimes represents complex
# data structures as a single value in a database. These functions
# allow other python scripts to package data types into a single python
# string and unpackage them.
#
#
# XXXXX: This code is no longer used; it's just here for testing
# compatibility with the new (much faster) C implementation.
#
##############################################################
import string
def pack(format,list):
retstring = ''
listind = 0
# Cycle through format entries
for type in format:
# Null Terminated String
if (type == 'f' or type == 'P'):
retstring = retstring + list[listind] + "\000"
# 4 Byte Number
if (type == 'd'):
retstring = retstring + PackNum(list[listind],4)
# 2 Byte Number
if (type == 'w'):
retstring = retstring + PackNum(list[listind],2)
# Pointer Value
if (type == 'p'):
if (list[listind]):
retstring = retstring + PackNum(1,4)
else:
retstring = retstring + PackNum(0,4)
# Buffer and Length
if (type == 'B'):
# length
length = list[listind]
retstring = retstring + PackNum(length,4)
length = int(length)
listind = listind + 1
# buffer
retstring = retstring + list[listind][:length]
        # advance to the next input element for the next format code
        listind = listind + 1
return retstring
def unpack(format,buffer):
retlist = []
bufind = 0
lasttype = ""
for type in format:
# Pointer Value
if (type == 'p'):
newvalue = UnpackNum(buffer[bufind:bufind+4])
bufind = bufind + 4
if (newvalue):
newvalue = 1L
else:
newvalue = 0L
retlist.append(newvalue)
# Previous character till end of data
elif (type == '$'):
if (lasttype == 'f'):
while (bufind < len(buffer)):
newstring = ''
while (buffer[bufind] != '\000'):
newstring = newstring + buffer[bufind]
bufind = bufind + 1
bufind = bufind + 1
retlist.append(newstring)
# Null Terminated String
elif (type == 'f' or type == 'P'):
newstring = ''
while (buffer[bufind] != '\000'):
newstring = newstring + buffer[bufind]
bufind = bufind + 1
bufind = bufind + 1
retlist.append(newstring)
# 4 Byte Number
elif (type == 'd'):
newvalue = UnpackNum(buffer[bufind:bufind+4])
bufind = bufind + 4
retlist.append(newvalue)
# 2 Byte Number
elif (type == 'w'):
newvalue = UnpackNum(buffer[bufind:bufind+2])
bufind = bufind + 2
retlist.append(newvalue)
# Length and Buffer
elif (type == 'B'):
# Length
length = UnpackNum(buffer[bufind:bufind+4])
bufind = bufind + 4
retlist.append(length)
length = int(length)
# Buffer
retlist.append(buffer[bufind:bufind+length])
bufind = bufind + length
lasttype = type
return ((retlist,buffer[bufind:]))
def PackNum(myint,size):
retstring = ''
size = size * 2
hint = hex(myint)[2:]
# Check for long notation
if (hint[-1:] == 'L'):
hint = hint[:-1]
addon = size - len(hint)
for i in range(0,addon):
hint = '0' + hint
while (size > 0):
val = string.atoi(hint[size-2:size],16)
retstring = retstring + chr(val)
size = size - 2
return retstring
def UnpackNum(buffer):
size = len(buffer)
mystring = ''
for i in range(size-1,-1,-1):
val = hex(ord(buffer[i]))[2:]
if (len(val) == 1):
val = '0' + val
mystring = mystring + val
if (len(mystring) > 4):
return string.atol(mystring,16)
else:
return string.atoi(mystring,16)
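if __name__ == '__main__':
    # Illustrative round-trip (not part of the original module): pack a
    # null-terminated string, a 4-byte number and a length-prefixed buffer,
    # then unpack them again.
    packed = pack('fdB', ['hello', 42, 3, 'abc'])
    values, remainder = unpack('fdB', packed)
    print values           # expected: ['hello', 42L, 3L, 'abc']
    print repr(remainder)  # expected: ''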
| gpl-2.0 |
GkAntonius/feynman | docs/auto_examples/Particle_Physics/plot_dchp1.py | 2 | 2222 | """
DCHP1
=====
Doubly Charged Higgs Production
"""
import matplotlib
import matplotlib.pyplot
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
from feynman import Diagram
fig = matplotlib.pyplot.figure(figsize=(10.,10.))
ax = fig.add_axes([0,0,1,1], frameon=False)
diagram = Diagram(ax)
diagram.text(.4,0.9,"Doubly Charged Higgs Production", fontsize=40)
in1 = diagram.vertex(xy=(.1,.75), marker='')
in2 = diagram.vertex(xy=(.1,.25), marker='')
v1 = diagram.vertex(xy=(.35,.5))
v2 = diagram.vertex(xy=(.65,.5))
higgsplusout = diagram.vertex(xy=(.8,.7))
higgsminusout = diagram.vertex(xy=(.8,.3))
l1plus = diagram.vertex(xy=(.95,.8), marker='')
l2plus = diagram.vertex(xy=(.95,.6), marker='')
l1minus = diagram.vertex(xy=(.95,.4), marker='')
l2minus = diagram.vertex(xy=(.95,.2), marker='')
lw = 5
q1 = diagram.line(v1, in1, color='blue', lw=lw, arrow_param=dict(color='blue', length=0.08, width=0.02))
q2 = diagram.line(in2, v1, color='blue', lw=lw, arrow_param=dict(color='blue', length=0.08, width=0.02))
l1 = diagram.line(l1plus, higgsplusout, color='blue', lw=lw, arrow_param=dict(color='blue', length=0.08, width=0.02))
l2 = diagram.line(l2plus, higgsplusout, color='blue', lw=lw, arrow_param=dict(color='blue', length=0.08, width=0.02))
l3 = diagram.line(higgsminusout, l1minus, color='blue', lw=lw, arrow_param=dict(color='blue', length=0.08, width=0.02))
l4 = diagram.line(higgsminusout, l2minus, color='blue', lw=lw, arrow_param=dict(color='blue', length=0.08, width=0.02))
wz1 = diagram.line(v1, v2, style='wiggly', color='green', lw=lw)
higgsplus = diagram.line(v2, higgsplusout, arrow=False, ls='dashed', lw=lw, dashes=(4, 2))
higgsminus = diagram.line(v2, higgsminusout, arrow=False, ls='dashed', lw=lw, dashes=(4, 2))
q1.text(r"$\bar{\mathrm{q}}$", fontsize=40)
q2.text("q",fontsize=40)
diagram.text(0.5, 0.42, r"$Z \ / \ \gamma*$", fontsize=40)
diagram.text(0.8, 0.58, r"$H^{++}$", fontsize=40)
diagram.text(0.8, 0.42, r"$H^{--}$", fontsize=40)
diagram.text(0.98, 0.8, r"$l^+$", fontsize=40)
diagram.text(0.98, 0.6, r"$l^+$", fontsize=40)
diagram.text(0.98, 0.4, r"$l^-$", fontsize=40)
diagram.text(0.98, 0.2, r"$l^-$", fontsize=40)
diagram.plot()
matplotlib.pyplot.show()
| gpl-3.0 |
matchstick/kubernetes | hack/boilerplate/boilerplate.py | 31 | 5238 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import mmap
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument("--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument("--boilerplate-dir", default=default_boilerplate_dir)
args = parser.parse_args()
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except:
return False
data = f.read()
f.close()
basename = os.path.basename(filename)
extension = file_extension(filename)
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove build tags from the top of Go files
if extension == "go":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
# remove shebang from the top of shell files
if extension == "sh":
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
return False
# Replace all occurrences of the regex "2016|2015|2014" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh', "vendor", "test/e2e/generated/bindata.go"]
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile( 'YEAR' )
# dates can be 2014, 2015 or 2016, company holder names can be anything
regexs["date"] = re.compile( '(2014|2015|2016)' )
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from shell scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
landryb/QGIS | python/plugins/processing/algs/qgis/RegularPoints.py | 10 | 4770 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RegularPoints.py
---------------------
Date : September 2014
Copyright : (C) 2014 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'September 2014'
__copyright__ = '(C) 2014, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from random import seed, uniform
from math import sqrt
from PyQt4.QtCore import QVariant
from qgis.core import QGis, QgsRectangle, QgsFields, QgsField, QgsFeature, QgsGeometry, QgsPoint
from qgis.utils import iface
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterExtent
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputVector
class RegularPoints(GeoAlgorithm):
EXTENT = 'EXTENT'
SPACING = 'SPACING'
INSET = 'INSET'
RANDOMIZE = 'RANDOMIZE'
IS_SPACING = 'IS_SPACING'
OUTPUT = 'OUTPUT'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Regular points')
self.group, self.i18n_group = self.trAlgorithm('Vector creation tools')
self.addParameter(ParameterExtent(self.EXTENT,
self.tr('Input extent')))
self.addParameter(ParameterNumber(self.SPACING,
self.tr('Point spacing/count'), 0.0001, 999999999.999999999, 0.0001))
self.addParameter(ParameterNumber(self.INSET,
self.tr('Initial inset from corner (LH side)'), 0.0, 9999.9999, 0.0))
self.addParameter(ParameterBoolean(self.RANDOMIZE,
self.tr('Apply random offset to point spacing'), False))
self.addParameter(ParameterBoolean(self.IS_SPACING,
self.tr('Use point spacing'), True))
self.addOutput(OutputVector(self.OUTPUT, self.tr('Regular points')))
def processAlgorithm(self, progress):
extent = unicode(self.getParameterValue(self.EXTENT)).split(',')
spacing = float(self.getParameterValue(self.SPACING))
inset = float(self.getParameterValue(self.INSET))
randomize = self.getParameterValue(self.RANDOMIZE)
isSpacing = self.getParameterValue(self.IS_SPACING)
extent = QgsRectangle(float(extent[0]), float(extent[2]),
float(extent[1]), float(extent[3]))
fields = QgsFields()
fields.append(QgsField('id', QVariant.Int, '', 10, 0))
mapCRS = iface.mapCanvas().mapSettings().destinationCrs()
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
fields, QGis.WKBPoint, mapCRS)
if randomize:
seed()
area = extent.width() * extent.height()
if isSpacing:
pSpacing = spacing
else:
pSpacing = sqrt(area / spacing)
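        # e.g. for a 100 x 100 extent (area 10000) and a requested count of
        # 400 points, pSpacing = sqrt(10000 / 400) = 5 units between points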
f = QgsFeature()
f.initAttributes(1)
f.setFields(fields)
count = 0
total = 100.00 / (area / pSpacing)
y = extent.yMaximum() - inset
while y >= extent.yMinimum():
x = extent.xMinimum() + inset
while x <= extent.xMaximum():
if randomize:
geom = QgsGeometry().fromPoint(QgsPoint(
uniform(x - (pSpacing / 2.0), x + (pSpacing / 2.0)),
uniform(y - (pSpacing / 2.0), y + (pSpacing / 2.0))))
else:
geom = QgsGeometry().fromPoint(QgsPoint(x, y))
if geom.intersects(extent):
f.setAttribute('id', count)
f.setGeometry(geom)
writer.addFeature(f)
x += pSpacing
count += 1
progress.setPercentage(int(count * total))
y = y - pSpacing
del writer
| gpl-2.0 |
aflaxman/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 127 | 1270 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
plt.figure('scikit-learn parallel %s benchmark results' % func.__name__)
plt.plot(sample_sizes, one_core, label="one core")
plt.plot(sample_sizes, multi_core, label="multi core")
plt.xlabel('n_samples')
plt.ylabel('Time (s)')
plt.title('Parallel %s' % func.__name__)
plt.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| bsd-3-clause |
nicoddemus/backtrader | samples/quickstart06.py | 1 | 4786 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Stratey
class TestStrategy(bt.Strategy):
def log(self, txt, dt=None):
        ''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# To keep track of pending orders
self.order = None
def notify(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enougth cash
if order.status in [order.Completed, order.Canceled, order.Margin]:
if order.isbuy():
self.log('BUY EXECUTED, %.2f' % order.executed.price)
elif order.issell():
self.log('SELL EXECUTED, %.2f' % order.executed.price)
self.bar_executed = len(self)
# Write down: no pending order
self.order = None
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.dataclose[0] < self.dataclose[-1]:
# current close less than previous close
if self.dataclose[-1] < self.dataclose[-2]:
# previous close less than the previous close
# BUY, BUY, BUY!!! (with default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy()
else:
# Already in the market ... we might sell
if len(self) >= (self.bar_executed + 5):
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell()
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# Add a strategy
cerebro.addstrategy(TestStrategy)
# Datas are in a subfolder of the samples. Need to find where the script is
# because it could have been called from anywhere
modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
datapath = os.path.join(modpath, './datas/yahoo/oracle-1995-2014.csv')
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=datetime.datetime(2000, 01, 01),
        # Do not pass values after this date
todate=datetime.datetime(2000, 12, 31),
        # The data in the file is in reversed order (newest first)
reversed=True)
# Add the Data Feed to Cerebro
cerebro.adddata(data)
# Set our desired cash start
cerebro.broker.setcash(100000.0)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
| gpl-3.0 |
liupfskygre/qiime | tests/test_add_alpha_to_mapping_file.py | 15 | 14308 | #!/usr/bin/env python
# File created on 02 Nov 2012
from __future__ import division
__author__ = "Yoshiki Vazquez-Baeza"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Yoshiki Vazquez-Baeza"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Yoshiki Vazquez-Baeza"
__email__ = "[email protected]"
from numpy import array, median
from unittest import TestCase, main
from qiime.add_alpha_to_mapping_file import (
add_alpha_diversity_values_to_mapping_file,
_get_level, mean_alpha)
class TopLevelTests(TestCase):
def setUp(self):
self.metrics = ['chao1', 'PD_whole_tree']
        self.alpha_diversity_data = array([
            [173., 6.39901], [332.5, 7.48089], [189.9375, 5.5103],
            [223.58333333, 6.26648], [176.8, 5.40341], [90., 4.84129],
            [127., 4.50866], [211., 7.3172], [146., 6.57543]])
self.sample_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354',
'PC.593', 'PC.355', 'PC.607', 'PC.634']
self.collated_alpha_dict_a = COLLATED_ALPHA_DICT_A
self.collated_alpha_dict_b = COLLATED_ALPHA_DICT_B
self.mapping_file_data = MAPPING_FILE_DATA
self.mapping_file_headers = ['SampleID', 'BarcodeSequence',
'LinkerPrimerSequence', 'Treatment', 'DOB', 'Description']
def test_add_alpha_diversity_values_to_mapping_file(self):
"""checks a mapping file is added with the proper fields """
# regular case no special cases for avg method
expected_mapping_file_data = MAPPING_FILE_DATA_WITH_ALPHA_A
expected_mapping_file_headers = ['SampleID', 'BarcodeSequence',
'LinkerPrimerSequence', 'Treatment', 'DOB', 'Description',
'chao1_alpha', 'chao1_normalized_alpha', 'chao1_alpha_label',
'PD_whole_tree_alpha', 'PD_whole_tree_normalized_alpha',
'PD_whole_tree_alpha_label']
out_mapping_file_data, out_mapping_file_headers =\
add_alpha_diversity_values_to_mapping_file(self.metrics,
self.sample_ids, self.alpha_diversity_data,
self.mapping_file_headers, self.mapping_file_data, 4, 'equal')
self.assertEquals(out_mapping_file_data, expected_mapping_file_data)
self.assertEquals(
out_mapping_file_headers,
expected_mapping_file_headers)
# regular case no special cases for quantile method
expected_mapping_file_data = MAPPING_FILE_DATA_WITH_ALPHA_B
out_mapping_file_data, out_mapping_file_headers =\
add_alpha_diversity_values_to_mapping_file(self.metrics,
self.sample_ids, self.alpha_diversity_data,
self.mapping_file_headers, self.mapping_file_data, 4, 'quantile')
self.assertEquals(out_mapping_file_data, expected_mapping_file_data)
self.assertEquals(
out_mapping_file_headers,
expected_mapping_file_headers)
def test__get_level(self):
""" checks the level assignment is done correctly """
# check regular case with and without prefix tags
expected_output = 1
output = _get_level(0.20, [0.25, 0.5, 0.75])
self.assertEquals(output, expected_output)
expected_output = 'level_bin_1_of_4'
output = _get_level(0.20, [0.25, 0.5, 0.75], 'level_bin')
self.assertEquals(output, expected_output)
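        # 0.20 falls between the 0.15 and 0.35 boundaries, i.e. in the third
        # of the six bins that five boundaries delimit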
expected_output = 'level_bin_3_of_6'
output = _get_level(0.20, [0.05, 0.15, 0.35, 0.8, 0.95], 'level_bin')
self.assertEquals(output, expected_output)
# edge cases with and without prefix tags
expected_output = 2
output = _get_level(0.25, [0.25, 0.5, 0.75])
self.assertEquals(output, expected_output)
expected_output = 4
output = _get_level(1, [0.25, 0.5, 0.75])
self.assertEquals(output, expected_output)
expected_output = 'testing_bin_2_of_4'
output = _get_level(0.25, [0.25, 0.5, 0.75], 'testing_bin')
self.assertEquals(output, expected_output)
expected_output = 'testing_bin_4_of_4'
output = _get_level(1, [0.25, 0.5, 0.75], 'testing_bin')
self.assertEquals(output, expected_output)
# unwanted cases, greater than one and negative values
with self.assertRaises(ValueError):
output = _get_level(1.3, [0.5])
with self.assertRaises(ValueError):
output = _get_level(-1, [0.25, 0.5, 0.75])
def test_mean_alpha(self):
"""checks data is being correctly averaged"""
# regular use-cases for this function
expected_data = [[9.441785, 82.93],
[0.42877, 5.2006], [9.625995, 8.18]]
expected_metrics = ['PD_whole_tree_even_310', 'chao1_even_310']
expected_sample_ids = ['s1', 's2', 's3']
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_a, 310)
self.assertEquals(o_metrics, expected_metrics)
self.assertEquals(o_sample_ids, expected_sample_ids)
self.assertEquals(o_data, expected_data)
expected_data = [[12.508435, 11.6105],
[0.42877, 8.42], [11.58785, 1.0]]
expected_metrics = ['PD_whole_tree_even_610', 'chao1_even_610']
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_a, 610)
self.assertEquals(o_metrics, expected_metrics)
self.assertEquals(o_sample_ids, expected_sample_ids)
self.assertEquals(o_data, expected_data)
# should default to the highest depth
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_a,
None)
self.assertEquals(o_metrics, expected_metrics)
self.assertEquals(o_sample_ids, expected_sample_ids)
self.assertEquals(o_data, expected_data)
# non-existant depth
with self.assertRaises(ValueError):
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_b, 111111)
# files with non-matching sample ids should raise an exception
with self.assertRaises(ValueError):
o_metrics, o_sample_ids, o_data = mean_alpha(
self.collated_alpha_dict_b, 310)
# input types that should not be processed
with self.assertRaises(AssertionError):
output = mean_alpha([1, 2, 3], 5)
with self.assertRaises(AssertionError):
output = mean_alpha({'a': 'b'}, -1.4)
MAPPING_FILE_DATA = [
['PC.354',
'AGCACGAGCCTA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._354'],
['PC.355',
'AACTCGTCGATG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._355'],
['PC.356',
'ACAGACCACTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061126',
'Control_mouse_I.D._356'],
['PC.481',
'ACCAGCGACTAG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20070314',
'Control_mouse_I.D._481'],
['PC.593',
'AGCAGCACTTGT',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20071210',
'Control_mouse_I.D._593'],
['PC.607',
'AACTGTGCGTAC',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20071112',
'Fasting_mouse_I.D._607'],
['PC.634',
'ACAGAGTCGGCT',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._634'],
['PC.635',
'ACCGCAGAGTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._635'],
['PC.636', 'ACGGTGAGTGTC', 'YATGCTGCCTCCCGTAGGAGT', 'Fast', '20080116', 'Fasting_mouse_I.D._636']]
MAPPING_FILE_DATA_WITH_ALPHA_A = [
['PC.354',
'AGCACGAGCCTA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._354',
'176.8',
'0.35793814433',
'bin_2_of_4',
'5.40341',
'0.301036595418',
'bin_2_of_4'],
['PC.355',
'AACTCGTCGATG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._355',
'127.0',
'0.152577319588',
'bin_1_of_4',
'4.50866',
'0.0',
'bin_1_of_4'],
['PC.356',
'ACAGACCACTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061126',
'Control_mouse_I.D._356',
'189.9375',
'0.412113402062',
'bin_2_of_4',
'5.5103',
'0.336999491964',
'bin_2_of_4'],
['PC.481',
'ACCAGCGACTAG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20070314',
'Control_mouse_I.D._481',
'223.58333333',
'0.550859106515',
'bin_3_of_4',
'6.26648',
'0.59141452714',
'bin_3_of_4'],
['PC.593',
'AGCAGCACTTGT',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20071210',
'Control_mouse_I.D._593',
'90.0',
'0.0',
'bin_1_of_4',
'4.84129',
'0.111912604341',
'bin_1_of_4'],
['PC.607',
'AACTGTGCGTAC',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20071112',
'Fasting_mouse_I.D._607',
'211.0',
'0.498969072165',
'bin_2_of_4',
'7.3172',
'0.944926873089',
'bin_4_of_4'],
['PC.634',
'ACAGAGTCGGCT',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._634',
'146.0',
'0.230927835052',
'bin_1_of_4',
'6.57543',
'0.695360049525',
'bin_3_of_4'],
['PC.635',
'ACCGCAGAGTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._635',
'332.5',
'1.0',
'bin_4_of_4',
'7.48089',
'1.0',
'bin_4_of_4'],
['PC.636', 'ACGGTGAGTGTC', 'YATGCTGCCTCCCGTAGGAGT', 'Fast', '20080116', 'Fasting_mouse_I.D._636', '173.0', '0.342268041237', 'bin_2_of_4', '6.39901', '0.636003943167', 'bin_3_of_4']]
MAPPING_FILE_DATA_WITH_ALPHA_B = [
['PC.354',
'AGCACGAGCCTA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._354',
'176.8',
'0.35793814433',
'bin_3_of_4',
'5.40341',
'0.301036595418',
'bin_2_of_4'],
['PC.355',
'AACTCGTCGATG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061218',
'Control_mouse_I.D._355',
'127.0',
'0.152577319588',
'bin_1_of_4',
'4.50866',
'0.0',
'bin_1_of_4'],
['PC.356',
'ACAGACCACTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20061126',
'Control_mouse_I.D._356',
'189.9375',
'0.412113402062',
'bin_3_of_4',
'5.5103',
'0.336999491964',
'bin_2_of_4'],
['PC.481',
'ACCAGCGACTAG',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20070314',
'Control_mouse_I.D._481',
'223.58333333',
'0.550859106515',
'bin_4_of_4',
'6.26648',
'0.59141452714',
'bin_3_of_4'],
['PC.593',
'AGCAGCACTTGT',
'YATGCTGCCTCCCGTAGGAGT',
'Control',
'20071210',
'Control_mouse_I.D._593',
'90.0',
'0.0',
'bin_1_of_4',
'4.84129',
'0.111912604341',
'bin_1_of_4'],
['PC.607',
'AACTGTGCGTAC',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20071112',
'Fasting_mouse_I.D._607',
'211.0',
'0.498969072165',
'bin_4_of_4',
'7.3172',
'0.944926873089',
'bin_4_of_4'],
['PC.634',
'ACAGAGTCGGCT',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._634',
'146.0',
'0.230927835052',
'bin_2_of_4',
'6.57543',
'0.695360049525',
'bin_4_of_4'],
['PC.635',
'ACCGCAGAGTCA',
'YATGCTGCCTCCCGTAGGAGT',
'Fast',
'20080116',
'Fasting_mouse_I.D._635',
'332.5',
'1.0',
'bin_4_of_4',
'7.48089',
'1.0',
'bin_4_of_4'],
['PC.636', 'ACGGTGAGTGTC', 'YATGCTGCCTCCCGTAGGAGT', 'Fast', '20080116', 'Fasting_mouse_I.D._636', '173.0', '0.342268041237', 'bin_2_of_4', '6.39901', '0.636003943167', 'bin_3_of_4']]
COLLATED_ALPHA_DICT_A = {
'PD_whole_tree': ['\tsequences per sample\titeration\ts1\ts2\ts3',
'rare10.txt\t10\t0\t1.99181\t0.42877\t2.13996',
'rare10.txt\t10\t1\t2.07163\t0.42877\t2.37055',
'rare310.txt\t310\t0\t8.83115\t0.42877\t11.00725',
'rare310.txt\t310\t1\t10.05242\t0.42877\t8.24474',
'rare610.txt\t610\t0\t12.03067\t0.42877\t11.58928',
'rare610.txt\t610\t1\t12.9862\t0.42877\t11.58642'],
'chao1': ['\tsequences per sample\titeration\ts1\ts2\ts3',
'rare10.txt\t10\t0\t4.2\t3.1415\t9.11',
'rare10.txt\t10\t1\t5.6\t3.15\t9.62',
'rare310.txt\t310\t0\t83.11\t5.2012\t8.12',
'rare310.txt\t310\t1\t82.75\t5.2000\t8.24',
'rare610.txt\t610\t0\t11.11\t8.42\t1',
'rare610.txt\t610\t1\t12.111\t8.42\t1']
}
COLLATED_ALPHA_DICT_B = {
'PD_whole_tree': ['\tsequences per sample\titeration\ts1\ts2\ts3',
'rare10.txt\t10\t0\t1.99181\t0.42877\t2.13996',
'rare10.txt\t10\t1\t2.07163\t0.42877\t2.37055',
'rare310.txt\t310\t0\t8.83115\t0.42877\t11.00725',
'rare310.txt\t310\t1\t10.05242\t0.42877\t8.24474',
'rare610.txt\t610\t0\t12.03067\t0.42877\t11.58928',
'rare610.txt\t610\t1\t12.9862\t0.42877\t11.58642'],
'chao1': ['\tsequences per sample\titeration\ts511\ts512\ts3',
'rare10.txt\t10\t0\t4.2\t3.1415\t9.11',
'rare10.txt\t10\t1\t5.6\t3.15\t9.62',
'rare310.txt\t310\t0\t83.11\t5.2012\t8.12',
'rare310.txt\t310\t1\t82.75\t5.2000\t8.24',
'rare610.txt\t610\t0\t11.11\t8.42\t1',
'rare610.txt\t610\t1\t12.111\t8.42\t1']
}
if __name__ == "__main__":
main()
| gpl-2.0 |
liaoqingwei/flask | tests/test_testing.py | 143 | 6479 | # -*- coding: utf-8 -*-
"""
tests.testing
~~~~~~~~~~~~~
Test client and more.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
from flask._compat import text_type
def test_environ_defaults_from_config():
app = flask.Flask(__name__)
app.testing = True
app.config['SERVER_NAME'] = 'example.com:1234'
app.config['APPLICATION_ROOT'] = '/foo'
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
assert ctx.request.url == 'http://example.com:1234/foo/'
with app.test_client() as c:
rv = c.get('/')
assert rv.data == b'http://example.com:1234/foo/'
def test_environ_defaults():
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.url
ctx = app.test_request_context()
assert ctx.request.url == 'http://localhost/'
with app.test_client() as c:
rv = c.get('/')
assert rv.data == b'http://localhost/'
def test_redirect_keep_session():
app = flask.Flask(__name__)
app.secret_key = 'testing'
@app.route('/', methods=['GET', 'POST'])
def index():
if flask.request.method == 'POST':
return flask.redirect('/getsession')
flask.session['data'] = 'foo'
return 'index'
@app.route('/getsession')
def get_session():
return flask.session.get('data', '<missing>')
with app.test_client() as c:
rv = c.get('/getsession')
assert rv.data == b'<missing>'
rv = c.get('/')
assert rv.data == b'index'
assert flask.session.get('data') == 'foo'
rv = c.post('/', data={}, follow_redirects=True)
assert rv.data == b'foo'
# This support requires a new Werkzeug version
if not hasattr(c, 'redirect_client'):
assert flask.session.get('data') == 'foo'
rv = c.get('/getsession')
assert rv.data == b'foo'
def test_session_transactions():
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
@app.route('/')
def index():
return text_type(flask.session['foo'])
with app.test_client() as c:
with c.session_transaction() as sess:
assert len(sess) == 0
sess['foo'] = [42]
assert len(sess) == 1
rv = c.get('/')
assert rv.data == b'[42]'
with c.session_transaction() as sess:
assert len(sess) == 1
assert sess['foo'] == [42]
def test_session_transactions_no_null_sessions():
app = flask.Flask(__name__)
app.testing = True
with app.test_client() as c:
try:
with c.session_transaction() as sess:
pass
except RuntimeError as e:
assert 'Session backend did not open a session' in str(e)
else:
assert False, 'Expected runtime error'
def test_session_transactions_keep_context():
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'testing'
with app.test_client() as c:
rv = c.get('/')
req = flask.request._get_current_object()
assert req is not None
with c.session_transaction():
assert req is flask.request._get_current_object()
def test_session_transaction_needs_cookies():
app = flask.Flask(__name__)
app.testing = True
c = app.test_client(use_cookies=False)
try:
with c.session_transaction() as s:
pass
except RuntimeError as e:
assert 'cookies' in str(e)
else:
assert False, 'Expected runtime error'
def test_test_client_context_binding():
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
@app.route('/')
def index():
flask.g.value = 42
return 'Hello World!'
@app.route('/other')
def other():
1 // 0
with app.test_client() as c:
resp = c.get('/')
assert flask.g.value == 42
assert resp.data == b'Hello World!'
assert resp.status_code == 200
resp = c.get('/other')
assert not hasattr(flask.g, 'value')
assert b'Internal Server Error' in resp.data
assert resp.status_code == 500
flask.g.value = 23
try:
flask.g.value
except (AttributeError, RuntimeError):
pass
else:
raise AssertionError('some kind of exception expected')
def test_reuse_client():
app = flask.Flask(__name__)
c = app.test_client()
with c:
assert c.get('/').status_code == 404
with c:
assert c.get('/').status_code == 404
def test_test_client_calls_teardown_handlers():
app = flask.Flask(__name__)
called = []
@app.teardown_request
def remember(error):
called.append(error)
with app.test_client() as c:
assert called == []
c.get('/')
assert called == []
assert called == [None]
del called[:]
with app.test_client() as c:
assert called == []
c.get('/')
assert called == []
c.get('/')
assert called == [None]
assert called == [None, None]
def test_full_url_request():
app = flask.Flask(__name__)
app.testing = True
@app.route('/action', methods=['POST'])
def action():
return 'x'
with app.test_client() as c:
rv = c.post('http://domain.com/action?vodka=42', data={'gin': 43})
assert rv.status_code == 200
assert 'gin' in flask.request.form
assert 'vodka' in flask.request.args
def test_subdomain():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'
@app.route('/', subdomain='<company_id>')
def view(company_id):
return company_id
with app.test_request_context():
url = flask.url_for('view', company_id='xxx')
with app.test_client() as c:
response = c.get(url)
assert 200 == response.status_code
assert b'xxx' == response.data
def test_nosubdomain():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'
@app.route('/<company_id>')
def view(company_id):
return company_id
with app.test_request_context():
url = flask.url_for('view', company_id='xxx')
with app.test_client() as c:
response = c.get(url)
assert 200 == response.status_code
assert b'xxx' == response.data
| bsd-3-clause |
marcoplaisier/pytoon | setup.py | 2 | 1197 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='pytoon',
version='0.0.1',
description='PyToon measures electricity, water and gas meters and creates fancy graphs',
long_description=readme + '\n\n' + history,
author='Marco Plaisier',
author_email='[email protected]',
url='https://github.com/marcofinalist/pytoon',
packages=[
'pytoon',
],
package_dir={'pytoon': 'pytoon'},
include_package_data=True,
install_requires=[
],
license="BSD",
zip_safe=False,
keywords='pytoon',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
test_suite='tests',
) | bsd-3-clause |
siliconsmiley/QGIS | python/plugins/processing/algs/gdal/nearblack.py | 1 | 2664 | # -*- coding: utf-8 -*-
"""
***************************************************************************
nearblack.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.outputs import OutputRaster
from processing.algs.gdal.GdalUtils import GdalUtils
class nearblack(GdalAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NEAR = 'NEAR'
WHITE = 'WHITE'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Near black')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Analysis')
self.addParameter(ParameterRaster(nearblack.INPUT,
self.tr('Input layer'), False))
self.addParameter(ParameterNumber(nearblack.NEAR,
self.tr('How far from black (white)'), 0, None, 15))
self.addParameter(ParameterBoolean(nearblack.WHITE,
self.tr('Search for nearly white pixels instead of nearly black'),
False))
self.addOutput(OutputRaster(nearblack.OUTPUT, self.tr('Nearblack')))
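    # Builds the GDAL command line; with the defaults above this is roughly:
    #   nearblack -o <output.tif> -near 15 [-white] <input.tif>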
def getConsoleCommands(self):
arguments = []
arguments.append('-o')
arguments.append(self.getOutputValue(nearblack.OUTPUT))
arguments.append('-near')
arguments.append(unicode(self.getParameterValue(nearblack.NEAR)))
if self.getParameterValue(nearblack.WHITE):
arguments.append('-white')
arguments.append(self.getParameterValue(nearblack.INPUT))
return ['nearblack', GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 |
PythonicNinja/pydrill | pydrill/connection/base.py | 1 | 4011 | # -*- coding: utf-8 -*-
import logging
from ..exceptions import HTTP_EXCEPTIONS, TransportError
try:
import simplejson as json
except ImportError:
import json
logger = logging.getLogger('pydrill')
_tracer_already_configured = 'pydrill.trace' in logging.Logger.manager.loggerDict
tracer = logging.getLogger('pydrill.trace')
if not _tracer_already_configured:
tracer.propagate = False
class Connection(object):
"""
    Class responsible for maintaining a connection to a Drill node.
    You can create a custom connection class similar to :class:`~pydrill.RequestsHttpConnection`.
    Its main interface (`perform_request`) is thread-safe.
Responsible for logging.
"""
transport_schema = 'http'
def __init__(self, host='localhost', port=8047, url_prefix='', timeout=10, **kwargs):
"""
:arg host: hostname of the node (default: localhost)
:arg port: port to use (integer, default: 8047)
:arg url_prefix: optional url prefix for pydrill
:arg timeout: default timeout in seconds (float, default: 10)
"""
self.host = '%s://%s:%s' % (self.transport_schema, host, port)
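        # e.g. 'http://localhost:8047' with the default arguments above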
if url_prefix:
url_prefix = '/' + url_prefix.strip('/')
self.url_prefix = url_prefix
self.timeout = timeout
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.host)
def _pretty_json(self, data):
"""
pretty JSON in tracer curl logs
:param data:
:return:
"""
try:
return json.dumps(json.loads(data), sort_keys=True, indent=2, separators=(',', ': ')).replace("'",
r'\u0027')
except (ValueError, TypeError):
# non-json data or a bulk request
return data
def log_request_success(self, method, full_url, path, body, status_code, response, duration):
""" Log a successful API call. """
if body and not isinstance(body, dict):
body = body.decode('utf-8')
logger.info(
'%s %s [status:%s request:%.3fs]', method, full_url,
status_code, duration
)
logger.debug('> %s', body)
logger.debug('< %s', response)
if tracer.isEnabledFor(logging.INFO):
if self.url_prefix:
path = path.replace(self.url_prefix, '', 1)
tracer.info("curl -X%s 'http://localhost:8047%s' -d '%s'", method, path,
self._pretty_json(body) if body else '')
if tracer.isEnabledFor(logging.DEBUG):
tracer.debug('#[%s] (%.3fs)\n#%s', status_code, duration,
self._pretty_json(response).replace('\n', '\n#') if response else '')
def log_request_fail(self, method, full_url, body, duration, status_code=None, exception=None):
"""
Log an unsuccessful API call.
"""
logger.warning(
'%s %s [status:%s request:%.3fs]', method, full_url,
status_code or 'N/A', duration, exc_info=exception is not None
)
if body and not isinstance(body, dict):
body = body.decode('utf-8')
logger.debug('> %s', body)
def _raise_error(self, status_code, raw_data):
"""
Locate appropriate exception and raise it.
"""
error_message = raw_data
additional_info = None
try:
additional_info = json.loads(raw_data)
error_message = additional_info.get('error', error_message)
if isinstance(error_message, dict) and 'type' in error_message:
error_message = error_message['type']
        except Exception:
pass
raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info)
def perform_request(self, method, url, params=None, body=None, timeout=None, ignore=()):
raise NotImplementedError
| mit |
NSLS-II/PyXRF | pyxrf/_version.py | 1 | 15432 | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "pyxrf/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args, cwd=cwd, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print(
"guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix) :], "full-revisionid": None, "dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long"], cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
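    # e.g. with tag_prefix "v", "v1.2-3-gabc1234-dirty" gives closest-tag "1.2",
    # distance 3, short "abc1234", dirty True.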
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
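    # e.g. closest-tag "1.2", distance 3, short "abc1234" renders as "1.2+3.gabc1234"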
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version"}
| bsd-3-clause |
asoliveira/NumShip | scripts/plot/leme-velo-v-zz-plt.py | 1 | 3018 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Is the data dimensionless?
adi = False
# Should the figures be saved (True|False)?
save = True
# If saving, which file format is desired?
formato = 'jpg'
# If saving, which directory should the figures be saved to?
dircg = 'fig-sen'
# If saving, what file name should be used?
nome = 'leme-velo-v-zz'
# Which title to put on the plots?
titulo = ''  # e.g. 'Curva de ZigZag'
titulo2 = ''
# Which color for the plots?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
# Line style
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
acelhis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/velo.dat')
acelhis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/velo.dat')
acelhis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/velo.dat')
acelhis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/velo.dat')
lemehis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/leme.dat')
lemehis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/leme.dat')
lemehis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/leme.dat')
lemehis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/leme.dat')
axl = [0, 1000, -1.5, 2.]
axl2 = [0, 1000, -25, 25]  # for the rudder
# Plotting the turning curve
if adi:
    ylabel = r'$v\prime$'
    xacellabel = r'$t\prime$'
else:
ylabel = r'$v \quad m/s$'
xacellabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Padrao
plt.plot(acelhis[:, 0], acelhis[:, 2], color = pc, linestyle = ps,
linewidth = 2, label=ur'padrão')
plt.plot(acelhis2[:, 0], acelhis2[:, 2], color = r1c,linestyle = r1s,
linewidth = 2, label=ur'1.1--$r$')
plt.plot(acelhis3[:, 0], acelhis3[:, 2], color = r2c, linestyle = r2s,
linewidth = 2, label=ur'1.2--$r$')
plt.plot(acelhis4[:, 0], acelhis4[:, 2], color = r3c, linestyle = r3s,
linewidth = 2, label=ur'1.3--$r$')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.1, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xacellabel)
plt.axis(axl)
plt.grid(True)
plt.twinx()
plt.plot(lemehis[:, 0], lemehis[:, 1] * (180/sp.pi), color = pc,
         linestyle = "--", linewidth = 1, label=ur'leme--padrão')
plt.plot(lemehis2[:, 0], lemehis2[:, 1] * (180/sp.pi), color = r1c,
         linestyle = "--", linewidth = 1, label=ur'leme--1.1$r$')
plt.plot(lemehis3[:, 0], lemehis3[:, 1] * (180/sp.pi), color = r2c,
         linestyle = "--", linewidth = 1, label=ur'leme--1.2$r$')
plt.plot(lemehis4[:, 0], lemehis4[:, 1] * (180/sp.pi), color = r3c,
         linestyle = "--", linewidth = 1, label=ur'leme--1.3$r$')
plt.title(titulo2)
plt.legend(bbox_to_anchor=(1.1, 0), loc=3, borderaxespad=0.)
plt.ylabel(r"$\delta_R$")
plt.axis(axl2)
plt.grid(False)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 |
ksweta/BeamIt-Server | beamit/resources/password.py | 1 | 1284 | from beamit.resources.base import Resource
class PasswordChangeRequest(Resource):
MEDIA_TYPE = 'application/vnd.beamit.password.change.request+json'
def __init__(self, email, password, new_password):
self.email = email
self.password = password
self.new_password = new_password
def __repr__(self):
return "<PasswordChangeRequest email: {}, password: {}, new_password: {}>".format(
self.email,
self.password,
self.new_password,
)
def to_dict(self):
return dict(email=self.email, password=self.password, new_password=self.new_password)
@classmethod
def from_dict(cls, dct):
return cls(
email=dct.get("email"),
password=dct.get("password"),
new_password=dct.get("new_password"),
)
class PasswordChangeResponse(Resource):
MEDIA_TYPE = 'application/vnd.beamit.password.change.response+json'
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return "<PasswordChangeResponse user_id: {}>".format(self.user_id)
def to_dict(self):
return dict(user_id=self.user_id)
@classmethod
def from_dict(cls, dct):
return cls(user_id=dct.get("user_id"))
| apache-2.0 |
40223211/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/_random.py | 518 | 2451 | from browser import window
def _randint(a, b):
return int(window.Math.random()*(b-a+1)+a)
def _urandom(n):
"""urandom(n) -> str
Return n random bytes suitable for cryptographic use."""
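    # NOTE: window.Math.random() is not a cryptographically secure generator;
    # this only mirrors the os.urandom API inside the browser environment.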
randbytes= [_randint(0,255) for i in range(n)]
return bytes(randbytes)
class Random:
"""Random number generator base class used by bound module functions.
Used to instantiate instances of Random to get generators that don't
share state.
Class Random can also be subclassed if you want to use a different basic
generator of your own devising: in that case, override the following
methods: random(), seed(), getstate(), and setstate().
Optionally, implement a getrandbits() method so that randrange()
can cover arbitrarily large ranges.
"""
#random
#seed
#getstate
#setstate
VERSION = 3 # used by getstate/setstate
def __init__(self, x=None):
"""Initialize an instance.
Optional argument x controls seeding, as for Random.seed().
"""
self._state=x
def seed(self, a=None, version=2):
"""Initialize internal state from hashable object.
None or no argument seeds from current time or from an operating
system specific randomness source if available.
For version 2 (the default), all of the bits are used if *a* is a str,
bytes, or bytearray. For version 1, the hash() of *a* is used instead.
If *a* is an int, all bits are used.
"""
self._state=a
self.gauss_next = None
def getstate(self):
"""Return internal state; can be passed to setstate() later."""
return self._state
def setstate(self, state):
"""Restore internal state from object returned by getstate()."""
self._state=state
def random(self):
"""Get the next random number in the range [0.0, 1.0)."""
return window.Math.random()
def getrandbits(self, k):
"""getrandbits(k) -> x. Generates a long int with k random bits."""
if k <= 0:
raise ValueError('number of bits must be greater than zero')
if k != int(k):
raise TypeError('number of bits should be an integer')
numbytes = (k + 7) // 8 # bits / 8 and rounded up
x = int.from_bytes(_urandom(numbytes), 'big')
return x >> (numbytes * 8 - k) # trim excess bits
| agpl-3.0 |
mrphrazer/miasm | example/jitter/example_types.py | 3 | 8126 | #! /usr/bin/env python2
"""This script is just a short example of common usages for miasm.core.types.
For a more complete view of what is possible, tests/core/types.py covers
most of the module possibilities, and the module doc gives useful information
as well.
"""
from __future__ import print_function
from miasm.core.utils import iterbytes
from miasm.analysis.machine import Machine
from miasm.core.types import MemStruct, Self, Void, Str, Array, Ptr, \
Num, Array, set_allocator
from miasm.os_dep.common import heap
# Instantiate a heap
my_heap = heap()
# And set it as the default memory allocator, to avoid manual allocation and
# explicit address passing to the MemType subclasses (like MemStruct)
# constructor
set_allocator(my_heap.vm_alloc)
# Let's reimplement a simple C generic linked list mapped on a VmMngr.
# All the structures and methods will use the python objects but all the data
# is in fact stored in the VmMngr
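# As a rough C equivalent (a sketch, not part of miasm), the layout below maps to:
#
#     struct ListNode { struct ListNode *next; void *data; };
#     struct LinkedList { struct ListNode *head, *tail; uint32_t size; };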
class ListNode(MemStruct):
fields = [
# The "<I" is the struct-like format of the pointer in memory, in this
# case a Little Endian 32 bits unsigned int.
# One way to handle reference to ListNode in ListNode is to use the
# special marker Self().
# You could also generate ListNode's fields with ListNode.gen_field
# after the class declaration, so that the ListNode is defined when
# fields are generated.
("next", Ptr("<I", Self())),
# Ptr(_, Void()) is analogous to void*, Void() is a kind of "empty type"
("data", Ptr("<I", Void())),
]
def get_next(self):
if self.next.val == 0:
return None
return self.next.deref
def get_data(self, data_type=None):
if data_type is not None:
return self.data.deref.cast(data_type)
else:
return self.data.deref
class LinkedList(MemStruct):
fields = [
# For convenience, either a Type instance (like Self() or Num("I") or a
# MemStruct subclass can be passed to the Ptr constructor.
("head", Ptr("<I", ListNode)),
("tail", Ptr("<I", ListNode)),
# Num can take any one-field struct-like format, including floats and
# doubles
("size", Num("<I")),
]
def get_head(self):
"""Returns the head ListNode instance"""
if self.head == 0:
return None
return self.head.deref
def get_tail(self):
"""Returns the tail ListNode instance"""
if self.tail == 0:
return None
return self.tail.deref
def push(self, data):
"""Push a data (MemType instance) to the linked list."""
# Allocate a new node
node = ListNode(self._vm)
# Set the data pointer
node.data = data.get_addr()
# re-link
if self.head != 0:
# get the head ListNode
head = self.get_head()
node.next = head.get_addr()
# pointer to head assigned to the new node address
self.head = node.get_addr()
# Do not forget the tail :)
if self.tail == 0:
self.tail = node.get_addr()
self.size += 1
def pop(self, data_type=None):
"""Pop one data from the LinkedList."""
# Nothing to pop
if self.head == 0:
return None
node = self.get_head()
self.head = node.next
# empty
if self.head == 0:
self.tail = 0
self.size -= 1
return node.get_data(data_type)
def empty(self):
"""True if the list is empty."""
return self.head == 0
def __iter__(self):
if not self.empty():
cur = self.get_head()
while cur is not None:
yield cur.data.deref
cur = cur.get_next()
# Some data types to put in the LinkedList and play with:
class DataArray(MemStruct):
fields = [
("val1", Num("B")),
("val2", Num("B")),
# Ptr can also be instantiated with a Type instance as an argument, the
# corresponding Memtype will be returned when dereferencing
# Here, data_array.array.deref will allow to access an Array
("arrayptr", Ptr("<I", Array(Num("B"), 16))),
# Array of 10 uint8
("array", Array(Num("B"), 16)),
]
class DataStr(MemStruct):
fields = [
("valshort", Num("<H")),
# Pointer to an utf16 null terminated string
("data", Ptr("<I", Str("utf16"))),
]
print("This script demonstrates a LinkedList implementation using the types ")
print("module in the first part, and how to play with some casts in the second.")
print()
# A random jitter
# You could also use miasm.jitter.VmMngr.Vm() directly, but that rarely
# happens in real-life scripts, so here is the usual way:
jitter = Machine("x86_32").jitter("python")
vm = jitter.vm
# Auto-allocated by my_heap. If you allocate memory at `addr`,
# `link = LinkedList(vm, addr)` will use this allocation. If you just want
# to read/modify an existing struct, you may want to use the (vm, addr) syntax.
link = LinkedList(vm)
# memset the struct (with '\x00' by default)
link.memset()
# Push three uninitialized structures
link.push(DataArray(vm))
link.push(DataArray(vm))
link.push(DataArray(vm))
# Size has been updated
assert link.size == 3
# If you get it directly from the VM, it is updated as well
raw_size = vm.get_mem(link.get_addr("size"), link.get_type()
.get_field_type("size").size)
assert raw_size == b'\x03\x00\x00\x00'
print("The linked list just built:")
print(repr(link), '\n')
print("Its uninitialized data elements:")
for data in link:
# __iter__ returns MemVoids here, just cast them to the real data type
real_data = data.cast(DataArray)
print(repr(real_data))
print()
# Now let's play with one data
data = link.pop(DataArray)
assert link.size == 2
# Make the Array Ptr point to the data's array field
# Note: this is equivalent to data.arrayptr.val = ...
data.arrayptr = data.get_addr("array")
# Now the pointer dereference is equal to the array field's value
assert data.arrayptr.deref == data.array
# Let's say that it is a DataStr:
datastr = data.cast(DataStr)
print("First element casted to DataStr:")
print(repr(datastr))
print()
# data and datastr really share the same memory:
data.val1 = 0x34
data.val2 = 0x12
assert datastr.valshort == 0x1234
datastr.valshort = 0x1122
assert data.val1 == 0x22 and data.val2 == 0x11
# Let's play with strings
memstr = datastr.data.deref
# Note that memstr is Str("utf16")
memstr.val = 'Miams'
print("Cast data.array to MemStr and set the string value:")
print(repr(memstr))
print()
# If you followed, memstr and data.array point to the same object, so:
raw_miams = 'Miams'.encode('utf-16le') + b'\x00'*2
raw_miams_array = [ord(c) for c in iterbytes(raw_miams)]
assert list(data.array)[:len(raw_miams_array)] == raw_miams_array
assert data.array.cast(Str("utf16")) == memstr
# Default is "ansi"
assert data.array.cast(Str()) != memstr
assert data.array.cast(Str("utf16")).val == memstr.val
print("See that the original array has been modified:")
print(repr(data))
print()
# Some type manipulation examples, for example let's construct an argv for
# a program:
# Let's say that we have two arguments, +1 for the program name and +1 for the
# final null ptr in argv, the array has 4 elements:
argv_t = Array(Ptr("<I", Str()), 4)
print("3 arguments argv type:", argv_t)
# alloc argv somewhere
argv = argv_t.lval(vm)
# Auto alloc with the MemStr.from_str helper
MemStrAnsi = Str().lval
argv[0].val = MemStrAnsi.from_str(vm, "./my-program").get_addr()
argv[1].val = MemStrAnsi.from_str(vm, "arg1").get_addr()
argv[2].val = MemStrAnsi.from_str(vm, "27").get_addr()
argv[3].val = 0
# If you changed your mind on the second arg, you could do:
argv[2].deref.val = "42"
print("An argv instance:", repr(argv))
print("argv values:", repr([val.deref.val for val in argv[:-1]]))
print()
print("See test/core/types.py and the miasm.core.types module doc for ")
print("more information.")
| gpl-2.0 |
DelazJ/QGIS | tests/src/python/test_qgsvectorlayertools.py | 45 | 2025 | # -*- coding: utf-8 -*-
"""QGIS Unit test utils for provider tests.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Denis Rouzaud'
__date__ = '2016-11-07'
__copyright__ = 'Copyright 2015, The QGIS Project'
from qgis.core import QgsFeatureRequest, QgsVectorLayer, QgsProject, QgsVectorLayerTools
from qgis.testing import start_app, unittest
import os
start_app()
class SubQgsVectorLayerTools(QgsVectorLayerTools):
def __init__(self):
super().__init__()
def addFeature(self, layer):
pass
def startEditing(self, layer):
pass
def stopEditing(self, layer):
pass
def saveEdits(self, layer):
pass
class TestQgsVectorLayerTools(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Setup the involved layers and relations for a n:m relation
:return:
"""
cls.dbconn = 'service=\'qgis_test\''
if 'QGIS_PGTEST_DB' in os.environ:
cls.dbconn = os.environ['QGIS_PGTEST_DB']
# Create test layer
cls.vl = QgsVectorLayer(cls.dbconn + ' sslmode=disable key=\'pk\' table="qgis_test"."someData" (geom) sql=', 'layer', 'postgres')
QgsProject.instance().addMapLayer(cls.vl)
cls.vltools = SubQgsVectorLayerTools()
def testCopyMoveFeature(self):
""" Test copy and move features"""
rqst = QgsFeatureRequest()
rqst.setFilterFid(4)
self.vl.startEditing()
(ok, rqst, msg) = self.vltools.copyMoveFeatures(self.vl, rqst, -0.1, 0.2)
self.assertTrue(ok)
for f in self.vl.getFeatures(rqst):
geom = f.geometry()
self.assertAlmostEqual(geom.asPoint().x(), -65.42)
self.assertAlmostEqual(geom.asPoint().y(), 78.5)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
martijnvermaat/rpclib | src/rpclib/test/interop/server/httprpc_pod_basic.py | 1 | 1879 | #!/usr/bin/env python
#
# rpclib - Copyright (C) Rpclib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""pod being plain old data"""
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('rpclib.protocol.xml')
logger.setLevel(logging.DEBUG)
from rpclib.application import Application
from rpclib.test.interop.server._service import services
from rpclib.protocol.http import HttpRpc
from rpclib.interface.wsdl import Wsdl11
from rpclib.server.wsgi import WsgiApplication
httprpc_soap_application = Application(services,
'rpclib.test.interop.server.httprpc.pod', HttpRpc(), HttpRpc(), Wsdl11())
def main():
try:
from wsgiref.simple_server import make_server
from wsgiref.validate import validator
wsgi_application = WsgiApplication(httprpc_soap_application)
server = make_server('0.0.0.0', 9757, validator(wsgi_application))
logger.info('Starting interop server at %s:%s.' % ('0.0.0.0', 9757))
logger.info('WSDL is at: /?wsdl')
server.serve_forever()
except ImportError:
print("Error: example server code requires Python >= 2.5")
if __name__ == '__main__':
main()
| lgpl-2.1 |
Python4AstronomersAndParticlePhysicists/PythonWorkshop-ICE | notebooks/ml/solutions/exercise_6.py | 1 | 2217 | from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, average_precision_score
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_moons
from matplotlib import patches
import matplotlib.pyplot as plt
import numpy as np
X, y = make_moons(n_samples=5000, noise=0.9)
clf = DecisionTreeClassifier(min_samples_leaf=50)
cv = StratifiedKFold(n_splits=5)
fig, ([ax1, ax2], [ax3, ax4]) = plt.subplots(2, 2, figsize=(12, 12))
roc_auc = []
pr_auc = []
for train, test in cv.split(X, y):
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
clf.fit(X_train, y_train)
prediction = clf.predict_proba(X_test)[:, 1]
p, r, thresholds_pr = precision_recall_curve(y_test, prediction)
fpr, tpr, thresholds_roc = roc_curve(y_test, prediction)
roc_auc.append(roc_auc_score(y_test, prediction))
pr_auc.append(average_precision_score(y_test, prediction))
ax1.step(thresholds_pr, r[: -1], color='gray', where='post')
ax1.step(thresholds_pr, p[: -1], color='darkgray', where='post')
ax2.step(r, p, color='darkmagenta', where='post')
ax3.step(thresholds_roc, tpr, color='gray', where='post')
ax3.step(thresholds_roc, fpr, color='darkgray', where='post')
ax4.step(fpr, tpr, color='mediumvioletred', where='post')
p1 = patches.Patch(color='gray', label='Recall')
p2 = patches.Patch(color='darkgray', label='Precision')
ax1.legend(handles=[p1, p2])
ax1.set_xlabel('Decision Threshold')
ax1.set_xlim([0, 1])
ax1.set_ylim([0, 1])
ax2.set_xlim([0, 1])
ax2.set_ylim([0, 1])
ax2.set_ylabel('Precision')
ax2.set_xlabel('Recall')
s = 'AUC {:0.3f} +/- {:0.3f}'.format(np.array(pr_auc).mean(), np.array(pr_auc).std())
ax2.text(0.2, 0.2, s)
p1 = patches.Patch(color='gray', label='True Positive Rate')
p2 = patches.Patch(color='darkgray', label='False Positive Rate')
ax3.legend(handles=[p1, p2])
ax3.set_xlabel('Decision Threshold')
ax3.set_xlim([0, 1])
ax3.set_ylim([0, 1])
ax4.set_xlim([0, 1])
ax4.set_ylim([0, 1])
ax4.set_ylabel('True Positive Rate')
ax4.set_xlabel('False Positive Rate')
s = 'AUC {:0.3f} +/- {:0.3f}'.format(np.array(roc_auc).mean(), np.array(roc_auc).std())
ax4.text(0.2, 0.2, s)
None
| mit |
mydongistiny/external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/common/system/user.py | 27 | 6861 | # Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getpass
import logging
import os
import platform
import re
import shlex
import subprocess
import sys
import webbrowser
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.platforminfo import PlatformInfo
_log = logging.getLogger(__name__)
try:
import readline
except ImportError:
if sys.platform != "win32":
# There is no readline module for win32, not much to do except cry.
_log.warn("Unable to import readline.")
class User(object):
DEFAULT_NO = 'n'
DEFAULT_YES = 'y'
def __init__(self, platforminfo=None):
# We cannot get the PlatformInfo object from a SystemHost because
# User is part of SystemHost itself.
self._platforminfo = platforminfo or PlatformInfo(sys, platform, Executive())
# FIXME: These are @classmethods because bugzilla.py doesn't have a Tool object (thus no User instance).
@classmethod
def prompt(cls, message, repeat=1, raw_input=raw_input):
response = None
while (repeat and not response):
repeat -= 1
response = raw_input(message)
return response
@classmethod
def prompt_password(cls, message, repeat=1):
return cls.prompt(message, repeat=repeat, raw_input=getpass.getpass)
@classmethod
def prompt_with_multiple_lists(cls, list_title, subtitles, lists, can_choose_multiple=False, raw_input=raw_input):
item_index = 0
cumulated_list = []
print list_title
for i in range(len(subtitles)):
print "\n" + subtitles[i]
for item in lists[i]:
item_index += 1
print "%2d. %s" % (item_index, item)
cumulated_list += lists[i]
return cls._wait_on_list_response(cumulated_list, can_choose_multiple, raw_input)
@classmethod
def _wait_on_list_response(cls, list_items, can_choose_multiple, raw_input):
while True:
if can_choose_multiple:
response = cls.prompt("Enter one or more numbers (comma-separated) or ranges (e.g. 3-7), or \"all\": ", raw_input=raw_input)
if not response.strip() or response == "all":
return list_items
try:
indices = []
for value in re.split("\s*,\s*", response):
parts = value.split('-')
if len(parts) == 2:
indices += range(int(parts[0]) - 1, int(parts[1]))
else:
indices.append(int(value) - 1)
except ValueError, err:
continue
return [list_items[i] for i in indices]
else:
try:
result = int(cls.prompt("Enter a number: ", raw_input=raw_input)) - 1
except ValueError, err:
continue
return list_items[result]
@classmethod
def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
print list_title
i = 0
for item in list_items:
i += 1
print "%2d. %s" % (i, item)
return cls._wait_on_list_response(list_items, can_choose_multiple, raw_input)
def edit(self, files):
editor = os.environ.get("EDITOR") or "vi"
args = shlex.split(editor)
# Note: Not thread safe: http://bugs.python.org/issue2320
subprocess.call(args + files)
def _warn_if_application_is_xcode(self, edit_application):
if "Xcode" in edit_application:
print "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\"."
def edit_changelog(self, files):
edit_application = os.environ.get("CHANGE_LOG_EDIT_APPLICATION")
if edit_application and self._platforminfo.is_mac():
# On Mac we support editing ChangeLogs using an application.
args = shlex.split(edit_application)
print "Using editor in the CHANGE_LOG_EDIT_APPLICATION environment variable."
print "Please quit the editor application when done editing."
self._warn_if_application_is_xcode(edit_application)
subprocess.call(["open", "-W", "-n", "-a"] + args + files)
return
self.edit(files)
def page(self, message):
pager = os.environ.get("PAGER") or "less"
try:
# Note: Not thread safe: http://bugs.python.org/issue2320
child_process = subprocess.Popen([pager], stdin=subprocess.PIPE)
child_process.communicate(input=message)
except IOError, e:
pass
def confirm(self, message=None, default=DEFAULT_YES, raw_input=raw_input):
if not message:
message = "Continue?"
choice = {'y': 'Y/n', 'n': 'y/N'}[default]
response = raw_input("%s [%s]: " % (message, choice))
if not response:
response = default
return response.lower() == 'y'
def can_open_url(self):
try:
webbrowser.get()
return True
except webbrowser.Error, e:
return False
def open_url(self, url):
if not self.can_open_url():
_log.warn("Failed to open %s" % url)
webbrowser.open(url)
| bsd-3-clause |
garvitr/sympy | sympy/printing/tree.py | 106 | 2450 | from __future__ import print_function, division
def pprint_nodes(subtrees):
"""
Prettyprints systems of nodes.
Examples
========
>>> from sympy.printing.tree import pprint_nodes
>>> print(pprint_nodes(["a", "b1\\nb2", "c"]))
+-a
+-b1
| b2
+-c
"""
def indent(s, type=1):
x = s.split("\n")
r = "+-%s\n" % x[0]
for a in x[1:]:
if a == "":
continue
if type == 1:
r += "| %s\n" % a
else:
r += " %s\n" % a
return r
if len(subtrees) == 0:
return ""
f = ""
for a in subtrees[:-1]:
f += indent(a)
f += indent(subtrees[-1], 2)
return f
def print_node(node):
"""
Returns information about the "node".
This includes class name, string representation and assumptions.
"""
s = "%s: %s\n" % (node.__class__.__name__, str(node))
d = node._assumptions
if len(d) > 0:
for a in sorted(d):
v = d[a]
if v is None:
continue
s += "%s: %s\n" % (a, v)
return s
def tree(node):
"""
Returns a tree representation of "node" as a string.
It uses print_node() together with pprint_nodes() on node.args recursively.
See also: print_tree()
"""
subtrees = []
for arg in node.args:
subtrees.append(tree(arg))
s = print_node(node) + pprint_nodes(subtrees)
return s
def print_tree(node):
"""
Prints a tree representation of "node".
Examples
========
>>> from sympy.printing import print_tree
>>> from sympy import Symbol
>>> x = Symbol('x', odd=True)
>>> y = Symbol('y', even=True)
>>> print_tree(y**x)
Pow: y**x
+-Symbol: y
| algebraic: True
| commutative: True
| complex: True
| even: True
| hermitian: True
| imaginary: False
| integer: True
| irrational: False
| noninteger: False
| odd: False
| rational: True
| real: True
| transcendental: False
+-Symbol: x
algebraic: True
commutative: True
complex: True
even: False
hermitian: True
imaginary: False
integer: True
irrational: False
noninteger: False
nonzero: True
odd: True
rational: True
real: True
transcendental: False
zero: False
See also: tree()
"""
print(tree(node))
| bsd-3-clause |
Weijing/Weijing.github.io | talkmap.py | 205 | 1188 |
# # Leaflet cluster map of talk locations
#
# (c) 2016-2017 R. Stuart Geiger, released under the MIT license
#
# Run this from the _talks/ directory, which contains .md files of all your talks.
# This scrapes the location YAML field from each .md file, geolocates it with
# geopy/Nominatim, and uses the getorg library to output data, HTML,
# and Javascript for a standalone cluster map.
#
# Requires: glob, getorg, geopy
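#
# Each talk .md file is expected to contain a YAML line such as (the value
# here is only an example):
#   location: "San Francisco, California"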
import glob
import getorg
from geopy import Nominatim
g = glob.glob("*.md")
geocoder = Nominatim()
location_dict = {}
location = ""
permalink = ""
title = ""
for file in g:
with open(file, 'r') as f:
lines = f.read()
if lines.find('location: "') > 1:
loc_start = lines.find('location: "') + 11
lines_trim = lines[loc_start:]
loc_end = lines_trim.find('"')
location = lines_trim[:loc_end]
location_dict[location] = geocoder.geocode(location)
print(location, "\n", location_dict[location])
m = getorg.orgmap.create_map_obj()
getorg.orgmap.output_html_cluster_map(location_dict, folder_name="../talkmap", hashed_usernames=False)
| apache-2.0 |
fritsvanveen/QGIS | python/custom_widgets/qgis_customwidgets.py | 13 | 1934 | # -*- coding: utf-8 -*-
"""
***************************************************************************
customwidgets.py
---------------------
Date : May 2014
Copyright : (C) 2014 by Denis Rouzaud
Email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
"""
This file is used by pyuic to redirect includes
in custom widgets to the correct QGIS python packages.
It is copied on installation to /pythonX/dist-packages/PyQt4/uic/widget-plugins/
"""
# solution with CW_FILTER not fully working due to include of other files
# (e.g. for flags defined in other source files)
# pluginType = CW_FILTER
# def getFilter():
# import qgis.gui
#
# QGIS_widgets = {}
# for pyClass in dir(qgis.gui):
# QGIS_widgets[pyClass] = 'qgis.gui'
#
# def _QGISfilter(widgetname, baseclassname, module):
# print widgetname, baseclassname, module
# if widgetname in QGIS_widgets:
# return (MATCH, (widgetname, baseclassname, QGIS_widgets[widgetname]))
# else:
# return (NO_MATCH, None)
#
# return _QGISfilter
pluginType = MODULE
def moduleInformation():
try:
import qgis.gui
return "qgis.gui", dir(qgis.gui)
except ImportError:
return "", []
| gpl-2.0 |
jdemel/gnuradio | gr-channels/python/channels/amp_bal.py | 9 | 2511 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Amplitude Balance
# Author: mettus
# Description: Restores IQ amplitude balance
# Generated: Thu Aug 1 11:47:46 2013
##################################################
from __future__ import unicode_literals
from gnuradio import blocks
from gnuradio import gr
from gnuradio.filter import firdes
class amp_bal(gr.hier_block2):
def __init__(self, alpha=0):
gr.hier_block2.__init__(
self, "Amplitude Balance",
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
)
##################################################
# Parameters
##################################################
self.alpha = alpha
##################################################
# Blocks
##################################################
self.blocks_rms_xx0 = blocks.rms_ff(alpha)
self.blocks_rms_xx = blocks.rms_ff(alpha)
self.blocks_multiply_vxx1 = blocks.multiply_vff(1)
self.blocks_float_to_complex = blocks.float_to_complex(1)
self.blocks_divide_xx = blocks.divide_ff(1)
self.blocks_complex_to_float = blocks.complex_to_float(1)
##################################################
# Connections
##################################################
self.connect((self.blocks_float_to_complex, 0), (self, 0))
self.connect((self, 0), (self.blocks_complex_to_float, 0))
self.connect((self.blocks_complex_to_float, 0), (self.blocks_rms_xx, 0))
self.connect((self.blocks_complex_to_float, 1), (self.blocks_rms_xx0, 0))
self.connect((self.blocks_rms_xx, 0), (self.blocks_divide_xx, 0))
self.connect((self.blocks_rms_xx0, 0), (self.blocks_divide_xx, 1))
self.connect((self.blocks_complex_to_float, 0), (self.blocks_float_to_complex, 0))
self.connect((self.blocks_complex_to_float, 1), (self.blocks_multiply_vxx1, 1))
self.connect((self.blocks_divide_xx, 0), (self.blocks_multiply_vxx1, 0))
self.connect((self.blocks_multiply_vxx1, 0), (self.blocks_float_to_complex, 1))
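        # Net effect: the Q channel is multiplied by rms(I)/rms(Q), equalizing
        # the average amplitudes of the I and Q channels.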
# QT sink close method reimplementation
def get_alpha(self):
return self.alpha
def set_alpha(self, alpha):
self.alpha = alpha
self.blocks_rms_xx.set_alpha(self.alpha)
self.blocks_rms_xx0.set_alpha(self.alpha)
| gpl-3.0 |
AlexCaranha/Wox | PythonHome/Lib/site-packages/pip/_vendor/requests/sessions.py | 294 | 22290 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""
Determines appropriate setting for a given request, taking into account the
explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None.
for (k, v) in request_setting.items():
if v is None:
del merged_setting[k]
merged_setting = dict((k, v) for (k, v) in merged_setting.items() if v is not None)
return merged_setting
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""
Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None):
"""Receives a Response. Returns a generator of Responses."""
i = 0
while resp.is_redirect:
prepared_request = req.copy()
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
# Release the connection back into the pool.
resp.close()
url = resp.headers['location']
method = req.method
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (parsed_rurl.scheme, url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate non-RFC2616-compliant 'location' headers
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not urlparse(url).netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
if (resp.status_code == codes.see_other and
method != 'HEAD'):
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if resp.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if resp.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
# https://github.com/kennethreitz/requests/issues/1084
if resp.status_code not in (codes.temporary, codes.resume):
if 'Content-Length' in prepared_request.headers:
del prepared_request.headers['Content-Length']
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
extract_cookies_to_jar(prepared_request._cookies, prepared_request, resp.raw)
prepared_request._cookies.update(self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# Override the original request.
req = prepared_request
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
i += 1
yield resp
def rebuild_auth(self, prepared_request, response):
"""
When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
def rebuild_proxies(self, prepared_request, proxies):
"""
This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
"""
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy() if proxies is not None else {}
if self.trust_env and not should_bypass_proxies(url):
environ_proxies = get_environ_proxies(url)
proxy = environ_proxies.get(scheme)
if proxy:
new_proxies.setdefault(scheme, environ_proxies[scheme])
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
      <Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream',
'trust_env', 'max_redirects']
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol to the URL of the proxy (e.g.
#: {'http': 'foo.bar:3128'}) to be used on each
#: :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL certificate default.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Should we trust the environment?
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
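    # Illustrative sketch of the prepared-request flow this method enables
    # (httpbin.org is only a placeholder host):
    #
    #   >>> s = Session()
    #   >>> req = Request(method='GET', url='http://httpbin.org/get')
    #   >>> prepped = s.prepare_request(req)
    #   >>> resp = s.send(prepped)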
def request(self, method, url,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects=True,
proxies=None,
hooks=None,
stream=None,
verify=None,
cert=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary or bytes to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of 'filename': file-like-objects
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) Float describing the timeout of the
request in seconds.
:param allow_redirects: (optional) Boolean. Set to True by default.
:param proxies: (optional) Dictionary mapping protocol to the URL of
the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) if ``True``, the SSL cert will be verified.
A CA_BUNDLE path can also be provided.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
"""
method = builtin_str(method)
# Create the Request.
req = Request(
method = method.upper(),
url = url,
headers = headers,
files = files,
data = data or {},
params = params or {},
auth = auth,
cookies = cookies,
hooks = hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
env_proxies = get_environ_proxies(url) or {}
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for configuration.
if not verify and verify is not False:
verify = os.environ.get('REQUESTS_CA_BUNDLE')
# Curl compatibility.
if not verify and verify is not False:
verify = os.environ.get('CURL_CA_BUNDLE')
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
# Send the request.
send_kwargs = {
'stream': stream,
'timeout': timeout,
'verify': verify,
'cert': cert,
'proxies': proxies,
'allow_redirects': allow_redirects,
}
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('POST', url, data=data, **kwargs)
def put(self, url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest."""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if not isinstance(request, PreparedRequest):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
timeout = kwargs.get('timeout')
verify = kwargs.get('verify')
cert = kwargs.get('cert')
proxies = kwargs.get('proxies')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = datetime.utcnow()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
r.elapsed = datetime.utcnow() - start
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
if not stream:
r.content
return r
def get_adapter(self, url):
"""Returns the appropriate connnection adapter for the given URL."""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length."""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
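    # Illustrative sketch: mounting a custom adapter for one URL prefix. The
    # longest-prefix ordering maintained above makes get_adapter pick it over
    # the default 'https://' adapter (pool_maxsize is an HTTPAdapter option).
    #
    #   >>> s = Session()
    #   >>> s.mount('https://api.example.com/', HTTPAdapter(pool_maxsize=50))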
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""Returns a :class:`Session` for context-management."""
return Session()
| mit |
overtherain/scriptfile | software/googleAppEngine/lib/django_1_4/tests/modeltests/m2m_multiple/tests.py | 150 | 2385 | from __future__ import absolute_import
from datetime import datetime
from django.test import TestCase
from .models import Article, Category
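# The models module imported above is not shown here; for these assertions to
# hold it is assumed to define something like:
#
#   class Category(models.Model):
#       name = models.CharField(max_length=20)
#       class Meta:
#           ordering = ('name',)
#
#   class Article(models.Model):
#       headline = models.CharField(max_length=50)
#       pub_date = models.DateTimeField()
#       primary_categories = models.ManyToManyField(
#           Category, related_name='primary_article_set')
#       secondary_categories = models.ManyToManyField(
#           Category, related_name='secondary_article_set')
#       class Meta:
#           ordering = ('pub_date',)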
class M2MMultipleTests(TestCase):
def test_multiple(self):
c1, c2, c3, c4 = [
Category.objects.create(name=name)
for name in ["Sports", "News", "Crime", "Life"]
]
a1 = Article.objects.create(
headline="Area man steals", pub_date=datetime(2005, 11, 27)
)
a1.primary_categories.add(c2, c3)
a1.secondary_categories.add(c4)
a2 = Article.objects.create(
headline="Area man runs", pub_date=datetime(2005, 11, 28)
)
a2.primary_categories.add(c1, c2)
a2.secondary_categories.add(c4)
self.assertQuerysetEqual(
a1.primary_categories.all(), [
"Crime",
"News",
],
lambda c: c.name
)
self.assertQuerysetEqual(
a2.primary_categories.all(), [
"News",
"Sports",
],
lambda c: c.name
)
self.assertQuerysetEqual(
a1.secondary_categories.all(), [
"Life",
],
lambda c: c.name
)
self.assertQuerysetEqual(
c1.primary_article_set.all(), [
"Area man runs",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c1.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c2.primary_article_set.all(), [
"Area man steals",
"Area man runs",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c2.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c3.primary_article_set.all(), [
"Area man steals",
],
lambda a: a.headline
)
self.assertQuerysetEqual(
c3.secondary_article_set.all(), []
)
self.assertQuerysetEqual(
c4.primary_article_set.all(), []
)
self.assertQuerysetEqual(
c4.secondary_article_set.all(), [
"Area man steals",
"Area man runs",
],
lambda a: a.headline
)
| mit |
Flimm/django-fullurl | fullurl/templatetags/fullurl.py | 1 | 1889 | from django import template
from django.template import defaulttags
from django.template.base import Node
from django.templatetags.static import do_static
register = template.Library()
@register.tag
def fullurl(parser, token):
"""Return an absolute URL (including the scheme and domain) matching the
given view with its parameters.
This is meant to be identical to the built-in tag `url`, except that it
always returns an absolute URL with the scheme and authority parts.
For example, take this `url` tag:
{% url "articles:article" slug="hello" %}
This could return:
/articles/hello
    This is an absolute path, because it begins with a forward slash;
    however, it is not a full URL, because it does not include the scheme
    and authority parts.
Compare with this `fullurl` tag:
{% fullurl "articles:article" slug="hello" %}
This returns:
http://example.com/articles/hello
"""
return FullURLNode(defaulttags.url(parser, token))
@register.tag
def fullstatic(parser, token):
return FullStaticNode(do_static(parser, token))
@register.simple_tag(takes_context=True)
def buildfullurl(context, url):
"""Converts relative URL to absolute.
For example:
{% buildfullurl article.get_absolute_url %}
or:
{% buildfullurl "/custom-url/" %}
"""
return context.request.build_absolute_uri(url)
class FullURLNode(Node):
def __init__(self, subject):
self._subject = subject
def render(self, context):
url = self._subject.render(context)
return context.request.build_absolute_uri(url)
class FullStaticNode(Node):
def __init__(self, subject):
self._subject = subject
def render(self, context):
url = self._subject.render(context)
return context.request.build_absolute_uri(url)
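# Template usage sketch (assumes the request context processor,
# 'django.template.context_processors.request', is enabled so that
# context.request is available to these nodes):
#
#   {% load fullurl %}
#   {% fullurl "articles:article" slug="hello" %}
#   {% fullstatic "img/logo.png" %}
#   {% buildfullurl article.get_absolute_url %}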
| mit |
LyreOrpheus/lyreserver | MusicBot/musicbot/exceptions.py | 14 | 2599 | import shutil
import textwrap
# Base class for exceptions
class MusicbotException(Exception):
def __init__(self, message, *, expire_in=0):
self._message = message
self.expire_in = expire_in
@property
def message(self):
return self._message
@property
def message_no_format(self):
return self._message
# Something went wrong during the processing of a command
class CommandError(MusicbotException):
pass
# Something went wrong during the processing of a song/ytdl stuff
class ExtractionError(MusicbotException):
pass
# The no processing entry type failed and an entry was a playlist/vice versa
class WrongEntryTypeError(ExtractionError):
def __init__(self, message, is_playlist, use_url):
super().__init__(message)
self.is_playlist = is_playlist
self.use_url = use_url
# The user doesn't have permission to use a command
class PermissionsError(CommandError):
@property
def message(self):
return "You don't have permission to use that command.\nReason: " + self._message
# Error with pretty formatting for hand-holding users through various errors
class HelpfulError(MusicbotException):
    def __init__(self, issue, solution, *, preface="An error has occurred:\n", expire_in=0):
self.issue = issue
self.solution = solution
self.preface = preface
self.expire_in = expire_in
@property
def message(self):
return ("\n{}\n{}\n{}\n").format(
self.preface,
self._pretty_wrap(self.issue, " Problem: "),
self._pretty_wrap(self.solution, " Solution: "))
@property
def message_no_format(self):
return "\n{}\n{}\n{}\n".format(
self.preface,
self._pretty_wrap(self.issue, " Problem: ", width=None),
self._pretty_wrap(self.solution, " Solution: ", width=None))
@staticmethod
def _pretty_wrap(text, pretext, *, width=-1):
if width is None:
return pretext + text
elif width == -1:
width = shutil.get_terminal_size().columns
l1, *lx = textwrap.wrap(text, width=width - 1 - len(pretext))
lx = [((' ' * len(pretext)) + l).rstrip().ljust(width) for l in lx]
l1 = (pretext + l1).ljust(width)
return ''.join([l1, *lx])
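# Illustrative sketch of the unwrapped output (the issue/solution text is
# made up; real output is additionally wrapped to the terminal width):
#
#   >>> err = HelpfulError("Bot token missing", "Add a token to the config")
#   >>> print(err.message_no_format)
#
#   An error has occurred:
#
#     Problem: Bot token missing
#     Solution: Add a token to the config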
class HelpfulWarning(HelpfulError):
pass
# Base class for control signals
class Signal(Exception):
pass
# signal to restart the bot
class RestartSignal(Signal):
pass
# signal to end the bot "gracefully"
class TerminateSignal(Signal):
pass
| mit |
icio/github3.py | github3/repos/comparison.py | 10 | 2875 | # -*- coding: utf-8 -*-
"""
github3.repos.comparison
========================
This module contains the Comparison object for comparing two commits via the
GitHub API.
"""
from __future__ import unicode_literals
from ..models import GitHubCore
from .commit import RepoCommit
class Comparison(GitHubCore):
"""The :class:`Comparison <Comparison>` object. This encapsulates the
information returned by GitHub comparing two commit objects in a
repository.
Two comparison instances can be checked like so::
c1 == c2
c1 != c2
And is equivalent to::
c1.commits == c2.commits
c1.commits != c2.commits
See also:
http://developer.github.com/v3/repos/commits/#compare-two-commits
"""
def _update_attributes(self, compare):
self._api = compare.get('url', '')
#: URL to view the comparison at GitHub
self.html_url = compare.get('html_url')
#: Permanent link to this comparison.
self.permalink_url = compare.get('permalink_url')
#: URL to see the diff between the two commits.
self.diff_url = compare.get('diff_url')
#: Patch URL at GitHub for the comparison.
self.patch_url = compare.get('patch_url')
#: :class:`RepoCommit <github3.repos.commit.RepoCommit>` object
#: representing the base of comparison.
self.base_commit = RepoCommit(compare.get('base_commit'), None)
#: Behind or ahead.
self.status = compare.get('status')
#: Number of commits ahead by.
self.ahead_by = compare.get('ahead_by')
#: Number of commits behind by.
self.behind_by = compare.get('behind_by')
#: Number of commits difference in the comparison.
self.total_commits = compare.get('total_commits')
#: List of :class:`RepoCommit <github3.repos.commit.RepoCommit>`
#: objects.
self.commits = [RepoCommit(com) for com in compare.get('commits')]
#: List of dicts describing the files modified.
self.files = compare.get('files', [])
self._uniq = self.commits
def _repr(self):
return '<Comparison of {0} commits>'.format(self.total_commits)
def diff(self):
"""Retrieve the diff for this comparison.
:returns: the diff as a bytes object
:rtype: bytes
"""
resp = self._get(self._api,
headers={'Accept': 'application/vnd.github.diff'})
return resp.content if self._boolean(resp, 200, 404) else b''
def patch(self):
"""Retrieve the patch formatted diff for this commit.
:returns: the patch as a bytes object
:rtype: bytes
"""
resp = self._get(self._api,
headers={'Accept': 'application/vnd.github.patch'})
return resp.content if self._boolean(resp, 200, 404) else b''
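# Usage sketch (assumes an authenticated github3 client; the owner,
# repository, and branch names are placeholders):
#
#   repo = gh.repository('owner', 'name')
#   comparison = repo.compare_commits('master', 'feature-branch')
#   print(comparison.status, comparison.ahead_by, comparison.behind_by)
#   raw_diff = comparison.diff()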
| bsd-3-clause |
Varentsov/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/acceptance_test.py | 12 | 29172 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import sys
import _pytest._code
import py
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
class TestGeneralUsage(object):
def test_config_error(self, testdir):
testdir.makeconftest("""
def pytest_configure(config):
import pytest
raise pytest.UsageError("hello")
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
result.stderr.fnmatch_lines([
'*ERROR: hello'
])
def test_root_conftest_syntax_error(self, testdir):
testdir.makepyfile(conftest="raise SyntaxError\n")
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
assert result.ret != 0
def test_early_hook_error_issue38_1(self, testdir):
testdir.makeconftest("""
def pytest_sessionstart():
0 / 0
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines([
'*INTERNALERROR*File*conftest.py*line 2*',
'*0 / 0*',
])
result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines([
'*INTERNALERROR*def pytest_sessionstart():*',
'*INTERNALERROR*0 / 0*',
])
def test_early_hook_configure_error_issue38(self, testdir):
testdir.makeconftest("""
def pytest_configure():
0 / 0
""")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# here we get it on stderr
result.stderr.fnmatch_lines([
'*INTERNALERROR*File*conftest.py*line 2*',
'*0 / 0*',
])
def test_file_not_found(self, testdir):
result = testdir.runpytest("asd")
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
def test_file_not_found_unconfigure_issue143(self, testdir):
testdir.makeconftest("""
def pytest_configure():
print("---configure")
def pytest_unconfigure():
print("---unconfigure")
""")
result = testdir.runpytest("-s", "asd")
assert result.ret == 4 # EXIT_USAGEERROR
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
result.stdout.fnmatch_lines([
"*---configure",
"*---unconfigure",
])
def test_config_preparse_plugin_option(self, testdir):
testdir.makepyfile(pytest_xyz="""
def pytest_addoption(parser):
parser.addoption("--xyz", dest="xyz", action="store")
""")
testdir.makepyfile(test_one="""
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
""")
result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines([
'*1 passed*',
])
def test_assertion_magic(self, testdir):
p = testdir.makepyfile("""
def test_this():
x = 0
assert x
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"> assert x",
"E assert 0",
])
assert result.ret == 1
def test_nested_import_error(self, testdir):
p = testdir.makepyfile("""
import import_fails
def test_this():
assert import_fails.a == 1
""")
testdir.makepyfile(import_fails="import does_not_work")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
# XXX on jython this fails: "> import import_fails",
"ImportError while importing test module*",
"*No module named *does_not_work*",
])
assert result.ret == 2
def test_not_collectable_arguments(self, testdir):
p1 = testdir.makepyfile("")
p2 = testdir.makefile(".pyc", "123")
result = testdir.runpytest(p1, p2)
assert result.ret
result.stderr.fnmatch_lines([
"*ERROR: not found:*%s" % (p2.basename,)
])
def test_issue486_better_reporting_on_conftest_load_failure(self, testdir):
testdir.makepyfile("")
testdir.makeconftest("import qwerty")
result = testdir.runpytest("--help")
result.stdout.fnmatch_lines("""
*--version*
*warning*conftest.py*
""")
result = testdir.runpytest()
result.stderr.fnmatch_lines("""
*ERROR*could not load*conftest.py*
""")
def test_early_skip(self, testdir):
testdir.mkdir("xyz")
testdir.makeconftest("""
import pytest
def pytest_collect_directory():
pytest.skip("early")
""")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines([
"*1 skip*"
])
def test_issue88_initial_file_multinodes(self, testdir):
testdir.makeconftest("""
import pytest
class MyFile(pytest.File):
def collect(self):
return [MyItem("hello", parent=self)]
def pytest_collect_file(path, parent):
return MyFile(path, parent)
class MyItem(pytest.Item):
pass
""")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines([
"*MyFile*test_issue88*",
"*Module*test_issue88*",
])
def test_issue93_initialnode_importing_capturing(self, testdir):
testdir.makeconftest("""
import sys
print ("should not be seen")
sys.stderr.write("stder42\\n")
""")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
assert "should not be seen" not in result.stdout.str()
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
testdir.makeconftest("""
print ("should be seen")
assert 0
""")
result = testdir.runpytest()
assert result.ret != 0
assert "should be seen" in result.stdout.str()
@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
reason="symlink not available on this platform")
def test_chdir(self, testdir):
testdir.tmpdir.join("py").mksymlinkto(py._pydir)
p = testdir.tmpdir.join("main.py")
p.write(_pytest._code.Source("""
import sys, os
sys.path.insert(0, '')
import py
print (py.__file__)
print (py.__path__)
os.chdir(os.path.dirname(os.getcwd()))
print (py.log)
"""))
result = testdir.runpython(p)
assert not result.ret
def test_issue109_sibling_conftests_not_loaded(self, testdir):
sub1 = testdir.tmpdir.mkdir("sub1")
sub2 = testdir.tmpdir.mkdir("sub2")
sub1.join("conftest.py").write("assert 0")
result = testdir.runpytest(sub2)
assert result.ret == EXIT_NOTESTSCOLLECTED
sub2.ensure("__init__.py")
p = sub2.ensure("test_hello.py")
result = testdir.runpytest(p)
assert result.ret == EXIT_NOTESTSCOLLECTED
result = testdir.runpytest(sub1)
assert result.ret == EXIT_USAGEERROR
def test_directory_skipped(self, testdir):
testdir.makeconftest("""
import pytest
def pytest_ignore_collect():
pytest.skip("intentional")
""")
testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stdout.fnmatch_lines([
"*1 skipped*"
])
def test_multiple_items_per_collector_byid(self, testdir):
c = testdir.makeconftest("""
import pytest
class MyItem(pytest.Item):
def runtest(self):
pass
class MyCollector(pytest.File):
def collect(self):
return [MyItem(name="xyz", parent=self)]
def pytest_collect_file(path, parent):
if path.basename.startswith("conftest"):
return MyCollector(path, parent)
""")
result = testdir.runpytest(c.basename + "::" + "xyz")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*1 pass*",
])
def test_skip_on_generated_funcarg_id(self, testdir):
testdir.makeconftest("""
import pytest
def pytest_generate_tests(metafunc):
metafunc.addcall({'x': 3}, id='hello-123')
def pytest_runtest_setup(item):
print (item.keywords)
if 'hello-123' in item.keywords:
pytest.skip("hello")
assert 0
""")
p = testdir.makepyfile("""def test_func(x): pass""")
res = testdir.runpytest(p)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 skipped*"])
def test_direct_addressing_selects(self, testdir):
p = testdir.makepyfile("""
def pytest_generate_tests(metafunc):
metafunc.addcall({'i': 1}, id="1")
metafunc.addcall({'i': 2}, id="2")
def test_func(i):
pass
""")
res = testdir.runpytest(p.basename + "::" + "test_func[1]")
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_direct_addressing_notfound(self, testdir):
p = testdir.makepyfile("""
def test_func():
pass
""")
res = testdir.runpytest(p.basename + "::" + "test_notfound")
assert res.ret
res.stderr.fnmatch_lines(["*ERROR*not found*"])
def test_docstring_on_hookspec(self):
from _pytest import hookspec
for name, value in vars(hookspec).items():
if name.startswith("pytest_"):
assert value.__doc__, "no docstring for %s" % name
def test_initialization_error_issue49(self, testdir):
testdir.makeconftest("""
def pytest_configure():
x
""")
result = testdir.runpytest()
assert result.ret == 3 # internal error
result.stderr.fnmatch_lines([
"INTERNAL*pytest_configure*",
"INTERNAL*x*",
])
assert 'sessionstarttime' not in result.stderr.str()
@pytest.mark.parametrize('lookfor', ['test_fun.py::test_a'])
def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):
testdir.makepyfile(test_fun="""
def test_a():
pass
def""")
result = testdir.runpytest(lookfor)
result.stdout.fnmatch_lines(['*SyntaxError*'])
if '::' in lookfor:
result.stderr.fnmatch_lines([
'*ERROR*',
])
assert result.ret == 4 # usage error only if item not found
def test_report_all_failed_collections_initargs(self, testdir):
testdir.makepyfile(test_a="def", test_b="def")
result = testdir.runpytest("test_a.py::a", "test_b.py::b")
result.stderr.fnmatch_lines([
"*ERROR*test_a.py::a*",
"*ERROR*test_b.py::b*",
])
@pytest.mark.usefixtures('recwarn')
def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
"""
Ref #383. Python 3.3's namespace package messed with our import hooks
Importing a module that didn't exist, even if the ImportError was
gracefully handled, would make our test crash.
Use recwarn here to silence this warning in Python 2.7:
ImportWarning: Not importing directory '...\not_a_package': missing __init__.py
"""
testdir.mkdir('not_a_package')
p = testdir.makepyfile("""
try:
from not_a_package import doesnt_exist
except ImportError:
# We handle the import error gracefully here
pass
def test_whatever():
pass
""")
res = testdir.runpytest(p.basename)
assert res.ret == 0
def test_unknown_option(self, testdir):
result = testdir.runpytest("--qwlkej")
result.stderr.fnmatch_lines("""
*unrecognized*
""")
def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
monkeypatch.setattr("inspect.getsourcelines", None)
p = testdir.makepyfile("""
def raise_error(obj):
raise IOError('source code not available')
import inspect
inspect.getsourcelines = raise_error
def test_foo(invalid_fixture):
pass
""")
res = testdir.runpytest(p)
res.stdout.fnmatch_lines([
"*source code not available*",
"E*fixture 'invalid_fixture' not found",
])
def test_plugins_given_as_strings(self, tmpdir, monkeypatch):
"""test that str values passed to main() as `plugins` arg
are interpreted as module names to be imported and registered.
#855.
"""
with pytest.raises(ImportError) as excinfo:
pytest.main([str(tmpdir)], plugins=['invalid.module'])
assert 'invalid' in str(excinfo.value)
p = tmpdir.join('test_test_plugins_given_as_strings.py')
p.write('def test_foo(): pass')
mod = py.std.types.ModuleType("myplugin")
monkeypatch.setitem(sys.modules, 'myplugin', mod)
assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0
def test_parametrized_with_bytes_regex(self, testdir):
p = testdir.makepyfile("""
import re
import pytest
@pytest.mark.parametrize('r', [re.compile(b'foo')])
def test_stuff(r):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines([
'*1 passed*'
])
def test_parametrized_with_null_bytes(self, testdir):
"""Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)"""
p = testdir.makepyfile(u"""
# encoding: UTF-8
import pytest
@pytest.mark.parametrize("data", [b"\\x00", "\\x00", u'ação'])
def test_foo(data):
assert data
""")
res = testdir.runpytest(p)
res.assert_outcomes(passed=3)
class TestInvocationVariants(object):
def test_earlyinit(self, testdir):
p = testdir.makepyfile("""
import pytest
assert hasattr(pytest, 'mark')
""")
result = testdir.runpython(p)
assert result.ret == 0
@pytest.mark.xfail("sys.platform.startswith('java')")
def test_pydoc(self, testdir):
for name in ('py.test', 'pytest'):
result = testdir.runpython_c("import %s;help(%s)" % (name, name))
assert result.ret == 0
s = result.stdout.str()
assert 'MarkGenerator' in s
def test_import_star_py_dot_test(self, testdir):
p = testdir.makepyfile("""
from py.test import *
#collect
#cmdline
#Item
# assert collect.Item is Item
# assert collect.Collector is Collector
main
skip
xfail
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_import_star_pytest(self, testdir):
p = testdir.makepyfile("""
from pytest import *
#Item
#File
main
skip
xfail
""")
result = testdir.runpython(p)
assert result.ret == 0
def test_double_pytestcmdline(self, testdir):
p = testdir.makepyfile(run="""
import pytest
pytest.main()
pytest.main()
""")
testdir.makepyfile("""
def test_hello():
pass
""")
result = testdir.runpython(p)
result.stdout.fnmatch_lines([
"*1 passed*",
"*1 passed*",
])
def test_python_minus_m_invocation_ok(self, testdir):
p1 = testdir.makepyfile("def test_hello(): pass")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
def test_python_minus_m_invocation_fail(self, testdir):
p1 = testdir.makepyfile("def test_fail(): 0/0")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 1
def test_python_pytest_package(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
res = testdir.run(py.std.sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_equivalence_pytest_pytest(self):
assert pytest.main == py.test.cmdline.main
def test_invoke_with_string(self, capsys):
retcode = pytest.main("-h")
assert not retcode
out, err = capsys.readouterr()
assert "--help" in out
pytest.raises(ValueError, lambda: pytest.main(0))
def test_invoke_with_path(self, tmpdir, capsys):
retcode = pytest.main(tmpdir)
assert retcode == EXIT_NOTESTSCOLLECTED
out, err = capsys.readouterr()
def test_invoke_plugin_api(self, testdir, capsys):
class MyPlugin(object):
def pytest_addoption(self, parser):
parser.addoption("--myopt")
pytest.main(["-h"], plugins=[MyPlugin()])
out, err = capsys.readouterr()
assert "--myopt" in out
def test_pyargs_importerror(self, testdir, monkeypatch):
monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write('raise ImportError')
result = testdir.runpytest_subprocess("--pyargs", "tpkg.test_hello")
assert result.ret != 0
result.stdout.fnmatch_lines([
"collected*0*items*/*1*errors"
])
def test_cmdline_python_package(self, testdir, monkeypatch):
import warnings
monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("def test_hello(): pass")
path.join("test_world.py").write("def test_world(): pass")
result = testdir.runpytest("--pyargs", "tpkg")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*"
])
result = testdir.runpytest("--pyargs", "tpkg.test_hello")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*1 passed*"
])
def join_pythonpath(what):
cur = py.std.os.environ.get('PYTHONPATH')
if cur:
return str(what) + os.pathsep + cur
return what
empty_package = testdir.mkpydir("empty_package")
monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))
# the path which is not a package raises a warning on pypy;
# no idea why only pypy and not normal python warn about it here
with warnings.catch_warnings():
warnings.simplefilter('ignore', ImportWarning)
result = testdir.runpytest("--pyargs", ".")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*2 passed*"
])
monkeypatch.setenv('PYTHONPATH', join_pythonpath(testdir))
result = testdir.runpytest("--pyargs", "tpkg.test_missing")
assert result.ret != 0
result.stderr.fnmatch_lines([
"*not*found*test_missing*",
])
def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
"""
test --pyargs option with namespace packages (#1567)
"""
monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', raising=False)
search_path = []
for dirname in "hello", "world":
d = testdir.mkdir(dirname)
search_path.append(d)
ns = d.mkdir("ns_pkg")
ns.join("__init__.py").write(
"__import__('pkg_resources').declare_namespace(__name__)")
lib = ns.mkdir(dirname)
lib.ensure("__init__.py")
lib.join("test_{0}.py".format(dirname)). \
write("def test_{0}(): pass\n"
"def test_other():pass".format(dirname))
# The structure of the test directory is now:
# .
# ├── hello
# │ └── ns_pkg
# │ ├── __init__.py
# │ └── hello
# │ ├── __init__.py
# │ └── test_hello.py
# └── world
# └── ns_pkg
# ├── __init__.py
# └── world
# ├── __init__.py
# └── test_world.py
def join_pythonpath(*dirs):
cur = py.std.os.environ.get('PYTHONPATH')
if cur:
dirs += (cur,)
return os.pathsep.join(str(p) for p in dirs)
monkeypatch.setenv('PYTHONPATH', join_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
os.chdir('world')
# mixed module and filenames:
result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world")
testdir.chdir()
assert result.ret == 0
result.stdout.fnmatch_lines([
"*test_hello.py::test_hello*PASSED*",
"*test_hello.py::test_other*PASSED*",
"*test_world.py::test_world*PASSED*",
"*test_world.py::test_other*PASSED*",
"*4 passed*"
])
# specify tests within a module
result = testdir.runpytest("--pyargs", "-v", "ns_pkg.world.test_world::test_other")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*test_world.py::test_other*PASSED*",
"*1 passed*"
])
def test_cmdline_python_package_not_exists(self, testdir):
result = testdir.runpytest("--pyargs", "tpkgwhatv")
assert result.ret
result.stderr.fnmatch_lines([
"ERROR*file*or*package*not*found*",
])
@pytest.mark.xfail(reason="decide: feature or bug")
def test_noclass_discovery_if_not_testcase(self, testdir):
testpath = testdir.makepyfile("""
import unittest
class TestHello(object):
def test_hello(self):
assert self.attr
class RealTest(unittest.TestCase, TestHello):
attr = 42
""")
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1)
def test_doctest_id(self, testdir):
testdir.makefile('.txt', """
>>> x=3
>>> x
4
""")
result = testdir.runpytest("-rf")
lines = result.stdout.str().splitlines()
for line in lines:
if line.startswith("FAIL "):
testid = line[5:].strip()
break
result = testdir.runpytest(testid, '-rf')
result.stdout.fnmatch_lines([
line,
"*1 failed*",
])
def test_core_backward_compatibility(self):
"""Test backward compatibility for get_plugin_manager function. See #787."""
import _pytest.config
assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager
def test_has_plugin(self, request):
"""Test hasplugin function of the plugin manager (#932)."""
assert request.config.pluginmanager.hasplugin('python')
class TestDurations(object):
source = """
import time
frag = 0.002
def test_something():
pass
def test_2():
time.sleep(frag*5)
def test_1():
time.sleep(frag)
def test_3():
time.sleep(frag*10)
"""
def test_calls(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random([
"*durations*",
"*call*test_3*",
"*call*test_2*",
"*call*test_1*",
])
def test_calls_show_2(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
def test_calls_showall(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=0")
assert result.ret == 0
for x in "123":
for y in 'call', : # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found %s %s" % (x, y))
def test_with_deselected(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*durations*",
"*call*test_1*",
])
def test_with_failing_collection(self, testdir):
testdir.makepyfile(self.source)
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines([
"*Interrupted: 1 errors during collection*",
])
# Collection errors abort test execution, therefore no duration is
# output
assert "duration" not in result.stdout.str()
def test_with_not(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("-k not 1")
assert result.ret == 0
class TestDurationWithFixture(object):
source = """
import time
frag = 0.001
def setup_function(func):
time.sleep(frag * 3)
def test_1():
time.sleep(frag*2)
def test_2():
time.sleep(frag)
"""
def test_setup_function(self, testdir):
testdir.makepyfile(self.source)
result = testdir.runpytest("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random("""
*durations*
* setup *test_1*
* call *test_1*
""")
def test_zipimport_hook(testdir, tmpdir):
"""Test package loader is being used correctly (see #1837)."""
zipapp = pytest.importorskip('zipapp')
testdir.tmpdir.join('app').ensure(dir=1)
testdir.makepyfile(**{
'app/foo.py': """
import pytest
def main():
pytest.main(['--pyarg', 'foo'])
""",
})
target = tmpdir.join('foo.zip')
zipapp.create_archive(str(testdir.tmpdir.join('app')), str(target), main='foo:main')
result = testdir.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(['*not found*foo*'])
assert 'INTERNALERROR>' not in result.stdout.str()
def test_import_plugin_unicode_name(testdir):
testdir.makepyfile(
myplugin='',
)
testdir.makepyfile("""
def test(): pass
""")
testdir.makeconftest("""
pytest_plugins = [u'myplugin']
""")
r = testdir.runpytest()
assert r.ret == 0
def test_deferred_hook_checking(testdir):
"""
Check hooks as late as possible (#1821).
"""
testdir.syspathinsert()
testdir.makepyfile(**{
'plugin.py': """
class Hooks:
def pytest_my_hook(self, config):
pass
def pytest_configure(config):
config.pluginmanager.add_hookspecs(Hooks)
""",
'conftest.py': """
pytest_plugins = ['plugin']
def pytest_my_hook(config):
return 40
""",
'test_foo.py': """
def test(request):
assert request.config.hook.pytest_my_hook(config=request.config) == [40]
"""
})
result = testdir.runpytest()
result.stdout.fnmatch_lines(['* 1 passed *'])
| mpl-2.0 |
rgommers/numpy | numpy/f2py/tests/test_return_complex.py | 17 | 4615 | import pytest
from numpy import array
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnComplex(util.F2PyTest):
def check_function(self, t, tname):
if tname in ['t0', 't8', 's0', 's8']:
err = 1e-5
else:
err = 0.0
assert_(abs(t(234j) - 234.0j) <= err)
assert_(abs(t(234.6) - 234.6) <= err)
assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err)
#assert_( abs(t('234')-234.)<=err)
#assert_( abs(t('234.6')-234.6)<=err)
assert_(abs(t(-234) + 234.) <= err)
assert_(abs(t([234]) - 234.) <= err)
assert_(abs(t((234,)) - 234.) <= err)
assert_(abs(t(array(234)) - 234.) <= err)
assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err)
assert_(abs(t(array([234])) - 234.) <= err)
assert_(abs(t(array([[234]])) - 234.) <= err)
assert_(abs(t(array([234], 'b')) + 22.) <= err)
assert_(abs(t(array([234], 'h')) - 234.) <= err)
assert_(abs(t(array([234], 'i')) - 234.) <= err)
assert_(abs(t(array([234], 'l')) - 234.) <= err)
assert_(abs(t(array([234], 'q')) - 234.) <= err)
assert_(abs(t(array([234], 'f')) - 234.) <= err)
assert_(abs(t(array([234], 'd')) - 234.) <= err)
assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err)
assert_(abs(t(array([234], 'D')) - 234.) <= err)
#assert_raises(TypeError, t, array([234], 'a1'))
assert_raises(TypeError, t, 'abc')
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
assert_raises(TypeError, t, t)
assert_raises(TypeError, t, {})
try:
r = t(10 ** 400)
assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r))
except OverflowError:
pass
class TestF77ReturnComplex(TestReturnComplex):
code = """
function t0(value)
complex value
complex t0
t0 = value
end
function t8(value)
complex*8 value
complex*8 t8
t8 = value
end
function t16(value)
complex*16 value
complex*16 t16
t16 = value
end
function td(value)
double complex value
double complex td
td = value
end
subroutine s0(t0,value)
complex value
complex t0
cf2py intent(out) t0
t0 = value
end
subroutine s8(t8,value)
complex*8 value
complex*8 t8
cf2py intent(out) t8
t8 = value
end
subroutine s16(t16,value)
complex*16 value
complex*16 t16
cf2py intent(out) t16
t16 = value
end
subroutine sd(td,value)
double complex value
double complex td
cf2py intent(out) td
td = value
end
"""
@pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
class TestF90ReturnComplex(TestReturnComplex):
suffix = ".f90"
code = """
module f90_return_complex
contains
function t0(value)
complex :: value
complex :: t0
t0 = value
end function t0
function t8(value)
complex(kind=4) :: value
complex(kind=4) :: t8
t8 = value
end function t8
function t16(value)
complex(kind=8) :: value
complex(kind=8) :: t16
t16 = value
end function t16
function td(value)
double complex :: value
double complex :: td
td = value
end function td
subroutine s0(t0,value)
complex :: value
complex :: t0
!f2py intent(out) t0
t0 = value
end subroutine s0
subroutine s8(t8,value)
complex(kind=4) :: value
complex(kind=4) :: t8
!f2py intent(out) t8
t8 = value
end subroutine s8
subroutine s16(t16,value)
complex(kind=8) :: value
complex(kind=8) :: t16
!f2py intent(out) t16
t16 = value
end subroutine s16
subroutine sd(td,value)
double complex :: value
double complex :: td
!f2py intent(out) td
td = value
end subroutine sd
end module f90_return_complex
"""
@pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_complex, name), name)
| bsd-3-clause |
ashray/VTK-EVM | Wrapping/Python/vtk/util/numpy_support.py | 6 | 8679 | """This module adds support to easily import and export NumPy
(http://numpy.scipy.org) arrays into/out of VTK arrays. The code is
loosely based on TVTK (https://svn.enthought.com/enthought/wiki/TVTK).
This code depends on an addition to the VTK data arrays made by Berk
Geveci to make it support Python's buffer protocol (on Feb. 15, 2008).
The main functionality of this module is provided by the two functions:
numpy_to_vtk,
vtk_to_numpy.
Caveats:
--------
- Bit arrays in general do not have a numpy equivalent and are not
supported. Char arrays are also not easy to handle and might not
work as you expect. Patches welcome.
- You need to make sure you hold a reference to a Numpy array you want
   to import into VTK. If not, you'll get a segfault (in the best case).
The same holds in reverse when you convert a VTK array to a numpy
array -- don't delete the VTK array.
Created by Prabhu Ramachandran in Feb. 2008.
"""
import vtk
import numpy
# Useful constants for VTK arrays.
VTK_ID_TYPE_SIZE = vtk.vtkIdTypeArray().GetDataTypeSize()
if VTK_ID_TYPE_SIZE == 4:
ID_TYPE_CODE = numpy.int32
elif VTK_ID_TYPE_SIZE == 8:
ID_TYPE_CODE = numpy.int64
VTK_LONG_TYPE_SIZE = vtk.vtkLongArray().GetDataTypeSize()
if VTK_LONG_TYPE_SIZE == 4:
LONG_TYPE_CODE = numpy.int32
ULONG_TYPE_CODE = numpy.uint32
elif VTK_LONG_TYPE_SIZE == 8:
LONG_TYPE_CODE = numpy.int64
ULONG_TYPE_CODE = numpy.uint64
def get_vtk_array_type(numpy_array_type):
"""Returns a VTK typecode given a numpy array."""
# This is a Mapping from numpy array types to VTK array types.
_np_vtk = {numpy.character:vtk.VTK_UNSIGNED_CHAR,
numpy.uint8:vtk.VTK_UNSIGNED_CHAR,
numpy.uint16:vtk.VTK_UNSIGNED_SHORT,
numpy.uint32:vtk.VTK_UNSIGNED_INT,
numpy.uint64:vtk.VTK_UNSIGNED_LONG_LONG,
numpy.int8:vtk.VTK_CHAR,
numpy.int16:vtk.VTK_SHORT,
numpy.int32:vtk.VTK_INT,
numpy.int64:vtk.VTK_LONG_LONG,
numpy.float32:vtk.VTK_FLOAT,
numpy.float64:vtk.VTK_DOUBLE,
numpy.complex64:vtk.VTK_FLOAT,
numpy.complex128:vtk.VTK_DOUBLE}
for key, vtk_type in _np_vtk.items():
if numpy_array_type == key or \
numpy.issubdtype(numpy_array_type, key) or \
numpy_array_type == numpy.dtype(key):
return vtk_type
raise TypeError, \
'Could not find a suitable VTK type for %s' % (str(numpy_array_type))
def get_vtk_to_numpy_typemap():
"""Returns the VTK array type to numpy array type mapping."""
_vtk_np = {vtk.VTK_BIT:numpy.bool,
vtk.VTK_CHAR:numpy.int8,
vtk.VTK_UNSIGNED_CHAR:numpy.uint8,
vtk.VTK_SHORT:numpy.int16,
vtk.VTK_UNSIGNED_SHORT:numpy.uint16,
vtk.VTK_INT:numpy.int32,
vtk.VTK_UNSIGNED_INT:numpy.uint32,
vtk.VTK_LONG:LONG_TYPE_CODE,
vtk.VTK_LONG_LONG:numpy.int64,
vtk.VTK_UNSIGNED_LONG:ULONG_TYPE_CODE,
vtk.VTK_UNSIGNED_LONG_LONG:numpy.uint64,
vtk.VTK_ID_TYPE:ID_TYPE_CODE,
vtk.VTK_FLOAT:numpy.float32,
vtk.VTK_DOUBLE:numpy.float64}
return _vtk_np
def get_numpy_array_type(vtk_array_type):
"""Returns a numpy array typecode given a VTK array type."""
return get_vtk_to_numpy_typemap()[vtk_array_type]
def create_vtk_array(vtk_arr_type):
"""Internal function used to create a VTK data array from another
VTK array given the VTK array type.
"""
return vtk.vtkDataArray.CreateDataArray(vtk_arr_type)
def numpy_to_vtk(num_array, deep=0, array_type=None):
"""Converts a contiguous real numpy Array to a VTK array object.
This function only works for real arrays that are contiguous.
Complex arrays are NOT handled. It also works for multi-component
arrays. However, only 1, and 2 dimensional arrays are supported.
This function is very efficient, so large arrays should not be a
problem.
If the second argument is set to 1, the array is deep-copied from
from numpy. This is not as efficient as the default behavior
(shallow copy) and uses more memory but detaches the two arrays
such that the numpy array can be released.
    WARNING: You must maintain a reference to the passed numpy array; if
    the numpy data is gc'd, VTK will point to garbage, which in the best
    case gives you a segfault.
Parameters
----------
- num_array : a contiguous 1D or 2D, real numpy array.
"""
z = numpy.asarray(num_array)
shape = z.shape
assert z.flags.contiguous, 'Only contiguous arrays are supported.'
assert len(shape) < 3, \
"Only arrays of dimensionality 2 or lower are allowed!"
assert not numpy.issubdtype(z.dtype, complex), \
"Complex numpy arrays cannot be converted to vtk arrays."\
"Use real() or imag() to get a component of the array before"\
" passing it to vtk."
# First create an array of the right type by using the typecode.
if array_type:
vtk_typecode = array_type
else:
vtk_typecode = get_vtk_array_type(z.dtype)
result_array = create_vtk_array(vtk_typecode)
    # Fix up the shape in case the array is empty or scalar.
    if len(shape) == 0:
        shape = (0,)
# Find the shape and set number of components.
if len(shape) == 1:
result_array.SetNumberOfComponents(1)
else:
result_array.SetNumberOfComponents(shape[1])
result_array.SetNumberOfTuples(shape[0])
# Ravel the array appropriately.
arr_dtype = get_numpy_array_type(vtk_typecode)
if numpy.issubdtype(z.dtype, arr_dtype) or \
z.dtype == numpy.dtype(arr_dtype):
z_flat = numpy.ravel(z)
else:
z_flat = numpy.ravel(z).astype(arr_dtype)
# z_flat is now a standalone object with no references from the caller.
# As such, it will drop out of this scope and cause memory issues if we
# do not deep copy its data.
deep = 1
# Point the VTK array to the numpy data. The last argument (1)
# tells the array not to deallocate.
result_array.SetVoidArray(z_flat, len(z_flat), 1)
if deep:
copy = result_array.NewInstance()
copy.DeepCopy(result_array)
result_array = copy
return result_array
def numpy_to_vtkIdTypeArray(num_array, deep=0):
isize = vtk.vtkIdTypeArray().GetDataTypeSize()
dtype = num_array.dtype
if isize == 4:
if dtype != numpy.int32:
raise ValueError, \
'Expecting a numpy.int32 array, got %s instead.' % (str(dtype))
else:
if dtype != numpy.int64:
raise ValueError, \
'Expecting a numpy.int64 array, got %s instead.' % (str(dtype))
return numpy_to_vtk(num_array, deep, vtk.VTK_ID_TYPE)
def vtk_to_numpy(vtk_array):
"""Converts a VTK data array to a numpy array.
Given a subclass of vtkDataArray, this function returns an
appropriate numpy array containing the same data -- it actually
points to the same data.
WARNING: This does not work for bit arrays.
Parameters
----------
- vtk_array : `vtkDataArray`
The VTK data array to be converted.
"""
typ = vtk_array.GetDataType()
assert typ in get_vtk_to_numpy_typemap().keys(), \
"Unsupported array type %s"%typ
assert typ != vtk.VTK_BIT, 'Bit arrays are not supported.'
shape = vtk_array.GetNumberOfTuples(), \
vtk_array.GetNumberOfComponents()
# Get the data via the buffer interface
dtype = get_numpy_array_type(typ)
try:
result = numpy.frombuffer(vtk_array, dtype=dtype)
except ValueError:
# http://mail.scipy.org/pipermail/numpy-tickets/2011-August/005859.html
# numpy 1.5.1 (and maybe earlier) has a bug where if frombuffer is
# called with an empty buffer, it throws ValueError exception. This
# handles that issue.
if shape[0] == 0:
# create an empty array with the given shape.
result = numpy.empty(shape, dtype=dtype)
else:
raise
if shape[1] == 1:
shape = (shape[0], )
try:
result.shape = shape
except ValueError:
if shape[0] == 0:
# Refer to https://github.com/numpy/numpy/issues/2536 .
# For empty array, reshape fails. Create the empty array explicitly
# if that happens.
result = numpy.empty(shape, dtype=dtype)
else: raise
return result
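# Usage sketch (assumes a VTK build with Python wrapping on the path):
#
#   import numpy
#   a = numpy.arange(12.0).reshape(4, 3)   # 4 tuples of 3 components each
#   va = numpy_to_vtk(a, deep=1)           # -> a vtkDoubleArray
#   assert va.GetNumberOfTuples() == 4
#   back = vtk_to_numpy(va)                # shares memory with va (see WARNING)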
| bsd-3-clause |
andymiller/vae-flow | vae/nnet.py | 1 | 3389 | import numpy as np
import tensorflow as tf
### constructing and composing layers
def make_layer(dot, activation):
def layer(W, b):
def apply(h):
return activation(dot(h, W) + b)
return apply
return layer
# TODO - a stochastic layer is really a composition of a deterministic layer
# and adding some gaussian noise, h_L => stochastic => h_{L-1}: T(h_L) + G \eta
def make_stochastic_layer(dot, activation):
def layer(G, b):
eta_dim = G.get_shape()[1].value
def apply(h):
h0 = h.get_shape()[0].value
eta = tf.random_normal(shape=[h0, eta_dim])
return h + dot(eta, G)
return apply
return layer
def compose(layers):
return reduce(lambda f,g: lambda h: g(f(h)), layers, lambda x: x)
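# Illustrative note: compose applies layers left to right, so
# compose([f, g])(h) == g(f(h)); an empty list yields the identity.
#
#   >>> compose([lambda x: x + 1, lambda x: x * 2])(3)
#   8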
### initialization
def init_tensor(shape, name=None, scale=.15):
init = tf.truncated_normal(shape, stddev=scale, dtype=tf.float32)
return tf.Variable(init, name=name, dtype=np.float32)
def init_layer(shape, layer, layer_name="", scale=.15):
if layer is not stochastic_tanh_layer:
return init_tensor(shape, name="%s_W"%layer_name, scale=scale), \
init_tensor([shape[1]], name="%s_b"%layer_name, scale=scale)
else:
return init_stochastic_layer(shape, layer_name)
def init_stochastic_layer(shape, layer_name=""):
return init_tensor(shape, name="%s_G"%layer_name), None
### tensorflow-backed layers
tanh_layer = make_layer(tf.matmul, tf.tanh)
sigmoid_layer = make_layer(tf.matmul, tf.nn.sigmoid)
relu_layer = make_layer(tf.matmul, tf.nn.relu)
linear_layer = make_layer(tf.matmul, lambda x: x)
stochastic_tanh_layer = make_stochastic_layer(tf.matmul, tf.tanh)
### numpy-backed layers
numpy_tanh_layer = make_layer(np.dot, np.tanh)
numpy_sigmoid_layer = make_layer(np.dot, lambda x: 1./(1. + np.exp(-x)))
numpy_linear_layer = make_layer(np.dot, lambda x: x)
### mlp-maker
def make_mlp(layers, out_dim, init_scale=.1):
"""
Follows the convention:
Each layer in the MLP is specified (hidden-dim, layer-type)
    so layers [(10, tanh), (20, tanh)] with output size 2
implies a set of parameters
W_1 (10 x 20), b_1 (20 x 1)
W_2 (20 x 2), b_2 (2 x 1)
"""
#first construct all non-output layers
hidden_dims = [l[0] for l in layers]
shapes = zip(hidden_dims[:-1], hidden_dims[1:])
hidden_nonlins = [l[1] for l in layers[:-1]]
hidden_params = [init_layer(shape, l, layer_name="%d"%i, scale=init_scale)
for i, (shape, l) in enumerate(zip(shapes, hidden_nonlins))]
# construct (potentially) multi-output layer
out_nonlins = layers[-1][1:]
out_shape = (hidden_dims[-1], out_dim)
out_params = [init_layer(out_shape, l, layer_name="out_%d"%i, scale=init_scale)
for i, l in enumerate(out_nonlins)]
# string together hidden layers and output layer
hmlp = compose(l(W, b) for (W, b), l in zip(hidden_params, hidden_nonlins))
output = [l(W,b) for (W,b), l in zip(out_params, out_nonlins)]
# TODO find a nicer way to output a list vs single
if len(output) == 1:
def mlp(X):
return output[0](hmlp(X))
else:
def mlp(X):
h = hmlp(X)
return [o(h) for o in output]
return mlp, hidden_params + [out_params]
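# Illustrative usage of make_mlp (not part of the original module); the
# placeholder shape and layer sizes are assumptions for the sketch, and the
# TF1-era API matches the rest of this file.
def _example_mlp():
    X = tf.placeholder(tf.float32, shape=[None, 10])   # 10-dim inputs
    # one 10 -> 20 tanh hidden layer, then a 20 -> 2 linear output head
    mlp, params = make_mlp([(10, tanh_layer), (20, linear_layer)], out_dim=2)
    return mlp(X), params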
| mit |
d9w/6858-android-intents | analyzer/androguard/core/androgen.py | 7 | 7936 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from androguard.core import androconf
from androguard.core.bytecodes import jvm
from androguard.core.bytecodes import dvm
from androguard.core.bytecodes import apk
from androguard.core.analysis import analysis
from androguard.core.analysis import ganalysis
class BC :
def __init__(self, bc) :
self.__bc = bc
def get_vm(self) :
return self.__bc
def get_analysis(self) :
return self.__a
def analyze(self) :
self.__a = analysis.uVMAnalysis( self.__bc )
self.__bc.set_vmanalysis( self.__a )
self.__g = ganalysis.GVMAnalysis( self.__a, None )
self.__bc.set_gvmanalysis( self.__g )
self.__bc.create_xref()
self.__bc.create_dref()
def _get(self, val, name) :
l = []
r = getattr(self.__bc, val)(name)
for i in r :
l.append( i )
return l
def _gets(self, val) :
l = []
r = getattr(self.__bc, val)()
for i in r :
l.append( i )
return l
def gets(self, name) :
return self._gets("get_" + name)
def get(self, val, name) :
return self._get("get_" + val, name)
def insert_direct_method(self, name, method) :
return self.__bc.insert_direct_method(name, method)
def insert_craft_method(self, name, proto, codes) :
return self.__bc.insert_craft_method( name, proto, codes)
def show(self) :
self.__bc.show()
def pretty_show(self) :
self.__bc.pretty_show()
def save(self) :
return self.__bc.save()
def __getattr__(self, value) :
return getattr(self.__bc, value)
class Androguard:
"""Androguard is the main object to abstract and manage differents formats
@param files : a list of filenames (filename must be terminated by .class or .dex)
@param raw : specify if the filename is in fact a raw buffer (default : False) #FIXME
"""
def __init__(self, files, raw=False) :
self.__files = files
self.__orig_raw = {}
for i in self.__files :
self.__orig_raw[ i ] = open(i, "rb").read()
self.__bc = []
self._analyze()
def _iterFlatten(self, root):
if isinstance(root, (list, tuple)):
for element in root :
for e in self._iterFlatten(element) :
yield e
else:
yield root
def _analyze(self) :
for i in self.__files :
ret_type = androconf.is_android( i )
if ret_type == "APK" :
x = apk.APK( i )
bc = dvm.DalvikVMFormat( x.get_dex() )
elif ret_type == "DEX" :
bc = dvm.DalvikVMFormat( open(i, "rb").read() )
elif ret_type == "DEY" :
bc = dvm.DalvikOdexVMFormat( open(i, "rb").read() )
elif ret_type == "ELF" :
from androguard.core.binaries import elf
bc = elf.ELF( open(i, "rb").read() )
else :
raise( "Unknown format" )
if isinstance(bc, list) :
for j in bc :
self.__bc.append( (j[0], BC( jvm.JVMFormat(j[1]) ) ) )
else :
self.__bc.append( (i, BC( bc )) )
def ianalyze(self) :
for i in self.get_bc() :
i[1].analyze()
def get_class(self, class_name) :
for _, bc in self.__bc :
if bc.get_class(class_name) == True :
return bc
return None
def get_raw(self) :
"""Return raw format of all file"""
l = []
for _, bc in self.__bc :
l.append( bc._get_raw() )
return l
def get_orig_raw(self) :
return self.__orig_raw
def get_method_descriptor(self, class_name, method_name, descriptor) :
"""
Return the specific method
@param class_name : the class name of the method
@param method_name : the name of the method
@param descriptor : the descriptor of the method
"""
for file_name, bc in self.__bc :
x = bc.get_method_descriptor( class_name, method_name, descriptor )
if x != None :
return x, bc
return None, None
def get_field_descriptor(self, class_name, field_name, descriptor) :
"""
Return the specific field
@param class_name : the class name of the field
@param field_name : the name of the field
@param descriptor : the descriptor of the field
"""
for file_name, bc in self.__bc :
x = bc.get_field_descriptor( class_name, field_name, descriptor )
if x != None :
return x, bc
return None, None
def get(self, name, val) :
"""
Return the specific value for all files
@param name :
@param val :
"""
if name == "file" :
for file_name, bc in self.__bc :
if file_name == val :
return bc
return None
else :
l = []
for file_name, bc in self.__bc :
l.append( bc.get( name, val ) )
return list( self._iterFlatten(l) )
def gets(self, name) :
"""
Return the specific value for all files
@param name :
"""
l = []
for file_name, bc in self.__bc :
l.append( bc.gets( name ) )
return list( self._iterFlatten(l) )
def get_vms(self) :
return [ i[1].get_vm() for i in self.__bc ]
def get_bc(self) :
return self.__bc
def show(self) :
"""
Display all files
"""
for _, bc in self.__bc :
bc.show()
def pretty_show(self) :
"""
Display all files
"""
for _, bc in self.__bc :
bc.pretty_show()
class AndroguardS :
"""AndroguardS is the main object to abstract and manage differents formats but only per filename. In fact this class is just a wrapper to the main class Androguard
@param filename : the filename to use (filename must be terminated by .class or .dex)
@param raw : specify if the filename is a raw buffer (default : False)
"""
def __init__(self, filename, raw=False) :
self.__filename = filename
self.__orig_a = Androguard( [ filename ], raw )
self.__a = self.__orig_a.get( "file", filename )
def get_orig_raw(self) :
return self.__orig_a.get_orig_raw()[ self.__filename ]
def get_vm(self) :
"""
This method returns the VMFormat which correspond to the file
@rtype: L{jvm.JVMFormat} or L{dvm.DalvikVMFormat}
"""
return self.__a.get_vm()
def save(self) :
"""
Return the original format (with the modifications) into raw format
@rtype: string
"""
return self.__a.save()
def __getattr__(self, value) :
try :
return getattr(self.__orig_a, value)
except AttributeError :
return getattr(self.__a, value)
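# Illustrative sketch (not part of the original module): typical use of
# AndroguardS against a DEX file; "classes.dex" is a placeholder path.
#
#   a = AndroguardS("classes.dex")
#   a.ianalyze()            # run the uVMAnalysis / GVMAnalysis passes
#   vm = a.get_vm()         # the underlying DalvikVMFormat
#   for method in vm.get_methods():
#       print method.get_name()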
| mit |
jruiperezv/ANALYSE | lms/djangoapps/courseware/features/staff_debug_info.py | 173 | 1472 | """
Steps for staff_debug_info.feature lettuce tests
"""
from django.contrib.auth.models import User
from lettuce import world, step
from common import create_course, course_id
from courseware.courses import get_course_by_id
from instructor.access import allow_access
@step(u'i am staff member for the course "([^"]*)"$')
def i_am_staff_member_for_the_course(step, course_number):
# Create the course
create_course(step, course_number)
course = get_course_by_id(course_id(course_number))
# Create the user
world.create_user('robot', 'test')
user = User.objects.get(username='robot')
# Add user as a course staff.
allow_access(course, user, "staff")
world.log_in(username='robot', password='test')
@step(u'I can view staff debug info')
def view_staff_debug_info(step):
css_selector = "a.instructor-info-action"
world.css_click(css_selector)
world.wait_for_visible("section.staff-modal")
@step(u'I can reset student attempts')
def reset_student_attempts(step):
css_selector = "a.staff-debug-reset"
world.css_click(css_selector)
world.wait_for_ajax_complete()
@step(u'I cannot see delete student state link')
def cannot_see_delete_student_state_link(step):
css_selector = "a.staff-debug-sdelete"
world.is_css_not_present(css_selector)
@step(u'I cannot see rescore student submission link')
def cannot_see_rescore_student_submission_link(step):
css_selector = "a.staff-debug-rescore"
world.is_css_not_present(css_selector)
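# Example scenario these steps would match (illustrative; the real
# staff_debug_info.feature file may differ):
#
#   Scenario: Staff can reset attempts but cannot delete state or rescore
#       Given i am staff member for the course "model_course"
#       Then I can view staff debug info
#       And I can reset student attempts
#       And I cannot see delete student state link
#       And I cannot see rescore student submission link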
| agpl-3.0 |
andrebellafronte/stoq | stoqlib/lib/pdf.py | 4 | 1127 | # -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2011 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
import os
def pdftohtml(filename, output):
# FIXME: Change this to use popen
return os.system('pdftohtml -stdout -xml -noframes -i -q %s | '
'xmllint --format - > %s' % (filename, output))
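# A possible shape for the FIXME above (illustrative sketch, not wired in):
# running the pipeline through subprocess avoids shell interpolation of the
# file names; behaviour is assumed equivalent to the os.system() call.
def pdftohtml_subprocess(filename, output):
    import subprocess
    with open(output, 'w') as out:
        pdf = subprocess.Popen(
            ['pdftohtml', '-stdout', '-xml', '-noframes', '-i', '-q', filename],
            stdout=subprocess.PIPE)
        lint = subprocess.Popen(['xmllint', '--format', '-'],
                                stdin=pdf.stdout, stdout=out)
        pdf.stdout.close()  # let pdftohtml get SIGPIPE if xmllint exits early
        return lint.wait()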
| gpl-2.0 |
steveb/heat | heat/tests/test_grouputils.py | 6 | 3180 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import grouputils
from heat.common import template_format
from heat.engine import rsrc_defn
from heat.tests import common
from heat.tests import utils
nested_stack = '''
heat_template_version: 2013-05-23
resources:
r0:
type: OverwrittenFnGetRefIdType
r1:
type: OverwrittenFnGetRefIdType
'''
class GroupUtilsTest(common.HeatTestCase):
def test_non_nested_resource(self):
group = mock.Mock()
self.patchobject(group, 'nested', return_value=None)
self.assertEqual(0, grouputils.get_size(group))
self.assertEqual([], grouputils.get_members(group))
self.assertEqual([], grouputils.get_member_refids(group))
self.assertEqual([], grouputils.get_member_names(group))
def test_normal_group(self):
group = mock.Mock()
t = template_format.parse(nested_stack)
stack = utils.parse_stack(t)
# group size
self.patchobject(group, 'nested', return_value=stack)
self.assertEqual(2, grouputils.get_size(group))
# member list (sorted)
members = [r for r in six.itervalues(stack)]
expected = sorted(members, key=lambda r: (r.created_time, r.name))
actual = grouputils.get_members(group)
self.assertEqual(expected, actual)
# refids
actual_ids = grouputils.get_member_refids(group)
self.assertEqual(['ID-r0', 'ID-r1'], actual_ids)
partial_ids = grouputils.get_member_refids(group, exclude=['ID-r1'])
self.assertEqual(['ID-r0'], partial_ids)
# names
names = grouputils.get_member_names(group)
self.assertEqual(['r0', 'r1'], names)
# defn snippets as list
expected = rsrc_defn.ResourceDefinition(
None,
"OverwrittenFnGetRefIdType")
member_defs = grouputils.get_member_definitions(group)
self.assertEqual([(x, expected) for x in names], member_defs)
def test_group_with_failed_members(self):
group = mock.Mock()
t = template_format.parse(nested_stack)
stack = utils.parse_stack(t)
self.patchobject(group, 'nested', return_value=stack)
# Just failed for whatever reason
rsrc_err = stack.resources['r0']
rsrc_err.status = rsrc_err.FAILED
rsrc_ok = stack.resources['r1']
self.assertEqual(1, grouputils.get_size(group))
self.assertEqual([rsrc_ok], grouputils.get_members(group))
self.assertEqual(['ID-r1'], grouputils.get_member_refids(group))
self.assertEqual(['r1'], grouputils.get_member_names(group))
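# Behaviour summarised by the tests above (illustrative): for a group whose
# nested stack holds r0 and r1,
#   grouputils.get_size(group)          -> 2
#   grouputils.get_member_names(group)  -> ['r0', 'r1']
#   grouputils.get_member_refids(group) -> ['ID-r0', 'ID-r1']
# and members in the FAILED state are excluded from all of the above.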
| apache-2.0 |
GastonLab/ddb-scripts | defunct/workflow-RNA-Seq_Salmon_Unpaired.py | 3 | 1506 | #!/usr/bin/env python
# Standard packages
import sys
import argparse
# Third-party packages
from toil.job import Job
# Package methods
from ddb import configuration
from ddb_ngsflow import pipeline
from ddb_ngsflow.rna import salmon
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
args.logLevel = "INFO"
sys.stdout.write("Parsing configuration data\n")
config = configuration.configure_runtime(args.configuration)
sys.stdout.write("Parsing sample data\n")
samples = configuration.configure_samples(args.samples_file, config)
# Workflow Graph definition. The following workflow definition should create a valid Directed Acyclic Graph (DAG)
root_job = Job.wrapJobFn(pipeline.spawn_batch_jobs, cores=1)
# Per sample jobs
for sample in samples:
# Alignment and Refinement Stages
align_job = Job.wrapJobFn(salmon.salmonEM_unpaired, config, sample, samples,
cores=int(config['salmon']['num_cores']),
memory="{}G".format(config['salmon']['max_mem']))
# Create workflow from created jobs
root_job.addChild(align_job)
# Start workflow execution
Job.Runner.startToil(root_job, args)
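# Example invocation (illustrative; the file names are placeholders). The
# job-store argument and options such as --workDir come from
# Job.Runner.addToilOptions(parser) above:
#
#   python workflow-RNA-Seq_Salmon_Unpaired.py \
#       -s samples.config -c ngsflow.config \
#       --workDir /tmp/toil-work file:my-jobstore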
| mit |
mosbasik/buzhug | javasrc/lib/Jython/Lib/encodings/latin_1.py | 853 | 1264 | """ Python 'latin-1' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.latin_1_encode
decode = codecs.latin_1_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.latin_1_encode(input,self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.latin_1_decode(input,self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
class StreamConverter(StreamWriter,StreamReader):
encode = codecs.latin_1_decode
decode = codecs.latin_1_encode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
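# Illustrative use (not part of the codec module): the registry entry above
# is what backs str/unicode conversions under the 'latin-1' alias, e.g.
#   u'caf\xe9'.encode('latin-1')   # -> 'caf\xe9'
#   'caf\xe9'.decode('latin-1')    # -> u'caf\xe9'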
| bsd-3-clause |
jn7163/django | tests/flatpages_tests/test_templatetags.py | 309 | 7111 | from django.contrib.auth.models import AnonymousUser, User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageTemplateTagTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
def test_get_flatpages_tag(self):
"The flatpage template tag retrieves unregistered prefixed flatpages by default"
out = Template(
"{% load flatpages %}"
"{% get_flatpages as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_anon_user(self):
"The flatpage template tag retrieves unregistered flatpages for an anonymous user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages for anonuser as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'anonuser': AnonymousUser()
}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_user(self):
"The flatpage template tag retrieves all flatpages for an authenticated user"
me = User.objects.create_user('testuser', '[email protected]', 's3krit')
out = Template(
"{% load flatpages %}"
"{% get_flatpages for me as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'me': me
}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,Sekrit Nested Flatpage,Sekrit Flatpage,")
def test_get_flatpages_with_prefix(self):
"The flatpage template tag retrieves unregistered prefixed flatpages by default"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_anon_user(self):
"The flatpage template tag retrieves unregistered prefixed flatpages for an anonymous user"
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for anonuser as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'anonuser': AnonymousUser()
}))
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_user(self):
"The flatpage template tag retrieve prefixed flatpages for an authenticated user"
me = User.objects.create_user('testuser', '[email protected]', 's3krit')
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for me as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'me': me
}))
self.assertEqual(out, "A Nested Flatpage,Sekrit Nested Flatpage,")
def test_get_flatpages_with_variable_prefix(self):
"The prefix for the flatpage template tag can be a template variable"
out = Template(
"{% load flatpages %}"
"{% get_flatpages location_prefix as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({
'location_prefix': '/location/'
}))
self.assertEqual(out, "A Nested Flatpage,")
def test_parsing_errors(self):
"There are various ways that the flatpages template tag won't parse"
render = lambda t: Template(t).render(Context())
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages as %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages cheesecake flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages as flatpages asdf %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages cheesecake user as flatpages %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages for user as flatpages asdf %}")
self.assertRaises(TemplateSyntaxError, render,
"{% load flatpages %}{% get_flatpages prefix for user as flatpages asdf %}")
| bsd-3-clause |
nkgilley/home-assistant | homeassistant/components/onvif/camera.py | 6 | 6736 | """Support for ONVIF Cameras with FFmpeg as decoder."""
import asyncio
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
import requests
from requests.auth import HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.components.ffmpeg import CONF_EXTRA_ARGUMENTS, DATA_FFMPEG
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from .base import ONVIFBaseEntity
from .const import (
ABSOLUTE_MOVE,
ATTR_CONTINUOUS_DURATION,
ATTR_DISTANCE,
ATTR_MOVE_MODE,
ATTR_PAN,
ATTR_PRESET,
ATTR_SPEED,
ATTR_TILT,
ATTR_ZOOM,
CONF_RTSP_TRANSPORT,
CONTINUOUS_MOVE,
DIR_DOWN,
DIR_LEFT,
DIR_RIGHT,
DIR_UP,
DOMAIN,
GOTOPRESET_MOVE,
LOGGER,
RELATIVE_MOVE,
SERVICE_PTZ,
ZOOM_IN,
ZOOM_OUT,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ONVIF camera video stream."""
platform = entity_platform.current_platform.get()
# Create PTZ service
platform.async_register_entity_service(
SERVICE_PTZ,
{
vol.Optional(ATTR_PAN): vol.In([DIR_LEFT, DIR_RIGHT]),
vol.Optional(ATTR_TILT): vol.In([DIR_UP, DIR_DOWN]),
vol.Optional(ATTR_ZOOM): vol.In([ZOOM_OUT, ZOOM_IN]),
vol.Optional(ATTR_DISTANCE, default=0.1): cv.small_float,
vol.Optional(ATTR_SPEED, default=0.5): cv.small_float,
vol.Optional(ATTR_MOVE_MODE, default=RELATIVE_MOVE): vol.In(
[CONTINUOUS_MOVE, RELATIVE_MOVE, ABSOLUTE_MOVE, GOTOPRESET_MOVE]
),
vol.Optional(ATTR_CONTINUOUS_DURATION, default=0.5): cv.small_float,
vol.Optional(ATTR_PRESET, default="0"): cv.string,
},
"async_perform_ptz",
)
device = hass.data[DOMAIN][config_entry.unique_id]
async_add_entities(
[ONVIFCameraEntity(device, profile) for profile in device.profiles]
)
return True
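# Example call to the PTZ service registered above (illustrative YAML; the
# entity_id is a placeholder and the values assume the constants in .const):
#
#   service: onvif.ptz
#   data:
#     entity_id: camera.front_door_mainstream
#     move_mode: ContinuousMove
#     pan: LEFT
#     continuous_duration: 1.0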
class ONVIFCameraEntity(ONVIFBaseEntity, Camera):
"""Representation of an ONVIF camera."""
def __init__(self, device, profile):
"""Initialize ONVIF camera entity."""
ONVIFBaseEntity.__init__(self, device, profile)
Camera.__init__(self)
self.stream_options[CONF_RTSP_TRANSPORT] = device.config_entry.options.get(
CONF_RTSP_TRANSPORT
)
self._stream_uri = None
self._snapshot_uri = None
@property
def supported_features(self) -> int:
"""Return supported features."""
return SUPPORT_STREAM
@property
def name(self) -> str:
"""Return the name of this camera."""
return f"{self.device.name} - {self.profile.name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
if self.profile.index:
return f"{self.device.info.mac or self.device.info.serial_number}_{self.profile.index}"
return self.device.info.mac or self.device.info.serial_number
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self.device.max_resolution == self.profile.video.resolution.width
async def stream_source(self):
"""Return the stream source."""
return self._stream_uri
async def async_camera_image(self):
"""Return a still image response from the camera."""
image = None
if self.device.capabilities.snapshot:
auth = None
if self.device.username and self.device.password:
auth = HTTPDigestAuth(self.device.username, self.device.password)
def fetch():
"""Read image from a URL."""
try:
response = requests.get(self._snapshot_uri, timeout=5, auth=auth)
if response.status_code < 300:
return response.content
except requests.exceptions.RequestException as error:
LOGGER.error(
"Fetch snapshot image failed from %s, falling back to FFmpeg; %s",
self.device.name,
error,
)
return None
image = await self.hass.async_add_executor_job(fetch)
if image is None:
ffmpeg = ImageFrame(self.hass.data[DATA_FFMPEG].binary, loop=self.hass.loop)
image = await asyncio.shield(
ffmpeg.get_image(
self._stream_uri,
output_format=IMAGE_JPEG,
extra_cmd=self.device.config_entry.options.get(
CONF_EXTRA_ARGUMENTS
),
)
)
return image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
LOGGER.debug("Handling mjpeg stream from camera '%s'", self.device.name)
ffmpeg_manager = self.hass.data[DATA_FFMPEG]
stream = CameraMjpeg(ffmpeg_manager.binary, loop=self.hass.loop)
await stream.open_camera(
self._stream_uri,
extra_cmd=self.device.config_entry.options.get(CONF_EXTRA_ARGUMENTS),
)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass,
request,
stream_reader,
ffmpeg_manager.ffmpeg_stream_content_type,
)
finally:
await stream.close()
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
uri_no_auth = await self.device.async_get_stream_uri(self.profile)
self._stream_uri = uri_no_auth.replace(
"rtsp://", f"rtsp://{self.device.username}:{self.device.password}@", 1
)
if self.device.capabilities.snapshot:
self._snapshot_uri = await self.device.async_get_snapshot_uri(self.profile)
async def async_perform_ptz(
self,
distance,
speed,
move_mode,
continuous_duration,
preset,
pan=None,
tilt=None,
zoom=None,
) -> None:
"""Perform a PTZ action on the camera."""
await self.device.async_perform_ptz(
self.profile,
distance,
speed,
move_mode,
continuous_duration,
preset,
pan,
tilt,
zoom,
)
| apache-2.0 |