repo_name: stringlengths 5-100 | path: stringlengths 4-299 | copies: stringclasses 990 values | size: stringlengths 4-7 | content: stringlengths 666-1.03M | license: stringclasses 15 values | hash: int64 -9,223,351,895,964,839,000 to 9,223,297,778B | line_mean: float64 3.17-100 | line_max: int64 7-1k | alpha_frac: float64 0.25-0.98 | autogenerated: bool 1 class
---|---|---|---|---|---|---|---|---|---|---|
peterm-itr/edx-platform | cms/djangoapps/contentstore/views/tests/test_group_configurations.py | 8 | 23216 | """
Group Configuration Tests.
"""
import json
from mock import patch
from contentstore.utils import reverse_course_url, reverse_usage_url
from contentstore.views.component import SPLIT_TEST_COMPONENT_TYPE
from contentstore.views.course import GroupConfiguration
from contentstore.tests.utils import CourseTestCase
from xmodule.partitions.partitions import Group, UserPartition
from xmodule.modulestore.tests.factories import ItemFactory
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
GROUP_CONFIGURATION_JSON = {
u'name': u'Test name',
u'description': u'Test description',
u'groups': [
{u'name': u'Group A'},
{u'name': u'Group B'},
],
}
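# NOTE (illustrative, not part of the original test module): this dict is the raw
# request payload the tests POST to the group configurations list handler; the
# expected server response (see test_can_create_group_configuration below) is the
# same data with generated ``id`` fields and ``version`` numbers added.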
# pylint: disable=no-member
class HelperMethods(object):
"""
Mixin that provides useful methods for Group Configuration tests.
"""
def _create_content_experiment(self, cid=-1, name_suffix=''):
"""
Create content experiment.
Assign Group Configuration to the experiment if cid is provided.
"""
vertical = ItemFactory.create(
category='vertical',
parent_location=self.course.location,
display_name='Test Unit {}'.format(name_suffix)
)
c0_url = self.course.id.make_usage_key("vertical", "split_test_cond0")
c1_url = self.course.id.make_usage_key("vertical", "split_test_cond1")
c2_url = self.course.id.make_usage_key("vertical", "split_test_cond2")
split_test = ItemFactory.create(
category='split_test',
parent_location=vertical.location,
user_partition_id=cid,
display_name='Test Content Experiment {}'.format(name_suffix),
group_id_to_child={"0": c0_url, "1": c1_url, "2": c2_url}
)
ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 0 vertical",
location=c0_url,
)
ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 1 vertical",
location=c1_url,
)
ItemFactory.create(
parent_location=split_test.location,
category="vertical",
display_name="Condition 2 vertical",
location=c2_url,
)
partitions_json = [p.to_json() for p in self.course.user_partitions]
self.client.ajax_post(
reverse_usage_url("xblock_handler", split_test.location),
data={'metadata': {'user_partitions': partitions_json}}
)
self.save_course()
return (vertical, split_test)
def _add_user_partitions(self, count=1):
"""
Create user partitions for the course.
"""
partitions = [
UserPartition(
i, 'Name ' + str(i), 'Description ' + str(i), [Group(0, 'Group A'), Group(1, 'Group B'), Group(2, 'Group C')]
) for i in xrange(0, count)
]
self.course.user_partitions = partitions
self.save_course()
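    # Illustrative note (not in the original source): partitions created by the helper
    # above get ids 0..count-1, names 'Name <i>', descriptions 'Description <i>' and the
    # three groups A/B/C, which is exactly the shape the usage-info tests below expect.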
# pylint: disable=no-member
class GroupConfigurationsBaseTestCase(object):
"""
Mixin with base test cases for the group configurations.
"""
def _remove_ids(self, content):
"""
Remove ids from the response. We cannot predict IDs, because they're
generated randomly.
        We use this method to clean up the response when creating new group configurations.
        Returns a tuple that contains the removed group configuration ID and group IDs.
"""
configuration_id = content.pop("id")
group_ids = [group.pop("id") for group in content["groups"]]
return (configuration_id, group_ids)
def test_required_fields_are_absent(self):
"""
        Test the response when required fields are absent.
"""
bad_jsons = [
# must have name of the configuration
{
u'description': 'Test description',
u'groups': [
{u'name': u'Group A'},
{u'name': u'Group B'},
],
},
# must have at least one group
{
u'name': u'Test name',
u'description': u'Test description',
u'groups': [],
},
# an empty json
{},
]
for bad_json in bad_jsons:
response = self.client.post(
self._url(),
data=json.dumps(bad_json),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content)
self.assertIn("error", content)
def test_invalid_json(self):
"""
Test invalid json handling.
"""
# No property name.
invalid_json = "{u'name': 'Test Name', []}"
response = self.client.post(
self._url(),
data=invalid_json,
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 400)
self.assertNotIn("Location", response)
content = json.loads(response.content)
self.assertIn("error", content)
# pylint: disable=no-member
class GroupConfigurationsListHandlerTestCase(CourseTestCase, GroupConfigurationsBaseTestCase, HelperMethods):
"""
Test cases for group_configurations_list_handler.
"""
def setUp(self):
"""
Set up GroupConfigurationsListHandlerTestCase.
"""
super(GroupConfigurationsListHandlerTestCase, self).setUp()
def _url(self):
"""
Return url for the handler.
"""
return reverse_course_url('group_configurations_list_handler', self.course.id)
def test_view_index_ok(self):
"""
Basic check that the groups configuration page responds correctly.
"""
self.course.user_partitions = [
UserPartition(0, 'First name', 'First description', [Group(0, 'Group A'), Group(1, 'Group B'), Group(2, 'Group C')]),
]
self.save_course()
if SPLIT_TEST_COMPONENT_TYPE not in self.course.advanced_modules:
self.course.advanced_modules.append(SPLIT_TEST_COMPONENT_TYPE)
self.store.update_item(self.course, self.user.id)
response = self.client.get(self._url())
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'First name')
self.assertContains(response, 'Group C')
def test_view_index_disabled(self):
"""
Check that group configuration page is not displayed when turned off.
"""
if SPLIT_TEST_COMPONENT_TYPE in self.course.advanced_modules:
self.course.advanced_modules.remove(SPLIT_TEST_COMPONENT_TYPE)
self.store.update_item(self.course, self.user.id)
resp = self.client.get(self._url())
self.assertContains(resp, "module is disabled")
def test_unsupported_http_accept_header(self):
"""
        Test the response when a disallowed Accept header is present in the request.
"""
response = self.client.get(
self._url(),
HTTP_ACCEPT="text/plain",
)
self.assertEqual(response.status_code, 406)
def test_can_create_group_configuration(self):
"""
Test that you can create a group configuration.
"""
expected = {
u'description': u'Test description',
u'name': u'Test name',
u'version': 1,
u'groups': [
{u'name': u'Group A', u'version': 1},
{u'name': u'Group B', u'version': 1},
],
}
response = self.client.post(
self._url(),
data=json.dumps(GROUP_CONFIGURATION_JSON),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 201)
self.assertIn("Location", response)
content = json.loads(response.content)
configuration_id, group_ids = self._remove_ids(content) # pylint: disable=unused-variable
self.assertEqual(content, expected)
# IDs are unique
self.assertEqual(len(group_ids), len(set(group_ids)))
self.assertEqual(len(group_ids), 2)
self.reload_course()
# Verify that user_partitions in the course contains the new group configuration.
user_partititons = self.course.user_partitions
self.assertEqual(len(user_partititons), 1)
self.assertEqual(user_partititons[0].name, u'Test name')
self.assertEqual(len(user_partititons[0].groups), 2)
self.assertEqual(user_partititons[0].groups[0].name, u'Group A')
self.assertEqual(user_partititons[0].groups[1].name, u'Group B')
# pylint: disable=no-member
class GroupConfigurationsDetailHandlerTestCase(CourseTestCase, GroupConfigurationsBaseTestCase, HelperMethods):
"""
Test cases for group_configurations_detail_handler.
"""
ID = 0
def _url(self, cid=-1):
"""
Return url for the handler.
"""
cid = cid if cid > 0 else self.ID
return reverse_course_url(
'group_configurations_detail_handler',
self.course.id,
kwargs={'group_configuration_id': cid},
)
def test_can_create_new_group_configuration_if_it_is_not_exist(self):
"""
PUT new group configuration when no configurations exist in the course.
"""
expected = {
u'id': 999,
u'name': u'Test name',
u'description': u'Test description',
u'version': 1,
u'groups': [
{u'id': 0, u'name': u'Group A', u'version': 1},
{u'id': 1, u'name': u'Group B', u'version': 1},
],
u'usage': [],
}
response = self.client.put(
self._url(cid=999),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content)
self.assertEqual(content, expected)
self.reload_course()
# Verify that user_partitions in the course contains the new group configuration.
user_partititons = self.course.user_partitions
self.assertEqual(len(user_partititons), 1)
self.assertEqual(user_partititons[0].name, u'Test name')
self.assertEqual(len(user_partititons[0].groups), 2)
self.assertEqual(user_partititons[0].groups[0].name, u'Group A')
self.assertEqual(user_partititons[0].groups[1].name, u'Group B')
def test_can_edit_group_configuration(self):
"""
Edit group configuration and check its id and modified fields.
"""
self._add_user_partitions()
self.save_course()
expected = {
u'id': self.ID,
u'name': u'New Test name',
u'description': u'New Test description',
u'version': 1,
u'groups': [
{u'id': 0, u'name': u'New Group Name', u'version': 1},
{u'id': 2, u'name': u'Group C', u'version': 1},
],
u'usage': [],
}
response = self.client.put(
self._url(),
data=json.dumps(expected),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
content = json.loads(response.content)
self.assertEqual(content, expected)
self.reload_course()
# Verify that user_partitions is properly updated in the course.
user_partititons = self.course.user_partitions
self.assertEqual(len(user_partititons), 1)
self.assertEqual(user_partititons[0].name, u'New Test name')
self.assertEqual(len(user_partititons[0].groups), 2)
self.assertEqual(user_partititons[0].groups[0].name, u'New Group Name')
self.assertEqual(user_partititons[0].groups[1].name, u'Group C')
def test_can_delete_group_configuration(self):
"""
Delete group configuration and check user partitions.
"""
self._add_user_partitions(count=2)
self.save_course()
response = self.client.delete(
self._url(cid=0),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 204)
self.reload_course()
# Verify that user_partitions is properly updated in the course.
user_partititons = self.course.user_partitions
self.assertEqual(len(user_partititons), 1)
self.assertEqual(user_partititons[0].name, 'Name 1')
def test_cannot_delete_used_group_configuration(self):
"""
Cannot delete group configuration if it is in use.
"""
self._add_user_partitions(count=2)
self._create_content_experiment(cid=0)
response = self.client.delete(
self._url(cid=0),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
self.assertTrue(content['error'])
self.reload_course()
# Verify that user_partitions is still the same.
user_partititons = self.course.user_partitions
self.assertEqual(len(user_partititons), 2)
self.assertEqual(user_partititons[0].name, 'Name 0')
def test_cannot_delete_non_existent_group_configuration(self):
"""
        Cannot delete a group configuration if it doesn't exist.
"""
self._add_user_partitions(count=2)
response = self.client.delete(
self._url(cid=999),
content_type="application/json",
HTTP_ACCEPT="application/json",
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 404)
# Verify that user_partitions is still the same.
user_partititons = self.course.user_partitions
self.assertEqual(len(user_partititons), 2)
self.assertEqual(user_partititons[0].name, 'Name 0')
# pylint: disable=no-member
class GroupConfigurationsUsageInfoTestCase(CourseTestCase, HelperMethods):
"""
Tests for usage information of configurations.
"""
def setUp(self):
super(GroupConfigurationsUsageInfoTestCase, self).setUp()
def test_group_configuration_not_used(self):
"""
        Test that the right data structure is created when the group configuration is not used.
"""
self._add_user_partitions()
actual = GroupConfiguration.add_usage_info(self.course, self.store)
expected = [{
'id': 0,
'name': 'Name 0',
'description': 'Description 0',
'version': 1,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [],
}]
self.assertEqual(actual, expected)
def test_can_get_correct_usage_info(self):
"""
        Test that the group configurations JSON is updated correctly with usage information.
"""
self._add_user_partitions(count=2)
vertical, __ = self._create_content_experiment(cid=0, name_suffix='0')
self._create_content_experiment(name_suffix='1')
actual = GroupConfiguration.add_usage_info(self.course, self.store)
expected = [{
'id': 0,
'name': 'Name 0',
'description': 'Description 0',
'version': 1,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [{
'url': '/container/{}'.format(vertical.location),
'label': 'Test Unit 0 / Test Content Experiment 0',
'validation': None,
}],
}, {
'id': 1,
'name': 'Name 1',
'description': 'Description 1',
'version': 1,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [],
}]
self.assertEqual(actual, expected)
def test_can_use_one_configuration_in_multiple_experiments(self):
"""
        Test that multiple experiments are present in the usage info when they use the same
        group configuration.
"""
self._add_user_partitions()
vertical, __ = self._create_content_experiment(cid=0, name_suffix='0')
vertical1, __ = self._create_content_experiment(cid=0, name_suffix='1')
actual = GroupConfiguration.add_usage_info(self.course, self.store)
expected = [{
'id': 0,
'name': 'Name 0',
'description': 'Description 0',
'version': 1,
'groups': [
{'id': 0, 'name': 'Group A', 'version': 1},
{'id': 1, 'name': 'Group B', 'version': 1},
{'id': 2, 'name': 'Group C', 'version': 1},
],
'usage': [{
'url': '/container/{}'.format(vertical.location),
'label': 'Test Unit 0 / Test Content Experiment 0',
'validation': None,
}, {
'url': '/container/{}'.format(vertical1.location),
'label': 'Test Unit 1 / Test Content Experiment 1',
'validation': None,
}],
}]
self.assertEqual(actual, expected)
def test_can_handle_without_parent(self):
"""
        Test that the case where a split_test has no parent is handled.
"""
self._add_user_partitions()
# Create split test without parent.
with modulestore().branch_setting(ModuleStoreEnum.Branch.published_only):
orphan = modulestore().create_item(
ModuleStoreEnum.UserID.test,
self.course.id, 'split_test',
)
orphan.user_partition_id = 0
orphan.display_name = 'Test Content Experiment'
modulestore().update_item(orphan, ModuleStoreEnum.UserID.test)
self.save_course()
actual = GroupConfiguration.get_usage_info(self.course, self.store)
self.assertEqual(actual, {0: []})
class GroupConfigurationsValidationTestCase(CourseTestCase, HelperMethods):
"""
Tests for validation in Group Configurations.
"""
def setUp(self):
super(GroupConfigurationsValidationTestCase, self).setUp()
@patch('xmodule.split_test_module.SplitTestDescriptor.validate_split_test')
def verify_validation_add_usage_info(self, expected_result, mocked_message, mocked_validation_messages):
"""
        Helper method for testing that validation information is present after add_usage_info.
"""
self._add_user_partitions()
split_test = self._create_content_experiment(cid=0, name_suffix='0')[1]
validation = StudioValidation(split_test.location)
validation.add(mocked_message)
mocked_validation_messages.return_value = validation
group_configuration = GroupConfiguration.add_usage_info(self.course, self.store)[0]
self.assertEqual(expected_result.to_json(), group_configuration['usage'][0]['validation'])
def test_error_message_present(self):
"""
Tests if validation message is present (error case).
"""
mocked_message = StudioValidationMessage(StudioValidationMessage.ERROR, u"Validation message")
expected_result = StudioValidationMessage(
StudioValidationMessage.ERROR, u"This content experiment has issues that affect content visibility."
)
self.verify_validation_add_usage_info(expected_result, mocked_message) # pylint: disable=no-value-for-parameter
def test_warning_message_present(self):
"""
Tests if validation message is present (warning case).
"""
mocked_message = StudioValidationMessage(StudioValidationMessage.WARNING, u"Validation message")
expected_result = StudioValidationMessage(
StudioValidationMessage.WARNING, u"This content experiment has issues that affect content visibility."
)
self.verify_validation_add_usage_info(expected_result, mocked_message) # pylint: disable=no-value-for-parameter
@patch('xmodule.split_test_module.SplitTestDescriptor.validate_split_test')
def verify_validation_update_usage_info(self, expected_result, mocked_message, mocked_validation_messages):
"""
        Helper method for testing that validation information is present after update_usage_info.
"""
self._add_user_partitions()
split_test = self._create_content_experiment(cid=0, name_suffix='0')[1]
validation = StudioValidation(split_test.location)
if mocked_message is not None:
validation.add(mocked_message)
mocked_validation_messages.return_value = validation
group_configuration = GroupConfiguration.update_usage_info(
self.store, self.course, self.course.user_partitions[0]
)
self.assertEqual(
expected_result.to_json() if expected_result is not None else None,
group_configuration['usage'][0]['validation']
)
def test_update_usage_info(self):
"""
Tests if validation message is present when updating usage info.
"""
mocked_message = StudioValidationMessage(StudioValidationMessage.WARNING, u"Validation message")
expected_result = StudioValidationMessage(
StudioValidationMessage.WARNING, u"This content experiment has issues that affect content visibility."
)
# pylint: disable=no-value-for-parameter
self.verify_validation_update_usage_info(expected_result, mocked_message)
def test_update_usage_info_no_message(self):
"""
Tests if validation message is not present when updating usage info.
"""
self.verify_validation_update_usage_info(None, None) # pylint: disable=no-value-for-parameter
| agpl-3.0 | 8,852,526,276,621,919,000 | 36.749593 | 129 | 0.590412 | false |
eagleamon/home-assistant | homeassistant/components/device_tracker/aruba.py | 13 | 4464 | """
Support for Aruba Access Points.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.aruba/
"""
import logging
import re
import threading
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.util import Throttle
# Return cached results if the last scan was less than this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQUIREMENTS = ['pexpect==4.0.1']
_LOGGER = logging.getLogger(__name__)
_DEVICES_REGEX = re.compile(
r'(?P<name>([^\s]+))\s+' +
r'(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})\s+' +
r'(?P<mac>(([0-9a-f]{2}[:-]){5}([0-9a-f]{2})))\s+')
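# Illustrative example (assumed output format, not from the original source): a line of
# `show clients` output such as
#   my-laptop  192.168.1.23  aa:bb:cc:dd:ee:ff  ...
# would be parsed by _DEVICES_REGEX into name='my-laptop', ip='192.168.1.23',
# mac='aa:bb:cc:dd:ee:ff'.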
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string
})
# pylint: disable=unused-argument
def get_scanner(hass, config):
"""Validate the configuration and return a Aruba scanner."""
scanner = ArubaDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class ArubaDeviceScanner(DeviceScanner):
"""This class queries a Aruba Access Point for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.lock = threading.Lock()
self.last_results = {}
# Test the router is accessible.
data = self.get_aruba_data()
self.success_init = data is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client['mac'] for client in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if not self.last_results:
return None
for client in self.last_results:
if client['mac'] == device:
return client['name']
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""Ensure the information from the Aruba Access Point is up to date.
Return boolean if scanning successful.
"""
if not self.success_init:
return False
with self.lock:
data = self.get_aruba_data()
if not data:
return False
self.last_results = data.values()
return True
def get_aruba_data(self):
"""Retrieve data from Aruba Access Point and return parsed result."""
import pexpect
connect = 'ssh {}@{}'
ssh = pexpect.spawn(connect.format(self.username, self.host))
query = ssh.expect(['password:', pexpect.TIMEOUT, pexpect.EOF,
'continue connecting (yes/no)?',
'Host key verification failed.',
'Connection refused',
'Connection timed out'], timeout=120)
if query == 1:
_LOGGER.error('Timeout')
return
elif query == 2:
_LOGGER.error('Unexpected response from router')
return
elif query == 3:
ssh.sendline('yes')
ssh.expect('password:')
elif query == 4:
_LOGGER.error('Host key Changed')
return
elif query == 5:
_LOGGER.error('Connection refused by server')
return
elif query == 6:
_LOGGER.error('Connection timed out')
return
ssh.sendline(self.password)
ssh.expect('#')
ssh.sendline('show clients')
ssh.expect('#')
devices_result = ssh.before.split(b'\r\n')
ssh.sendline('exit')
devices = {}
for device in devices_result:
match = _DEVICES_REGEX.search(device.decode('utf-8'))
if match:
devices[match.group('ip')] = {
'ip': match.group('ip'),
'mac': match.group('mac').upper(),
'name': match.group('name')
}
return devices
| apache-2.0 | 3,901,769,087,074,137,600 | 31.347826 | 77 | 0.584677 | false |
niekas/Hack4LT | src/hack4lt/views/task.py | 1 | 8643 | from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render
from django.views.generic import CreateView, UpdateView, DeleteView, ListView, DetailView
from hack4lt.forms import (
CommentForm,
Task1Form,
Task2Form,
TaskAplinkaResultForm,
TaskInfoForm,
TaskPythonResultForm,
TaskResultForm,
TaskSeminarasResultForm,
)
from hack4lt.models import (
Topic,
TaskComment,
TaskInfo,
TaskResult,
TaskAplinkaResult,
TaskPythonResult,
TaskSeminarasResult,
)
from hack4lt.views.account import AdminRequiredMixin, LoginRequiredMixin
from hack4lt.utils import slugify
class UserMixin(object):
def form_valid(self, form):
response = super(UserMixin, self).form_valid(form)
form.instance.user = self.request.user
form.instance.save()
return response
class TaskInfoCreate(UserMixin, AdminRequiredMixin, CreateView):
model = TaskInfo
form_class = TaskInfoForm
template_name = 'hack4lt/form.html'
success_url = reverse_lazy('tasks')
class TaskInfoUpdate(UserMixin, AdminRequiredMixin, UpdateView):
model = TaskInfo
form_class = TaskInfoForm
template_name = 'hack4lt/form.html'
success_url = reverse_lazy('tasks')
class TaskInfoList(LoginRequiredMixin, ListView):
model = TaskInfo
paginate_by = 30
template_name = 'hack4lt/task_list.html'
success_url = reverse_lazy('tasks')
def get_context_data(self, **kwargs):
context = super(TaskInfoList, self).get_context_data(**kwargs)
user_tasks = TaskResult.objects.filter(user=self.request.user)
context['tasks_done'] = dict(user_tasks.filter(done=True).
values_list('task__pk', 'total_points'))
return context
class TaskInfoDelete(AdminRequiredMixin, DeleteView):
model = TaskInfo
success_url = reverse_lazy('tasks')
class TaskResultCreate(UserMixin, CreateView):
template_name = 'hack4lt/task_result_form.html'
success_url = reverse_lazy('tasks')
def get_context_data(self, **kwargs):
context = super(TaskResultCreate, self).get_context_data(**kwargs)
context['task'] = TaskInfo.objects.get(slug=self.kwargs.get('slug'))
return context
def get_object(self, queryset=None):
task = TaskInfo.objects.get(slug=self.kwargs['slug'])
return eval('Task%sResult()' % slugify(unicode(task.slug)).capitalize())
def form_valid(self, form):
response = super(TaskResultCreate, self).form_valid(form)
form.instance.task = TaskInfo.objects.get(slug=self.kwargs.get('slug'))
form.save()
return response
def get_form_class(self):
task = TaskInfo.objects.get(slug=self.kwargs['slug'])
return eval('Task%sResultForm' % slugify(unicode(task.slug)).capitalize())
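# Note (illustrative, not in the original source): get_form_class/get_object rely on a
# naming convention -- a TaskInfo slug such as 'aplinka' is resolved to the imported
# TaskAplinkaResultForm / TaskAplinkaResult classes via
# 'Task%sResultForm' % slugify(unicode(slug)).capitalize().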
class TopicCreate(LoginRequiredMixin, CreateView):
model = Topic
success_url = reverse_lazy('topics')
class TopicList(LoginRequiredMixin, ListView):
model = Topic
paginate_by = 30
success_url = reverse_lazy('topics')
class TaskResultUpdate(UserMixin, UpdateView):
template_name = 'hack4lt/task_result_form.html'
success_url = reverse_lazy('tasks')
def get_context_data(self, **kwargs):
context = super(TaskResultUpdate, self).get_context_data(**kwargs)
slug = self.kwargs.get('slug')
user = self.request.user
task_info = TaskInfo.objects.get(slug=slug)
context['task'] = task_info
task = TaskResult.objects.get(user=user, task=task_info)
if task:
context['comments'] = task.taskcomment_set.all()
context['comment_form'] = CommentForm()
return context
def get_object(self, queryset=None):
task_slug = self.kwargs.get('slug')
user = self.request.user
task_objs = TaskResult.objects.filter(user=user, task__slug=task_slug)
if not task_objs.exists():
raise Http404
task = task_objs.order_by('-created')[0]
return getattr(task, 'task%sresult' % slugify(unicode(task.task.slug)))
def form_valid(self, form):
response = super(TaskResultUpdate, self).form_valid(form)
form.instance.task = TaskInfo.objects.get(slug=self.kwargs.get('slug'))
form.save()
return response
def get_form_class(self):
task = TaskInfo.objects.get(slug=self.kwargs['slug'])
return eval('Task%sResultForm' % slugify(unicode(task.slug)).capitalize())
class TaskResultDetail(UserMixin, DetailView):
template_name = 'hack4lt/task_result_form.html'
success_url = reverse_lazy('tasks')
def get_context_data(self, **kwargs):
context = super(TaskResultDetail, self).get_context_data(**kwargs)
context['task'] = TaskInfo.objects.get(slug=self.kwargs.get('slug'))
return context
def get_object(self, queryset=None):
return None
def get_task_form(slug, user):
task_class = eval('Task%sResult' % slugify(unicode(slug)).capitalize())
task_result = task_class.objects.order_by('-created').filter(task__slug=slug, user=user)[0]
task_form_class = eval('Task%sResultForm' % slugify(unicode(slug)).capitalize())
return task_form_class(instance=task_result)
class TaskResultCheckUpdate(AdminRequiredMixin, UpdateView):
template_name = 'hack4lt/task_check_form.html'
success_url = reverse_lazy('tasks')
form_class = TaskResultForm
def get_context_data(self, **kwargs):
context = super(TaskResultCheckUpdate, self).get_context_data(**kwargs)
slug = self.object.task.slug
user = self.object.user
context['task_form'] = get_task_form(slug=slug, user=user)
context['comments'] = TaskComment.objects.order_by('created').filter(task=self.kwargs.get('pk'))
context['comment_form'] = CommentForm()
return context
def get_object(self, queryset=None):
return TaskResult.objects.get(pk=self.kwargs.get('pk'))
def tasks_view(request):
return render(request, 'hack4lt/tasks.html', {})
@login_required(login_url=reverse_lazy('login'))
def task_view(request, task_id):
if task_id == '1':
form_class = Task1Form
elif task_id == '2':
form_class = Task2Form
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES, user=request.user)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse_lazy('tasks'))
else:
form = form_class(user=request.user)
return render(request, 'hack4lt/task.html', {
'form': form,
})
def user_comment_view(request, slug):
user = request.user
task_info = TaskInfo.objects.get(slug=slug)
try:
task = TaskResult.objects.get(user=user, task=task_info)
except TaskResult.DoesNotExist:
raise Http404
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
form.save()
form.instance.user = user
form.instance.task = task
form.instance.save()
return HttpResponseRedirect(reverse_lazy('do-task', kwargs={'slug': slug}))
else:
form = CommentForm()
return HttpResponseRedirect(reverse_lazy('do-task', kwargs={'slug': slug}))
def admin_comment_view(request, pk):
user = request.user
task = TaskResult.objects.get(pk=pk)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
form.save()
form.instance.user = user
form.instance.task = task
form.instance.save()
return HttpResponseRedirect(reverse_lazy('check-task', kwargs={'pk': pk}))
else:
form = CommentForm()
return HttpResponseRedirect(reverse_lazy('check-task', kwargs={'pk': pk}))
@login_required(login_url=reverse_lazy('login'))
def do_task_view(request, slug):
user = request.user
try:
task = TaskInfo.objects.get(slug=slug)
except TaskInfo.DoesNotExist:
raise Http404
try:
eval('Task%sResultForm' % slugify(unicode(task.slug)).capitalize())
except NameError:
return HttpResponseRedirect(reverse_lazy('view-task', kwargs={'slug': slug}))
if TaskResult.objects.filter(user=user, task__slug=slug).exists():
return HttpResponseRedirect(reverse_lazy('update-task', kwargs={'slug': slug}))
return HttpResponseRedirect(reverse_lazy('create-task', kwargs={'slug': slug}))
| bsd-3-clause | -7,714,819,966,082,569,000 | 34.134146 | 104 | 0.662154 | false |
hashems/Mobile-Cloud-Development-Projects | appengine/flexible/endpoints/main_test.py | 11 | 2188 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import os
import pytest
import main
@pytest.fixture
def client(monkeypatch):
monkeypatch.chdir(os.path.dirname(main.__file__))
main.app.testing = True
client = main.app.test_client()
return client
def test_echo(client):
r = client.post(
'/echo',
data='{"message": "Hello"}',
headers={
'Content-Type': 'application/json'
})
assert r.status_code == 200
data = json.loads(r.data.decode('utf-8'))
assert data['message'] == 'Hello'
def test_auth_info(client):
endpoints = [
'/auth/info/googlejwt',
'/auth/info/googleidtoken',
'/auth/info/firebase']
encoded_info = base64.b64encode(json.dumps({
'id': '123'
}).encode('utf-8'))
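    # The Endpoints proxy is expected to forward authenticated-user details to the backend
    # in the X-Endpoint-API-UserInfo header as base64-encoded JSON; the value built above
    # is a minimal hand-rolled equivalent ({"id": "123"}) used to exercise that path.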
for endpoint in endpoints:
r = client.get(
endpoint,
headers={
'Content-Type': 'application/json'
})
assert r.status_code == 200
data = json.loads(r.data.decode('utf-8'))
assert data['id'] == 'anonymous'
r = client.get(
endpoint,
headers={
'Content-Type': 'application/json',
'X-Endpoint-API-UserInfo': encoded_info
})
assert r.status_code == 200
data = json.loads(r.data.decode('utf-8'))
assert data['id'] == '123'
def test_cors(client):
r = client.options(
'/auth/info/firebase', headers={'Origin': 'example.com'})
assert r.status_code == 200
assert r.headers['Access-Control-Allow-Origin'] == '*'
| apache-2.0 | -2,409,759,580,056,539,000 | 25.682927 | 74 | 0.607861 | false |
urrego093/proyecto_mv | applications/admin/languages/pl.py | 17 | 15944 | # -*- coding: utf-8 -*-
{
'!langcode!': 'pl',
'!langname!': 'Polska',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyrażeniem postaci "pole1=\'nowawartość\'". Nie możesz uaktualnić lub usunąć wyników z JOIN:',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%s %%{row} deleted': 'Wierszy usuniętych: %s',
'%s %%{row} updated': 'Wierszy uaktualnionych: %s',
'(requires internet access)': '(requires internet access)',
'(something like "it-it")': '(coś podobnego do "it-it")',
'@markmin\x01Searching: **%s** %%{file}': 'Searching: **%s** files',
'A new version of web2py is available': 'Nowa wersja web2py jest dostępna',
'A new version of web2py is available: %s': 'Nowa wersja web2py jest dostępna: %s',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'UWAGA: Wymagane jest bezpieczne (HTTPS) połączenie lub połączenie z lokalnego adresu.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'UWAGA: TESTOWANIE NIE JEST BEZPIECZNE W ŚRODOWISKU WIELOWĄTKOWYM, TAK WIĘC NIE URUCHAMIAJ WIELU TESTÓW JEDNOCZEŚNIE.',
'ATTENTION: you cannot edit the running application!': 'UWAGA: nie można edytować uruchomionych aplikacji!',
'About': 'informacje',
'About application': 'Informacje o aplikacji',
'Additional code for your application': 'Additional code for your application',
'Admin is disabled because insecure channel': 'Panel administracyjny wyłączony z powodu braku bezpiecznego połączenia',
'Admin is disabled because unsecure channel': 'Panel administracyjny wyłączony z powodu braku bezpiecznego połączenia',
'Administrator Password:': 'Hasło administratora:',
'Application name:': 'Application name:',
'Are you sure you want to delete file "%s"?': 'Czy na pewno chcesz usunąć plik "%s"?',
'Are you sure you want to delete plugin "%s"?': 'Czy na pewno chcesz usunąć wtyczkę "%s"?',
'Are you sure you want to uninstall application "%s"': 'Czy na pewno chcesz usunąć aplikację "%s"',
'Are you sure you want to uninstall application "%s"?': 'Czy na pewno chcesz usunąć aplikację "%s"?',
'Are you sure you want to upgrade web2py now?': 'Are you sure you want to upgrade web2py now?',
'Available databases and tables': 'Dostępne bazy danych i tabele',
'Cannot be empty': 'Nie może być puste',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'Nie można skompilować: w Twojej aplikacji są błędy . Znajdź je, popraw a następnie spróbój ponownie.',
'Cannot compile: there are errors in your app:': 'Cannot compile: there are errors in your app:',
'Change admin password': 'change admin password',
'Check for upgrades': 'check for upgrades',
'Check to delete': 'Zaznacz aby usunąć',
'Checking for upgrades...': 'Sprawdzanie aktualizacji...',
'Clean': 'oczyść',
'Compile': 'skompiluj',
'Controllers': 'Kontrolery',
'Create': 'create',
'Create new simple application': 'Utwórz nową aplikację',
'Current request': 'Aktualne żądanie',
'Current response': 'Aktualna odpowiedź',
'Current session': 'Aktualna sesja',
'DESIGN': 'PROJEKTUJ',
'Date and Time': 'Data i godzina',
'Delete': 'Usuń',
'Delete:': 'Usuń:',
'Deploy': 'deploy',
'Deploy on Google App Engine': 'Umieść na Google App Engine',
'Design for': 'Projekt dla',
'EDIT': 'EDYTUJ',
'Edit': 'edytuj',
'Edit application': 'Edycja aplikacji',
'Edit current record': 'Edytuj aktualny rekord',
'Editing Language file': 'Edytuj plik tłumaczeń',
'Editing file': 'Edycja pliku',
'Editing file "%s"': 'Edycja pliku "%s"',
'Enterprise Web Framework': 'Enterprise Web Framework',
'Error logs for "%(app)s"': 'Wpisy błędów dla "%(app)s"',
'Errors': 'błędy',
'Exception instance attributes': 'Exception instance attributes',
'Functions with no doctests will result in [passed] tests.': 'Funkcje bez doctestów będą dołączone do [zaliczonych] testów.',
'Hello World': 'Witaj Świecie',
'Help': 'pomoc',
'If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code.\nA green title indicates that all tests (if defined) passed. In this case test results are not shown.': 'Jeżeli powyższy raport zawiera numer biletu błędu, oznacza to błąd podczas wykonywania kontrolera przez próbą uruchomienia doctestów. Zazwyczaj jest to spowodowane nieprawidłowymi wcięciami linii kodu lub błędami w module poza ciałem funkcji.\nTytuł w kolorze zielonym oznacza, ze wszystkie (zdefiniowane) testy zakończyły się sukcesem. W tej sytuacji ich wyniki nie są pokazane.',
'Import/Export': 'Importuj/eksportuj',
'Install': 'install',
'Installed applications': 'Zainstalowane aplikacje',
'Internal State': 'Stan wewnętrzny',
'Invalid Query': 'Błędne zapytanie',
'Invalid action': 'Błędna akcja',
'Language files (static strings) updated': 'Pliki tłumaczeń (ciągi statyczne) zostały uaktualnione',
'Languages': 'Tłumaczenia',
'Last saved on:': 'Ostatnio zapisany:',
'License for': 'Licencja dla',
'Login': 'Zaloguj',
'Login to the Administrative Interface': 'Logowanie do panelu administracyjnego',
'Logout': 'wyloguj',
'Models': 'Modele',
'Modules': 'Moduły',
'NO': 'NIE',
'New Record': 'Nowy rekord',
'New application wizard': 'New application wizard',
'New simple application': 'New simple application',
'No databases in this application': 'Brak baz danych w tej aplikacji',
'Original/Translation': 'Oryginał/tłumaczenie',
'Overwrite installed app': 'overwrite installed app',
'PAM authenticated user, cannot change password here': 'PAM authenticated user, cannot change password here',
'Pack all': 'spakuj wszystko',
'Pack compiled': 'spakuj skompilowane',
'Peeking at file': 'Podgląd pliku',
'Plugin "%s" in application': 'Wtyczka "%s" w aplikacji',
'Plugins': 'Wtyczki',
'Powered by': 'Zasilane przez',
'Query:': 'Zapytanie:',
'Remove compiled': 'usuń skompilowane',
'Resolve Conflict file': 'Rozwiąż konflikt plików',
'Rows in table': 'Wiersze w tabeli',
'Rows selected': 'Wierszy wybranych',
'Saved file hash:': 'Suma kontrolna zapisanego pliku:',
'Site': 'strona główna',
'Start wizard': 'start wizard',
'Static files': 'Pliki statyczne',
'Sure you want to delete this object?': 'Czy na pewno chcesz usunąć ten obiekt?',
'TM': 'TM',
'Testing application': 'Testowanie aplikacji',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'wartość\'". Takie coś jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller',
'The data representation, define database tables and sets': 'The data representation, define database tables and sets',
'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates',
'There are no controllers': 'Brak kontrolerów',
'There are no models': 'Brak modeli',
'There are no modules': 'Brak modułów',
'There are no plugins': 'There are no plugins',
'There are no static files': 'Brak plików statycznych',
'There are no translators, only default language is supported': 'Brak plików tłumaczeń, wspierany jest tylko domyślny język',
'There are no views': 'Brak widoków',
'These files are served without processing, your images go here': 'These files are served without processing, your images go here',
'This is the %(filename)s template': 'To jest szablon %(filename)s',
'Ticket': 'Bilet',
'To create a plugin, name a file/folder plugin_[name]': 'Aby utworzyć wtyczkę, nazwij plik/katalog plugin_[nazwa]',
'Translation strings for the application': 'Translation strings for the application',
'Unable to check for upgrades': 'Nie można sprawdzić aktualizacji',
'Unable to download': 'Nie można ściągnąć',
'Unable to download app': 'Nie można ściągnąć aplikacji',
'Unable to download app because:': 'Unable to download app because:',
'Unable to download because': 'Unable to download because',
'Uninstall': 'odinstaluj',
'Update:': 'Uaktualnij:',
'Upload & install packed application': 'Upload & install packed application',
'Upload a package:': 'Upload a package:',
'Upload existing application': 'Wyślij istniejącą aplikację',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Użyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapytań.',
'Use an url:': 'Use an url:',
'Version': 'Wersja',
'Views': 'Widoki',
'Welcome to web2py': 'Witaj w web2py',
'YES': 'TAK',
'additional code for your application': 'dodatkowy kod Twojej aplikacji',
'admin disabled because no admin password': 'panel administracyjny wyłączony z powodu braku hasła administracyjnego',
'admin disabled because not supported on google app engine': 'panel administracyjny wyłączony z powodu braku wsparcia na google apps engine',
'admin disabled because unable to access password file': 'panel administracyjny wyłączony z powodu braku dostępu do pliku z hasłem',
'administrative interface': 'administrative interface',
'and rename it (required):': 'i nadaj jej nową nazwę (wymagane):',
'and rename it:': 'i nadaj mu nową nazwę:',
'appadmin': 'administracja aplikacji',
'appadmin is disabled because insecure channel': 'administracja aplikacji wyłączona z powodu braku bezpiecznego połączenia',
'application "%s" uninstalled': 'aplikacja "%s" została odinstalowana',
'application compiled': 'aplikacja została skompilowana',
'application is compiled and cannot be designed': 'aplikacja jest skompilowana i nie może być projektowana',
'arguments': 'arguments',
'back': 'wstecz',
'cache': 'cache',
'cache, errors and sessions cleaned': 'pamięć podręczna, bilety błędów oraz pliki sesji zostały wyczyszczone',
'cannot create file': 'nie można utworzyć pliku',
'cannot upload file "%(filename)s"': 'nie można wysłać pliku "%(filename)s"',
'check all': 'zaznacz wszystko',
'click here for online examples': 'kliknij aby przejść do interaktywnych przykładów',
'click here for the administrative interface': 'kliknij aby przejść do panelu administracyjnego',
'click to check for upgrades': 'kliknij aby sprawdzić aktualizacje',
'code': 'code',
'collapse/expand all': 'collapse/expand all',
'compiled application removed': 'skompilowana aplikacja została usunięta',
'controllers': 'kontrolery',
'create file with filename:': 'utwórz plik o nazwie:',
'create new application:': 'utwórz nową aplikację:',
'created by': 'utworzone przez',
'crontab': 'crontab',
'currently running': 'currently running',
'currently saved or': 'aktualnie zapisany lub',
'data uploaded': 'dane wysłane',
'database': 'baza danych',
'database %s select': 'wybór z bazy danych %s',
'database administration': 'administracja bazy danych',
'db': 'baza danych',
'defines tables': 'zdefiniuj tabele',
'delete': 'usuń',
'delete all checked': 'usuń wszystkie zaznaczone',
'delete plugin': 'usuń wtyczkę',
'design': 'projektuj',
'direction: ltr': 'direction: ltr',
'done!': 'zrobione!',
'download layouts': 'download layouts',
'download plugins': 'download plugins',
'edit controller': 'edytuj kontroler',
'edit views:': 'edit views:',
'export as csv file': 'eksportuj jako plik csv',
'exposes': 'eksponuje',
'extends': 'rozszerza',
'failed to reload module': 'nie udało się przeładować modułu',
'failed to reload module because:': 'failed to reload module because:',
'file "%(filename)s" created': 'plik "%(filename)s" został utworzony',
'file "%(filename)s" deleted': 'plik "%(filename)s" został usunięty',
'file "%(filename)s" uploaded': 'plik "%(filename)s" został wysłany',
'file "%(filename)s" was not deleted': 'plik "%(filename)s" nie został usunięty',
'file "%s" of %s restored': 'plik "%s" z %s został odtworzony',
'file changed on disk': 'plik na dysku został zmieniony',
'file does not exist': 'plik nie istnieje',
'file saved on %(time)s': 'plik zapisany o %(time)s',
'file saved on %s': 'plik zapisany o %s',
'filter': 'filter',
'htmledit': 'edytuj HTML',
'includes': 'zawiera',
'insert new': 'wstaw nowy rekord tabeli',
'insert new %s': 'wstaw nowy rekord do tabeli %s',
'internal error': 'wewnętrzny błąd',
'invalid password': 'błędne hasło',
'invalid request': 'błędne zapytanie',
'invalid ticket': 'błędny bilet',
'language file "%(filename)s" created/updated': 'plik tłumaczeń "%(filename)s" został utworzony/uaktualniony',
'languages': 'pliki tłumaczeń',
'languages updated': 'pliki tłumaczeń zostały uaktualnione',
'loading...': 'wczytywanie...',
'login': 'zaloguj',
'merge': 'zespól',
'models': 'modele',
'modules': 'moduły',
'new application "%s" created': 'nowa aplikacja "%s" została utworzona',
'new plugin installed': 'nowa wtyczka została zainstalowana',
'new record inserted': 'nowy rekord został wstawiony',
'next 100 rows': 'następne 100 wierszy',
'no match': 'no match',
'or import from csv file': 'lub zaimportuj z pliku csv',
'or provide app url:': 'or provide app url:',
'or provide application url:': 'lub podaj url aplikacji:',
'pack plugin': 'spakuj wtyczkę',
'password changed': 'password changed',
'plugin "%(plugin)s" deleted': 'wtyczka "%(plugin)s" została usunięta',
'plugins': 'plugins',
'previous 100 rows': 'poprzednie 100 wierszy',
'record': 'rekord',
'record does not exist': 'rekord nie istnieje',
'record id': 'ID rekordu',
'restore': 'odtwórz',
'revert': 'przywróć',
'save': 'zapisz',
'selected': 'zaznaczone',
'session expired': 'sesja wygasła',
'shell': 'powłoka',
'some files could not be removed': 'niektóre pliki nie mogły zostać usunięte',
'state': 'stan',
'static': 'pliki statyczne',
'submit': 'wyślij',
'table': 'tabela',
'test': 'testuj',
'the application logic, each URL path is mapped in one exposed function in the controller': 'logika aplikacji, każda ścieżka URL jest mapowana na jedną z funkcji eksponowanych w kontrolerze',
'the data representation, define database tables and sets': 'reprezentacja danych, definicje zbiorów i tabel bazy danych',
'the presentations layer, views are also known as templates': 'warstwa prezentacji, widoki zwane są również szablonami',
'these files are served without processing, your images go here': 'pliki obsługiwane bez interpretacji, to jest miejsce na Twoje obrazy',
'to previous version.': 'do poprzedniej wersji.',
'translation strings for the application': 'ciągi tłumaczeń dla aplikacji',
'try': 'spróbój',
'try something like': 'spróbój czegos takiego jak',
'unable to create application "%s"': 'nie można utworzyć aplikacji "%s"',
'unable to delete file "%(filename)s"': 'nie można usunąć pliku "%(filename)s"',
'unable to delete file plugin "%(plugin)s"': 'nie można usunąc pliku wtyczki "%(plugin)s"',
'unable to parse csv file': 'nie można sparsować pliku csv',
'unable to uninstall "%s"': 'nie można odinstalować "%s"',
'unable to upgrade because "%s"': 'unable to upgrade because "%s"',
'uncheck all': 'odznacz wszystko',
'update': 'uaktualnij',
'update all languages': 'uaktualnij wszystkie pliki tłumaczeń',
'upgrade web2py now': 'upgrade web2py now',
'upload': 'upload',
'upload application:': 'wyślij plik aplikacji:',
'upload file:': 'wyślij plik:',
'upload plugin file:': 'wyślij plik wtyczki:',
'variables': 'variables',
'versioning': 'versioning',
'view': 'widok',
'views': 'widoki',
'web2py Recent Tweets': 'najnowsze tweety web2py',
'web2py is up to date': 'web2py jest aktualne',
'web2py upgraded; please restart it': 'web2py upgraded; please restart it',
}
| gpl-3.0 | 868,159,826,881,448,100 | 55.505415 | 707 | 0.729491 | false |
takahashiminoru/ryu | ryu/lib/packet/packet_utils.py | 23 | 4603 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import six
import socket
import struct
from ryu.lib import addrconv
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
def checksum(data):
data = six.binary_type(data) # input can be bytearray.
if len(data) % 2:
data += b'\x00'
s = sum(array.array('H', data))
s = (s & 0xffff) + (s >> 16)
s += (s >> 16)
return socket.ntohs(~s & 0xffff)
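# Illustrative usage (not part of the original module): per the worked example in
# RFC 1071, the 16-bit one's-complement checksum of b'\x00\x01\xf2\x03\xf4\xf5\xf6\xf7'
# should be 0x220d:
#
#     checksum(b'\x00\x01\xf2\x03\xf4\xf5\xf6\xf7')  # -> 0x220d (8717)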
# avoid circular import
_IPV4_PSEUDO_HEADER_PACK_STR = '!4s4sxBH'
_IPV6_PSEUDO_HEADER_PACK_STR = '!16s16sI3xB'
def checksum_ip(ipvx, length, payload):
"""
calculate checksum of IP pseudo header
IPv4 pseudo header
UDP RFC768
TCP RFC793 3.1
0 7 8 15 16 23 24 31
+--------+--------+--------+--------+
| source address |
+--------+--------+--------+--------+
| destination address |
+--------+--------+--------+--------+
| zero |protocol| length |
+--------+--------+--------+--------+
IPv6 pseudo header
RFC2460 8.1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Source Address +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| |
+ +
| |
+ Destination Address +
| |
+ +
| |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Upper-Layer Packet Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| zero | Next Header |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
"""
if ipvx.version == 4:
header = struct.pack(_IPV4_PSEUDO_HEADER_PACK_STR,
addrconv.ipv4.text_to_bin(ipvx.src),
addrconv.ipv4.text_to_bin(ipvx.dst),
ipvx.proto, length)
elif ipvx.version == 6:
header = struct.pack(_IPV6_PSEUDO_HEADER_PACK_STR,
addrconv.ipv6.text_to_bin(ipvx.src),
addrconv.ipv6.text_to_bin(ipvx.dst),
length, ipvx.nxt)
else:
raise ValueError('Unknown IP version %d' % ipvx.version)
buf = header + payload
return checksum(buf)
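# Illustrative usage (a sketch, with assumed variable names): to fill in a UDP or TCP
# checksum, pass the parsed IPv4/IPv6 header object, the upper-layer length, and the
# serialized upper-layer header (checksum field zeroed) plus payload, e.g.
#
#     csum = checksum_ip(ipv4_header, len(udp_bytes), udp_bytes_with_zero_checksum)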
_MODX = 4102
def fletcher_checksum(data, offset):
"""
Fletcher Checksum -- Refer to RFC1008
calling with offset == _FLETCHER_CHECKSUM_VALIDATE will validate the
checksum without modifying the buffer; a valid checksum returns 0.
"""
c0 = 0
c1 = 0
pos = 0
length = len(data)
data = bytearray(data)
data[offset:offset + 2] = [0] * 2
while pos < length:
tlen = min(length - pos, _MODX)
for d in data[pos:pos + tlen]:
c0 += d
c1 += c0
c0 %= 255
c1 %= 255
pos += tlen
x = ((length - offset - 1) * c0 - c1) % 255
if x <= 0:
x += 255
y = 510 - c0 - x
if y > 255:
y -= 255
data[offset] = x
data[offset + 1] = y
return (x << 8) | (y & 0xff)
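# Illustrative usage (not part of the original module): compute the Fletcher check bytes
# for a buffer whose 16-bit checksum field lives at `offset`, then store the returned
# value back into the original buffer -- the function only patches its local copy.
#
#     cksum = fletcher_checksum(buf, offset)
#     buf[offset:offset + 2] = struct.pack('!H', cksum)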
| apache-2.0 | 8,283,459,835,302,272,000 | 32.59854 | 72 | 0.383446 | false |
alfredgamulo/cloud-custodian | tools/c7n_azure/tests_azure/test_utils_ip_range_helper.py | 2 | 3534 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import pytest
from .azure_common import BaseTest, cassette_name
from c7n_azure.utils import IpRangeHelper
from netaddr import IPRange, IPSet
class IpRangeHelperTest(BaseTest):
def test_empty(self):
data = {'whatever': []}
actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected = IPSet()
self.assertEqual(expected, actual)
def test_absent(self):
data = {'whatever': []}
actual = IpRangeHelper.parse_ip_ranges(data, 'nosuch')
self.assertIsNone(actual)
def test_parse_range_and_net(self):
data = {'whatever': ['0.0.0.0-10.10.10.10', '10.20.20.0/24']}
actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected = IPSet(IPRange('0.0.0.0', '10.10.10.10')) | \
IPSet(IPRange('10.20.20.0', '10.20.20.255'))
self.assertEqual(expected, actual)
def test_parse_multi_net(self):
data = {'whatever': ['1.2.2.127/32', '1.2.2.128/25']}
actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected = IPSet(IPRange('1.2.2.127', '1.2.2.127')) | \
IPSet(IPRange('1.2.2.128', '1.2.2.255'))
self.assertEqual(expected, actual)
def test_parse_spaces(self):
data = {'whatever': ['0.0.0.0 - 10.10.10.10', '10.20.20.0 / 24']}
actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected = IPSet(IPRange('0.0.0.0', '10.10.10.10')) | \
IPSet(IPRange('10.20.20.0', '10.20.20.255'))
self.assertEqual(expected, actual)
def test_parse_extra_dash(self):
data = {'whatever': ['0.0.0.0-10.10.10.10-10.10.10.10']}
with self.assertRaises(Exception) as context:
IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected_error = 'Invalid range. Use x.x.x.x-y.y.y.y or x.x.x.x or x.x.x.x/y.'
self.assertTrue(expected_error in str(context.exception))
def test_parse_single_ip(self):
data = {'whatever': ['1.2.2.127']}
actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected = IPSet(IPRange('1.2.2.127', '1.2.2.127'))
self.assertEqual(expected, actual)
# Service Tag IP lists are dynamic and will always be changing in live tests
@pytest.mark.skiplive
@cassette_name('servicetags')
def test_parse_alias(self):
data = {'whatever': ['ServiceTags.ApiManagement.WestUS']}
actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected = IPSet(['13.64.39.16/32', '40.112.242.148/31', '40.112.243.240/28'])
self.assertEqual(expected, actual)
# Service Tag IP lists are dynamic and will always be changing in live tests
@pytest.mark.skiplive
@cassette_name('servicetags')
def test_parse_alias_and_blocks(self):
data = {'whatever': ['ServiceTags.ApiManagement.WestUS', '1.2.2.127', '1.2.2.128/25']}
actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected = IPSet(['13.64.39.16/32', '40.112.242.148/31', '40.112.243.240/28',
'1.2.2.127/32', '1.2.2.128/25'])
self.assertEqual(expected, actual)
@cassette_name('servicetags')
def test_parse_alias_invalid(self):
data = {'whatever': ['ServiceTags.ApiManagement.Invalid', '1.2.2.127', '1.2.2.128/25']}
actual = IpRangeHelper.parse_ip_ranges(data, 'whatever')
expected = IPSet(['1.2.2.127/32', '1.2.2.128/25'])
self.assertEqual(expected, actual)
| apache-2.0 | -4,204,049,722,132,447,000 | 42.62963 | 95 | 0.616582 | false |
erinspace/osf.io | osf/models/quickfiles.py | 5 | 2494 | from __future__ import unicode_literals
import logging
from osf.models.node import AbstractNode, AbstractNodeManager
from website.exceptions import NodeStateError
logger = logging.getLogger(__name__)
class QuickFilesNodeManager(AbstractNodeManager):
def create_for_user(self, user):
possessive_title = get_quickfiles_project_title(user)
quickfiles, created = QuickFilesNode.objects.get_or_create(
title=possessive_title,
creator=user
)
if not created:
raise NodeStateError('Users may only have one quickfiles project')
quickfiles.add_addon('osfstorage', auth=None, log=False)
return quickfiles
def get_for_user(self, user):
return QuickFilesNode.objects.get(creator=user)
class QuickFilesNode(AbstractNode):
__guid_min_length__ = 10
objects = QuickFilesNodeManager()
def __init__(self, *args, **kwargs):
kwargs['is_public'] = True
super(QuickFilesNode, self).__init__(*args, **kwargs)
def remove_node(self, auth, date=None):
# QuickFilesNodes are only delete-able for disabled users
# This is only done when doing a GDPR-delete
if auth.user.is_disabled:
super(QuickFilesNode, self).remove_node(auth=auth, date=date)
else:
raise NodeStateError('A QuickFilesNode may not be deleted.')
def set_privacy(self, permissions, *args, **kwargs):
raise NodeStateError('You may not set privacy for a QuickFilesNode.')
def add_contributor(self, contributor, *args, **kwargs):
raise NodeStateError('A QuickFilesNode may not have additional contributors.')
def clone(self):
raise NodeStateError('A QuickFilesNode may not be forked, used as a template, or registered.')
def add_addon(self, name, auth, log=True):
if name != 'osfstorage':
raise NodeStateError('A QuickFilesNode can only have the osfstorage addon.')
return super(QuickFilesNode, self).add_addon(name, auth, log)
@property
def is_registration(self):
"""For v1 compat."""
return False
@property
def is_collection(self):
"""For v1 compat."""
return False
@property
def is_quickfiles(self):
return True
def get_quickfiles_project_title(user):
possessive_title_name = user.fullname + "'s" if user.fullname[-1] != 's' else user.fullname + "'"
return '{} Quick Files'.format(possessive_title_name)
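# Illustrative examples (derived from the logic above, not in the original source):
#   a user whose fullname is 'Alice Smith'    -> "Alice Smith's Quick Files"
#   a user whose fullname is 'Burt Reynolds'  -> "Burt Reynolds' Quick Files"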
| apache-2.0 | 4,468,670,767,913,228,000 | 29.790123 | 102 | 0.661588 | false |
diofant/diofant | diofant/domains/field.py | 1 | 1562 | """Implementation of :class:`Field` class."""
from .ring import CommutativeRing
class Field(CommutativeRing):
"""Represents a field domain."""
is_Field = True
@property
def ring(self):
raise AttributeError(f'there is no ring associated with {self}')
@property
def field(self):
"""Returns a field associated with ``self``."""
return self
def exquo(self, a, b):
"""Exact quotient of ``a`` and ``b``, implies ``__truediv__``."""
return a / b
def quo(self, a, b):
"""Quotient of ``a`` and ``b``, implies ``__truediv__``."""
return a / b
def rem(self, a, b):
"""Remainder of ``a`` and ``b``, implies nothing."""
return self.zero
def div(self, a, b):
"""Division of ``a`` and ``b``, implies ``__truediv__``."""
return a / b, self.zero
def gcd(self, a, b):
"""
Returns GCD of ``a`` and ``b``.
        This definition of GCD over fields makes it possible to clear
        denominators in `primitive()`.
>>> QQ.gcd(QQ(2, 3), QQ(4, 9))
2/9
>>> gcd(Rational(2, 3), Rational(4, 9))
2/9
>>> primitive(2*x/3 + Rational(4, 9))
(2/9, 3*x + 2)
"""
try:
ring = self.ring
except (AttributeError, NotImplementedError):
return self.one
p = ring.gcd(a.numerator, b.numerator)
q = ring.lcm(a.denominator, b.denominator)
return self.convert(p, ring)/q
def lcm(self, a, b):
return (a*b)/self.gcd(a, b)
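    # Worked example (not in the original source) using the definitions above:
    # over QQ, gcd(2/3, 4/9) = gcd(2, 4)/lcm(3, 9) = 2/9, hence
    # lcm(2/3, 4/9) = (2/3 * 4/9)/(2/9) = 4/3, i.e. lcm(2, 4)/gcd(3, 9).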
| bsd-3-clause | -3,026,501,311,636,222,500 | 24.606557 | 73 | 0.516645 | false |
westphahl/verleihsystem | verleihsystem/contact/views.py | 1 | 1790 | from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from django.conf import settings
from contact.forms import ContactForm
def contact_form(request):
"""
Displays and processes the email contact form.
The email is sent to the recipient defined by the CONTACT_FORM_EMAIL
setting. If the user is logged in, the form is populated with the user's
name and email address.
"""
if request.method == 'POST':
# Process the form
form = ContactForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
mail = form.cleaned_data['mail']
subject = "[Verleihsystem:Kontakt]: " + form.cleaned_data['subject']
message = form.cleaned_data['message']
cc_myself = form.cleaned_data['cc_myself']
recipients = [getattr(settings, 'CONTACT_FORM_EMAIL', '')]
# CC the sender
if cc_myself:
recipients.append(mail)
email = EmailMessage(subject=subject, body=message,
to=recipients, headers={'Reply-To': mail})
email.send()
return redirect(reverse('home'))
else:
# Display the empty form
if request.user.is_anonymous():
form = ContactForm()
else:
name = "%s %s" % (request.user.first_name, request.user.last_name)
mail = request.user.email
form = ContactForm(initial={'name': name, 'mail': mail})
return render_to_response('contact/contact_form.html', {'form': form,},
context_instance=RequestContext(request))
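# Illustrative wiring only (URL pattern and names are assumptions, not taken
# from this project): with a pre-2.0 Django, as the django.core.urlresolvers
# import above implies, the view would typically be exposed via a URLconf like:
#
#     from django.conf.urls import url
#     from contact.views import contact_form
#
#     urlpatterns = [
#         url(r'^contact/$', contact_form, name='contact_form'),
#     ]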
| isc | 6,255,350,618,332,268,000 | 36.291667 | 80 | 0.606704 | false |
gregdek/ansible | test/units/modules/net_tools/nios/test_nios_ptr_record.py | 49 | 6659 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_ptr_record
from ansible.module_utils.net_tools.nios import api
from units.compat.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosPTRRecordModule(TestNiosModule):
module = nios_ptr_record
def setUp(self):
super(TestNiosPTRRecordModule, self).setUp()
self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_ptr_record.WapiModule')
self.module.check_mode = False
self.module.params = {'provider': None}
self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_ptr_record.WapiModule')
self.exec_command = self.mock_wapi.start()
self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_ptr_record.WapiModule.run')
self.mock_wapi_run.start()
self.load_config = self.mock_wapi_run.start()
def tearDown(self):
super(TestNiosPTRRecordModule, self).tearDown()
self.mock_wapi.stop()
def _get_wapi(self, test_object):
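        # Build a WapiModule whose get_object returns the canned test_object and
        # whose create/update/delete calls are plain mocks, so tests can assert
        # on them without a live NIOS endpoint.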
wapi = api.WapiModule(self.module)
wapi.get_object = Mock(name='get_object', return_value=test_object)
wapi.create_object = Mock(name='create_object')
wapi.update_object = Mock(name='update_object')
wapi.delete_object = Mock(name='delete_object')
return wapi
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
def test_nios_ptr_record_create(self):
self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.com',
'ipv4addr': '10.36.241.14', 'comment': None, 'extattrs': None}
test_object = None
test_spec = {
"ipv4addr": {"ib_req": True},
"ptrdname": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
print("WAPI: ", wapi)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'ipv4addr': '10.36.241.14', 'ptrdname': 'ansible.test.com'})
def test_nios_ptr_record_remove(self):
self.module.params = {'provider': None, 'state': 'absent', 'ptrdname': 'ansible.test.com',
'ipv4addr': '10.36.241.14', 'comment': None, 'extattrs': None}
ref = "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default"
test_object = [{
"comment": "test comment",
"_ref": ref,
"ptrdname": "ansible.test.com",
"ipv4addr": "10.36.241.14",
"extattrs": {'Site': {'value': 'test'}}
}]
test_spec = {
"ipv4addr": {"ib_req": True},
"ptrdname": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.delete_object.assert_called_once_with(ref)
def test_nios_ptr_record_update_comment(self):
self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.com',
'ipv4addr': '10.36.241.14', 'comment': 'updated comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default",
"ptrdname": "ansible.test.com",
"ipv4addr": "10.36.241.14",
"extattrs": {}
}
]
test_spec = {
"ipv4addr": {"ib_req": True},
"ptrdname": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.update_object.called_once_with(test_object)
def test_nios_ptr_record_update_record_ptrdname(self):
self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible.test.org',
'ipv4addr': '10.36.241.14', 'comment': 'comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "record:ptr/ZG5zLm5ldHdvcmtfdmlldyQw:14.241.36.10.in-addr.arpa/default",
"ptrdname": "ansible.test.com",
"ipv4addr": "10.36.241.14",
"extattrs": {}
}
]
test_spec = {
"ipv4addr": {"ib_req": True},
"ptrdname": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.update_object.called_once_with(test_object)
def test_nios_ptr6_record_create(self):
self.module.params = {'provider': None, 'state': 'present', 'ptrdname': 'ansible6.test.com',
'ipv6addr': '2002:8ac3:802d:1242:20d:60ff:fe38:6d16', 'comment': None, 'extattrs': None}
test_object = None
test_spec = {"ipv6addr": {"ib_req": True},
"ptrdname": {"ib_req": True},
"comment": {},
"extattrs": {}}
wapi = self._get_wapi(test_object)
print("WAPI: ", wapi)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'ipv6addr': '2002:8ac3:802d:1242:20d:60ff:fe38:6d16', 'ptrdname': 'ansible6.test.com'})
| gpl-3.0 | -2,033,535,488,815,220,200 | 37.051429 | 153 | 0.577114 | false |
Belxjander/Kirito | Python-3.5.0-Amiga/Lib/test/test_ipaddress.py | 6 | 83267 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""Unittest for ipaddress module."""
import unittest
import re
import contextlib
import functools
import operator
import pickle
import ipaddress
import weakref
class BaseTestCase(unittest.TestCase):
# One big change in ipaddress over the original ipaddr module is
# error reporting that tries to assume users *don't know the rules*
# for what constitutes an RFC compliant IP address
# Ensuring these errors are emitted correctly in all relevant cases
# meant moving to a more systematic test structure that allows the
# test structure to map more directly to the module structure
# Note that if the constructors are refactored so that addresses with
# multiple problems get classified differently, that's OK - just
# move the affected examples to the newly appropriate test case.
# There is some duplication between the original relatively ad hoc
# test suite and the new systematic tests. While some redundancy in
# testing is considered preferable to accidentally deleting a valid
# test, the original test suite will likely be reduced over time as
# redundant tests are identified.
@property
def factory(self):
raise NotImplementedError
@contextlib.contextmanager
def assertCleanError(self, exc_type, details, *args):
"""
Ensure exception does not display a context by default
Wraps unittest.TestCase.assertRaisesRegex
"""
if args:
details = details % args
cm = self.assertRaisesRegex(exc_type, details)
with cm as exc:
yield exc
# Ensure we produce clean tracebacks on failure
if exc.exception.__context__ is not None:
self.assertTrue(exc.exception.__suppress_context__)
def assertAddressError(self, details, *args):
"""Ensure a clean AddressValueError"""
return self.assertCleanError(ipaddress.AddressValueError,
details, *args)
def assertNetmaskError(self, details, *args):
"""Ensure a clean NetmaskValueError"""
return self.assertCleanError(ipaddress.NetmaskValueError,
details, *args)
def assertInstancesEqual(self, lhs, rhs):
"""Check constructor arguments produce equivalent instances"""
self.assertEqual(self.factory(lhs), self.factory(rhs))
class CommonTestMixin:
def test_empty_address(self):
with self.assertAddressError("Address cannot be empty"):
self.factory("")
def test_floats_rejected(self):
with self.assertAddressError(re.escape(repr("1.0"))):
self.factory(1.0)
def test_not_an_index_issue15559(self):
# Implementing __index__ makes for a very nasty interaction with the
# bytes constructor. Thus, we disallow implicit use as an integer
self.assertRaises(TypeError, operator.index, self.factory(1))
self.assertRaises(TypeError, hex, self.factory(1))
self.assertRaises(TypeError, bytes, self.factory(1))
def pickle_test(self, addr):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(proto=proto):
x = self.factory(addr)
y = pickle.loads(pickle.dumps(x, proto))
self.assertEqual(y, x)
class CommonTestMixin_v4(CommonTestMixin):
def test_leading_zeros(self):
self.assertInstancesEqual("000.000.000.000", "0.0.0.0")
self.assertInstancesEqual("192.168.000.001", "192.168.0.1")
def test_int(self):
self.assertInstancesEqual(0, "0.0.0.0")
self.assertInstancesEqual(3232235521, "192.168.0.1")
def test_packed(self):
self.assertInstancesEqual(bytes.fromhex("00000000"), "0.0.0.0")
self.assertInstancesEqual(bytes.fromhex("c0a80001"), "192.168.0.1")
def test_negative_ints_rejected(self):
msg = "-1 (< 0) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg)):
self.factory(-1)
def test_large_ints_rejected(self):
msg = "%d (>= 2**32) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg % 2**32)):
self.factory(2**32)
def test_bad_packed_length(self):
def assertBadLength(length):
addr = bytes(length)
msg = "%r (len %d != 4) is not permitted as an IPv4 address"
with self.assertAddressError(re.escape(msg % (addr, length))):
self.factory(addr)
assertBadLength(3)
assertBadLength(5)
class CommonTestMixin_v6(CommonTestMixin):
def test_leading_zeros(self):
self.assertInstancesEqual("0000::0000", "::")
self.assertInstancesEqual("000::c0a8:0001", "::c0a8:1")
def test_int(self):
self.assertInstancesEqual(0, "::")
self.assertInstancesEqual(3232235521, "::c0a8:1")
def test_packed(self):
addr = bytes(12) + bytes.fromhex("00000000")
self.assertInstancesEqual(addr, "::")
addr = bytes(12) + bytes.fromhex("c0a80001")
self.assertInstancesEqual(addr, "::c0a8:1")
addr = bytes.fromhex("c0a80001") + bytes(12)
self.assertInstancesEqual(addr, "c0a8:1::")
def test_negative_ints_rejected(self):
msg = "-1 (< 0) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg)):
self.factory(-1)
def test_large_ints_rejected(self):
msg = "%d (>= 2**128) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg % 2**128)):
self.factory(2**128)
def test_bad_packed_length(self):
def assertBadLength(length):
addr = bytes(length)
msg = "%r (len %d != 16) is not permitted as an IPv6 address"
with self.assertAddressError(re.escape(msg % (addr, length))):
self.factory(addr)
self.factory(addr)
assertBadLength(15)
assertBadLength(17)
class AddressTestCase_v4(BaseTestCase, CommonTestMixin_v4):
factory = ipaddress.IPv4Address
def test_network_passed_as_address(self):
addr = "127.0.0.1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv4Address(addr)
def test_bad_address_split(self):
def assertBadSplit(addr):
with self.assertAddressError("Expected 4 octets in %r", addr):
ipaddress.IPv4Address(addr)
assertBadSplit("127.0.1")
assertBadSplit("42.42.42.42.42")
assertBadSplit("42.42.42")
assertBadSplit("42.42")
assertBadSplit("42")
assertBadSplit("42..42.42.42")
assertBadSplit("42.42.42.42.")
assertBadSplit("42.42.42.42...")
assertBadSplit(".42.42.42.42")
assertBadSplit("...42.42.42.42")
assertBadSplit("016.016.016")
assertBadSplit("016.016")
assertBadSplit("016")
assertBadSplit("000")
assertBadSplit("0x0a.0x0a.0x0a")
assertBadSplit("0x0a.0x0a")
assertBadSplit("0x0a")
assertBadSplit(".")
assertBadSplit("bogus")
assertBadSplit("bogus.com")
assertBadSplit("1000")
assertBadSplit("1000000000000000")
assertBadSplit("192.168.0.1.com")
def test_empty_octet(self):
def assertBadOctet(addr):
with self.assertAddressError("Empty octet not permitted in %r",
addr):
ipaddress.IPv4Address(addr)
assertBadOctet("42..42.42")
assertBadOctet("...")
def test_invalid_characters(self):
def assertBadOctet(addr, octet):
msg = "Only decimal digits permitted in %r in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("0x0a.0x0a.0x0a.0x0a", "0x0a")
assertBadOctet("0xa.0x0a.0x0a.0x0a", "0xa")
assertBadOctet("42.42.42.-0", "-0")
assertBadOctet("42.42.42.+0", "+0")
assertBadOctet("42.42.42.-42", "-42")
assertBadOctet("+1.+2.+3.4", "+1")
assertBadOctet("1.2.3.4e0", "4e0")
assertBadOctet("1.2.3.4::", "4::")
assertBadOctet("1.a.2.3", "a")
def test_octal_decimal_ambiguity(self):
def assertBadOctet(addr, octet):
msg = "Ambiguous (octal/decimal) value in %r not permitted in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("016.016.016.016", "016")
assertBadOctet("001.000.008.016", "008")
def test_octet_length(self):
def assertBadOctet(addr, octet):
msg = "At most 3 characters permitted in %r in %r"
with self.assertAddressError(re.escape(msg % (octet, addr))):
ipaddress.IPv4Address(addr)
assertBadOctet("0000.000.000.000", "0000")
assertBadOctet("12345.67899.-54321.-98765", "12345")
def test_octet_limit(self):
def assertBadOctet(addr, octet):
msg = "Octet %d (> 255) not permitted in %r" % (octet, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv4Address(addr)
assertBadOctet("257.0.0.0", 257)
assertBadOctet("192.168.0.999", 999)
def test_pickle(self):
self.pickle_test('192.0.2.1')
def test_weakref(self):
weakref.ref(self.factory('192.0.2.1'))
class AddressTestCase_v6(BaseTestCase, CommonTestMixin_v6):
factory = ipaddress.IPv6Address
def test_network_passed_as_address(self):
addr = "::1/24"
with self.assertAddressError("Unexpected '/' in %r", addr):
ipaddress.IPv6Address(addr)
def test_bad_address_split_v6_not_enough_parts(self):
def assertBadSplit(addr):
msg = "At least 3 parts expected in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit(":")
assertBadSplit(":1")
assertBadSplit("FEDC:9878")
def test_bad_address_split_v6_too_many_colons(self):
def assertBadSplit(addr):
msg = "At most 8 colons permitted in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("9:8:7:6:5:4:3::2:1")
assertBadSplit("10:9:8:7:6:5:4:3:2:1")
assertBadSplit("::8:7:6:5:4:3:2:1")
assertBadSplit("8:7:6:5:4:3:2:1::")
# A trailing IPv4 address is two parts
assertBadSplit("10:9:8:7:6:5:4:3:42.42.42.42")
def test_bad_address_split_v6_too_many_parts(self):
def assertBadSplit(addr):
msg = "Exactly 8 parts expected without '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("3ffe:0:0:0:0:0:0:0:1")
assertBadSplit("9:8:7:6:5:4:3:2:1")
assertBadSplit("7:6:5:4:3:2:1")
# A trailing IPv4 address is two parts
assertBadSplit("9:8:7:6:5:4:3:42.42.42.42")
assertBadSplit("7:6:5:4:3:42.42.42.42")
def test_bad_address_split_v6_too_many_parts_with_double_colon(self):
def assertBadSplit(addr):
msg = "Expected at most 7 other parts with '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("1:2:3:4::5:6:7:8")
def test_bad_address_split_v6_repeated_double_colon(self):
def assertBadSplit(addr):
msg = "At most one '::' permitted in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("3ffe::1::1")
assertBadSplit("1::2::3::4:5")
assertBadSplit("2001::db:::1")
assertBadSplit("3ffe::1::")
assertBadSplit("::3ffe::1")
assertBadSplit(":3ffe::1::1")
assertBadSplit("3ffe::1::1:")
assertBadSplit(":3ffe::1::1:")
assertBadSplit(":::")
assertBadSplit('2001:db8:::1')
def test_bad_address_split_v6_leading_colon(self):
def assertBadSplit(addr):
msg = "Leading ':' only permitted as part of '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit(":2001:db8::1")
assertBadSplit(":1:2:3:4:5:6:7")
assertBadSplit(":1:2:3:4:5:6:")
assertBadSplit(":6:5:4:3:2:1::")
def test_bad_address_split_v6_trailing_colon(self):
def assertBadSplit(addr):
msg = "Trailing ':' only permitted as part of '::' in %r"
with self.assertAddressError(msg, addr):
ipaddress.IPv6Address(addr)
assertBadSplit("2001:db8::1:")
assertBadSplit("1:2:3:4:5:6:7:")
assertBadSplit("::1.2.3.4:")
assertBadSplit("::7:6:5:4:3:2:")
def test_bad_v4_part_in(self):
def assertBadAddressPart(addr, v4_error):
with self.assertAddressError("%s in %r", v4_error, addr):
ipaddress.IPv6Address(addr)
assertBadAddressPart("3ffe::1.net", "Expected 4 octets in '1.net'")
assertBadAddressPart("3ffe::127.0.1",
"Expected 4 octets in '127.0.1'")
assertBadAddressPart("::1.2.3",
"Expected 4 octets in '1.2.3'")
assertBadAddressPart("::1.2.3.4.5",
"Expected 4 octets in '1.2.3.4.5'")
assertBadAddressPart("3ffe::1.1.1.net",
"Only decimal digits permitted in 'net' "
"in '1.1.1.net'")
def test_invalid_characters(self):
def assertBadPart(addr, part):
msg = "Only hex digits permitted in %r in %r" % (part, addr)
with self.assertAddressError(re.escape(msg)):
ipaddress.IPv6Address(addr)
assertBadPart("3ffe::goog", "goog")
assertBadPart("3ffe::-0", "-0")
assertBadPart("3ffe::+0", "+0")
assertBadPart("3ffe::-1", "-1")
assertBadPart("1.2.3.4::", "1.2.3.4")
assertBadPart('1234:axy::b', "axy")
def test_part_length(self):
def assertBadPart(addr, part):
msg = "At most 4 characters permitted in %r in %r"
with self.assertAddressError(msg, part, addr):
ipaddress.IPv6Address(addr)
assertBadPart("::00000", "00000")
assertBadPart("3ffe::10000", "10000")
assertBadPart("02001:db8::", "02001")
assertBadPart('2001:888888::1', "888888")
def test_pickle(self):
self.pickle_test('2001:db8::')
def test_weakref(self):
weakref.ref(self.factory('2001:db8::'))
class NetmaskTestMixin_v4(CommonTestMixin_v4):
"""Input validation on interfaces and networks is very similar"""
def test_split_netmask(self):
addr = "1.2.3.4/32/24"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.factory(addr)
def test_address_errors(self):
def assertBadAddress(addr, details):
with self.assertAddressError(details):
self.factory(addr)
assertBadAddress("/", "Address cannot be empty")
assertBadAddress("/8", "Address cannot be empty")
assertBadAddress("bogus", "Expected 4 octets")
assertBadAddress("google.com", "Expected 4 octets")
assertBadAddress("10/8", "Expected 4 octets")
assertBadAddress("::1.2.3.4", "Only decimal digits")
assertBadAddress("1.2.3.256", re.escape("256 (> 255)"))
def test_valid_netmask(self):
self.assertEqual(str(self.factory('192.0.2.0/255.255.255.0')),
'192.0.2.0/24')
for i in range(0, 33):
# Generate and re-parse the CIDR format (trivial).
net_str = '0.0.0.0/%d' % i
net = self.factory(net_str)
self.assertEqual(str(net), net_str)
# Generate and re-parse the expanded netmask.
self.assertEqual(
str(self.factory('0.0.0.0/%s' % net.netmask)), net_str)
# Zero prefix is treated as decimal.
self.assertEqual(str(self.factory('0.0.0.0/0%d' % i)), net_str)
# Generate and re-parse the expanded hostmask. The ambiguous
# cases (/0 and /32) are treated as netmasks.
if i in (32, 0):
net_str = '0.0.0.0/%d' % (32 - i)
self.assertEqual(
str(self.factory('0.0.0.0/%s' % net.hostmask)), net_str)
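        # For reference, the behaviour exercised above with concrete values:
        #   IPv4Network('0.0.0.0/255.255.255.0') -> '0.0.0.0/24' (netmask form)
        #   IPv4Network('0.0.0.0/0.0.0.255')     -> '0.0.0.0/24' (hostmask form)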
def test_netmask_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask" % netmask
with self.assertNetmaskError(re.escape(msg)):
self.factory("%s/%s" % (addr, netmask))
assertBadNetmask("1.2.3.4", "")
assertBadNetmask("1.2.3.4", "-1")
assertBadNetmask("1.2.3.4", "+1")
assertBadNetmask("1.2.3.4", " 1 ")
assertBadNetmask("1.2.3.4", "0x1")
assertBadNetmask("1.2.3.4", "33")
assertBadNetmask("1.2.3.4", "254.254.255.256")
assertBadNetmask("1.2.3.4", "1.a.2.3")
assertBadNetmask("1.1.1.1", "254.xyz.2.3")
assertBadNetmask("1.1.1.1", "240.255.0.0")
assertBadNetmask("1.1.1.1", "255.254.128.0")
assertBadNetmask("1.1.1.1", "0.1.127.255")
assertBadNetmask("1.1.1.1", "pudding")
assertBadNetmask("1.1.1.1", "::")
def test_pickle(self):
self.pickle_test('192.0.2.0/27')
self.pickle_test('192.0.2.0/31') # IPV4LENGTH - 1
self.pickle_test('192.0.2.0') # IPV4LENGTH
class InterfaceTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
factory = ipaddress.IPv4Interface
class NetworkTestCase_v4(BaseTestCase, NetmaskTestMixin_v4):
factory = ipaddress.IPv4Network
class NetmaskTestMixin_v6(CommonTestMixin_v6):
"""Input validation on interfaces and networks is very similar"""
def test_split_netmask(self):
addr = "cafe:cafe::/128/190"
with self.assertAddressError("Only one '/' permitted in %r" % addr):
self.factory(addr)
def test_address_errors(self):
def assertBadAddress(addr, details):
with self.assertAddressError(details):
self.factory(addr)
assertBadAddress("/", "Address cannot be empty")
assertBadAddress("/8", "Address cannot be empty")
assertBadAddress("google.com", "At least 3 parts")
assertBadAddress("1.2.3.4", "At least 3 parts")
assertBadAddress("10/8", "At least 3 parts")
assertBadAddress("1234:axy::b", "Only hex digits")
def test_valid_netmask(self):
# We only support CIDR for IPv6, because expanded netmasks are not
# standard notation.
self.assertEqual(str(self.factory('2001:db8::/32')), '2001:db8::/32')
for i in range(0, 129):
# Generate and re-parse the CIDR format (trivial).
net_str = '::/%d' % i
self.assertEqual(str(self.factory(net_str)), net_str)
# Zero prefix is treated as decimal.
self.assertEqual(str(self.factory('::/0%d' % i)), net_str)
def test_netmask_errors(self):
def assertBadNetmask(addr, netmask):
msg = "%r is not a valid netmask" % netmask
with self.assertNetmaskError(re.escape(msg)):
self.factory("%s/%s" % (addr, netmask))
assertBadNetmask("::1", "")
assertBadNetmask("::1", "::1")
assertBadNetmask("::1", "1::")
assertBadNetmask("::1", "-1")
assertBadNetmask("::1", "+1")
assertBadNetmask("::1", " 1 ")
assertBadNetmask("::1", "0x1")
assertBadNetmask("::1", "129")
assertBadNetmask("::1", "1.2.3.4")
assertBadNetmask("::1", "pudding")
assertBadNetmask("::", "::")
def test_pickle(self):
self.pickle_test('2001:db8::1000/124')
self.pickle_test('2001:db8::1000/127') # IPV6LENGTH - 1
self.pickle_test('2001:db8::1000') # IPV6LENGTH
class InterfaceTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
factory = ipaddress.IPv6Interface
class NetworkTestCase_v6(BaseTestCase, NetmaskTestMixin_v6):
factory = ipaddress.IPv6Network
class FactoryFunctionErrors(BaseTestCase):
def assertFactoryError(self, factory, kind):
"""Ensure a clean ValueError with the expected message"""
addr = "camelot"
msg = '%r does not appear to be an IPv4 or IPv6 %s'
with self.assertCleanError(ValueError, msg, addr, kind):
factory(addr)
def test_ip_address(self):
self.assertFactoryError(ipaddress.ip_address, "address")
def test_ip_interface(self):
self.assertFactoryError(ipaddress.ip_interface, "interface")
def test_ip_network(self):
self.assertFactoryError(ipaddress.ip_network, "network")
@functools.total_ordering
class LargestObject:
def __eq__(self, other):
return isinstance(other, LargestObject)
def __lt__(self, other):
return False
@functools.total_ordering
class SmallestObject:
def __eq__(self, other):
return isinstance(other, SmallestObject)
def __gt__(self, other):
return False
class ComparisonTests(unittest.TestCase):
v4addr = ipaddress.IPv4Address(1)
v4net = ipaddress.IPv4Network(1)
v4intf = ipaddress.IPv4Interface(1)
v6addr = ipaddress.IPv6Address(1)
v6net = ipaddress.IPv6Network(1)
v6intf = ipaddress.IPv6Interface(1)
v4_addresses = [v4addr, v4intf]
v4_objects = v4_addresses + [v4net]
v6_addresses = [v6addr, v6intf]
v6_objects = v6_addresses + [v6net]
objects = v4_objects + v6_objects
def test_foreign_type_equality(self):
# __eq__ should never raise TypeError directly
other = object()
for obj in self.objects:
self.assertNotEqual(obj, other)
self.assertFalse(obj == other)
self.assertEqual(obj.__eq__(other), NotImplemented)
self.assertEqual(obj.__ne__(other), NotImplemented)
def test_mixed_type_equality(self):
# Ensure none of the internal objects accidentally
# expose the right set of attributes to become "equal"
for lhs in self.objects:
for rhs in self.objects:
if lhs is rhs:
continue
self.assertNotEqual(lhs, rhs)
def test_containment(self):
for obj in self.v4_addresses:
self.assertIn(obj, self.v4net)
for obj in self.v6_addresses:
self.assertIn(obj, self.v6net)
for obj in self.v4_objects + [self.v6net]:
self.assertNotIn(obj, self.v6net)
for obj in self.v6_objects + [self.v4net]:
self.assertNotIn(obj, self.v4net)
def test_mixed_type_ordering(self):
for lhs in self.objects:
for rhs in self.objects:
if isinstance(lhs, type(rhs)) or isinstance(rhs, type(lhs)):
continue
self.assertRaises(TypeError, lambda: lhs < rhs)
self.assertRaises(TypeError, lambda: lhs > rhs)
self.assertRaises(TypeError, lambda: lhs <= rhs)
self.assertRaises(TypeError, lambda: lhs >= rhs)
def test_foreign_type_ordering(self):
other = object()
smallest = SmallestObject()
largest = LargestObject()
for obj in self.objects:
with self.assertRaises(TypeError):
obj < other
with self.assertRaises(TypeError):
obj > other
with self.assertRaises(TypeError):
obj <= other
with self.assertRaises(TypeError):
obj >= other
self.assertTrue(obj < largest)
self.assertFalse(obj > largest)
self.assertTrue(obj <= largest)
self.assertFalse(obj >= largest)
self.assertFalse(obj < smallest)
self.assertTrue(obj > smallest)
self.assertFalse(obj <= smallest)
self.assertTrue(obj >= smallest)
def test_mixed_type_key(self):
        # with get_mixed_type_key, you can sort addresses and networks together.
v4_ordered = [self.v4addr, self.v4net, self.v4intf]
v6_ordered = [self.v6addr, self.v6net, self.v6intf]
self.assertEqual(v4_ordered,
sorted(self.v4_objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(v6_ordered,
sorted(self.v6_objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(v4_ordered + v6_ordered,
sorted(self.objects,
key=ipaddress.get_mixed_type_key))
self.assertEqual(NotImplemented, ipaddress.get_mixed_type_key(object))
def test_incompatible_versions(self):
# These should always raise TypeError
v4addr = ipaddress.ip_address('1.1.1.1')
v4net = ipaddress.ip_network('1.1.1.1')
v6addr = ipaddress.ip_address('::1')
v6net = ipaddress.ip_network('::1')
self.assertRaises(TypeError, v4addr.__lt__, v6addr)
self.assertRaises(TypeError, v4addr.__gt__, v6addr)
self.assertRaises(TypeError, v4net.__lt__, v6net)
self.assertRaises(TypeError, v4net.__gt__, v6net)
self.assertRaises(TypeError, v6addr.__lt__, v4addr)
self.assertRaises(TypeError, v6addr.__gt__, v4addr)
self.assertRaises(TypeError, v6net.__lt__, v4net)
self.assertRaises(TypeError, v6net.__gt__, v4net)
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4_address = ipaddress.IPv4Address('1.2.3.4')
self.ipv4_interface = ipaddress.IPv4Interface('1.2.3.4/24')
self.ipv4_network = ipaddress.IPv4Network('1.2.3.0/24')
#self.ipv4_hostmask = ipaddress.IPv4Interface('10.0.0.1/0.255.255.255')
self.ipv6_address = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1')
self.ipv6_interface = ipaddress.IPv6Interface(
'2001:658:22a:cafe:200:0:0:1/64')
self.ipv6_network = ipaddress.IPv6Network('2001:658:22a:cafe::/64')
def testRepr(self):
self.assertEqual("IPv4Interface('1.2.3.4/32')",
repr(ipaddress.IPv4Interface('1.2.3.4')))
self.assertEqual("IPv6Interface('::1/128')",
repr(ipaddress.IPv6Interface('::1')))
    # issue #16531: constructing IPv4Network from an (address, mask) tuple
def testIPv4Tuple(self):
# /32
ip = ipaddress.IPv4Address('192.0.2.1')
net = ipaddress.IPv4Network('192.0.2.1/32')
self.assertEqual(ipaddress.IPv4Network(('192.0.2.1', 32)), net)
self.assertEqual(ipaddress.IPv4Network((ip, 32)), net)
self.assertEqual(ipaddress.IPv4Network((3221225985, 32)), net)
self.assertEqual(ipaddress.IPv4Network(('192.0.2.1',
'255.255.255.255')), net)
self.assertEqual(ipaddress.IPv4Network((ip,
'255.255.255.255')), net)
self.assertEqual(ipaddress.IPv4Network((3221225985,
'255.255.255.255')), net)
# strict=True and host bits set
with self.assertRaises(ValueError):
ipaddress.IPv4Network(('192.0.2.1', 24))
with self.assertRaises(ValueError):
ipaddress.IPv4Network((ip, 24))
with self.assertRaises(ValueError):
ipaddress.IPv4Network((3221225985, 24))
with self.assertRaises(ValueError):
ipaddress.IPv4Network(('192.0.2.1', '255.255.255.0'))
with self.assertRaises(ValueError):
ipaddress.IPv4Network((ip, '255.255.255.0'))
with self.assertRaises(ValueError):
ipaddress.IPv4Network((3221225985, '255.255.255.0'))
# strict=False and host bits set
net = ipaddress.IPv4Network('192.0.2.0/24')
self.assertEqual(ipaddress.IPv4Network(('192.0.2.1', 24),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network((ip, 24),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network((3221225985, 24),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network(('192.0.2.1',
'255.255.255.0'),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network((ip,
'255.255.255.0'),
strict=False), net)
self.assertEqual(ipaddress.IPv4Network((3221225985,
'255.255.255.0'),
strict=False), net)
# /24
ip = ipaddress.IPv4Address('192.0.2.0')
net = ipaddress.IPv4Network('192.0.2.0/24')
self.assertEqual(ipaddress.IPv4Network(('192.0.2.0',
'255.255.255.0')), net)
self.assertEqual(ipaddress.IPv4Network((ip,
'255.255.255.0')), net)
self.assertEqual(ipaddress.IPv4Network((3221225984,
'255.255.255.0')), net)
self.assertEqual(ipaddress.IPv4Network(('192.0.2.0', 24)), net)
self.assertEqual(ipaddress.IPv4Network((ip, 24)), net)
self.assertEqual(ipaddress.IPv4Network((3221225984, 24)), net)
self.assertEqual(ipaddress.IPv4Interface(('192.0.2.1', 24)),
ipaddress.IPv4Interface('192.0.2.1/24'))
self.assertEqual(ipaddress.IPv4Interface((3221225985, 24)),
ipaddress.IPv4Interface('192.0.2.1/24'))
    # issue #16531: constructing IPv6Network from an (address, mask) tuple
def testIPv6Tuple(self):
# /128
ip = ipaddress.IPv6Address('2001:db8::')
net = ipaddress.IPv6Network('2001:db8::/128')
self.assertEqual(ipaddress.IPv6Network(('2001:db8::', '128')),
net)
self.assertEqual(ipaddress.IPv6Network(
(42540766411282592856903984951653826560, 128)),
net)
self.assertEqual(ipaddress.IPv6Network((ip, '128')),
net)
ip = ipaddress.IPv6Address('2001:db8::')
net = ipaddress.IPv6Network('2001:db8::/96')
self.assertEqual(ipaddress.IPv6Network(('2001:db8::', '96')),
net)
self.assertEqual(ipaddress.IPv6Network(
(42540766411282592856903984951653826560, 96)),
net)
self.assertEqual(ipaddress.IPv6Network((ip, '96')),
net)
# strict=True and host bits set
ip = ipaddress.IPv6Address('2001:db8::1')
with self.assertRaises(ValueError):
ipaddress.IPv6Network(('2001:db8::1', 96))
with self.assertRaises(ValueError):
ipaddress.IPv6Network((
42540766411282592856903984951653826561, 96))
with self.assertRaises(ValueError):
ipaddress.IPv6Network((ip, 96))
# strict=False and host bits set
net = ipaddress.IPv6Network('2001:db8::/96')
self.assertEqual(ipaddress.IPv6Network(('2001:db8::1', 96),
strict=False),
net)
self.assertEqual(ipaddress.IPv6Network(
(42540766411282592856903984951653826561, 96),
strict=False),
net)
self.assertEqual(ipaddress.IPv6Network((ip, 96), strict=False),
net)
# /96
self.assertEqual(ipaddress.IPv6Interface(('2001:db8::1', '96')),
ipaddress.IPv6Interface('2001:db8::1/96'))
self.assertEqual(ipaddress.IPv6Interface(
(42540766411282592856903984951653826561, '96')),
ipaddress.IPv6Interface('2001:db8::1/96'))
# issue57
def testAddressIntMath(self):
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') + 255,
ipaddress.IPv4Address('1.1.2.0'))
self.assertEqual(ipaddress.IPv4Address('1.1.1.1') - 256,
ipaddress.IPv4Address('1.1.0.1'))
self.assertEqual(ipaddress.IPv6Address('::1') + (2**16 - 2),
ipaddress.IPv6Address('::ffff'))
self.assertEqual(ipaddress.IPv6Address('::ffff') - (2**16 - 2),
ipaddress.IPv6Address('::1'))
def testInvalidIntToBytes(self):
self.assertRaises(ValueError, ipaddress.v4_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v4_int_to_packed,
2 ** ipaddress.IPV4LENGTH)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed, -1)
self.assertRaises(ValueError, ipaddress.v6_int_to_packed,
2 ** ipaddress.IPV6LENGTH)
def testInternals(self):
ip1 = ipaddress.IPv4Address('10.10.10.10')
ip2 = ipaddress.IPv4Address('10.10.10.11')
ip3 = ipaddress.IPv4Address('10.10.10.12')
self.assertEqual(list(ipaddress._find_address_range([ip1])),
[(ip1, ip1)])
self.assertEqual(list(ipaddress._find_address_range([ip1, ip3])),
[(ip1, ip1), (ip3, ip3)])
self.assertEqual(list(ipaddress._find_address_range([ip1, ip2, ip3])),
[(ip1, ip3)])
self.assertEqual(128, ipaddress._count_righthand_zero_bits(0, 128))
self.assertEqual("IPv4Network('1.2.3.0/24')", repr(self.ipv4_network))
def testMissingNetworkVersion(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*version"):
broken.version
def testMissingAddressClass(self):
class Broken(ipaddress._BaseNetwork):
pass
broken = Broken('127.0.0.1')
with self.assertRaisesRegex(NotImplementedError, "Broken.*address"):
broken._address_class
def testGetNetwork(self):
self.assertEqual(int(self.ipv4_network.network_address), 16909056)
self.assertEqual(str(self.ipv4_network.network_address), '1.2.3.0')
self.assertEqual(int(self.ipv6_network.network_address),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6_network.network_address),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6_network.hostmask),
'::ffff:ffff:ffff:ffff')
def testIpFromInt(self):
self.assertEqual(self.ipv4_interface._ip,
ipaddress.IPv4Interface(16909060)._ip)
ipv4 = ipaddress.ip_network('1.2.3.4')
ipv6 = ipaddress.ip_network('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddress.ip_network(int(ipv4.network_address)))
self.assertEqual(ipv6, ipaddress.ip_network(int(ipv6.network_address)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6_interface._ip,
ipaddress.IPv6Interface(v6_int)._ip)
self.assertEqual(ipaddress.ip_network(self.ipv4_address._ip).version,
4)
self.assertEqual(ipaddress.ip_network(self.ipv6_address._ip).version,
6)
def testIpFromPacked(self):
address = ipaddress.ip_address
self.assertEqual(self.ipv4_interface._ip,
ipaddress.ip_interface(b'\x01\x02\x03\x04')._ip)
self.assertEqual(address('255.254.253.252'),
address(b'\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6_interface.ip,
ipaddress.ip_interface(
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01').ip)
self.assertEqual(address('ffff:2:3:4:ffff::'),
address(b'\xff\xff\x00\x02\x00\x03\x00\x04' +
b'\xff\xff' + b'\x00' * 6))
self.assertEqual(address('::'),
address(b'\x00' * 16))
def testGetIp(self):
self.assertEqual(int(self.ipv4_interface.ip), 16909060)
self.assertEqual(str(self.ipv4_interface.ip), '1.2.3.4')
self.assertEqual(int(self.ipv6_interface.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6_interface.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4_network.netmask), 4294967040)
self.assertEqual(str(self.ipv4_network.netmask), '255.255.255.0')
self.assertEqual(int(self.ipv6_network.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6_network.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddress.IPv4Interface('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.network.netmask), 0)
self.assertEqual(ipv4_zero_netmask._prefix_from_prefix_string('0'), 0)
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0'))
self.assertTrue(ipv4_zero_netmask._is_valid_netmask('0.0.0.0'))
self.assertFalse(ipv4_zero_netmask._is_valid_netmask('invalid'))
ipv6_zero_netmask = ipaddress.IPv6Interface('::1/0')
self.assertEqual(int(ipv6_zero_netmask.network.netmask), 0)
self.assertEqual(ipv6_zero_netmask._prefix_from_prefix_string('0'), 0)
def testIPv4NetAndHostmasks(self):
net = self.ipv4_network
self.assertFalse(net._is_valid_netmask('invalid'))
self.assertTrue(net._is_valid_netmask('128.128.128.128'))
self.assertFalse(net._is_valid_netmask('128.128.128.127'))
self.assertFalse(net._is_valid_netmask('128.128.128.255'))
self.assertTrue(net._is_valid_netmask('255.128.128.128'))
self.assertFalse(net._is_hostmask('invalid'))
self.assertTrue(net._is_hostmask('128.255.255.255'))
self.assertFalse(net._is_hostmask('255.255.255.255'))
self.assertFalse(net._is_hostmask('1.2.3.4'))
net = ipaddress.IPv4Network('127.0.0.0/0.0.0.255')
self.assertEqual(net.prefixlen, 24)
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4_network.broadcast_address), 16909311)
self.assertEqual(str(self.ipv4_network.broadcast_address), '1.2.3.255')
self.assertEqual(int(self.ipv6_network.broadcast_address),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6_network.broadcast_address),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4_interface.network.prefixlen, 24)
self.assertEqual(self.ipv6_interface.network.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4_network.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4_network.supernet().network_address),
'1.2.2.0')
self.assertEqual(
ipaddress.IPv4Interface('0.0.0.0/0').network.supernet(),
ipaddress.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6_network.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6_network.supernet().network_address),
'2001:658:22a:cafe::')
self.assertEqual(ipaddress.IPv6Interface('::0/0').network.supernet(),
ipaddress.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4_network.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4_network.supernet(3).network_address),
'1.2.0.0')
self.assertEqual(self.ipv6_network.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6_network.supernet(3).network_address),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv4_network.supernet,
new_prefix=25)
self.assertEqual(self.ipv4_network.supernet(prefixlen_diff=2),
self.ipv4_network.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6_network.supernet,
prefixlen_diff=2, new_prefix=1)
self.assertRaises(ValueError, self.ipv6_network.supernet,
new_prefix=65)
self.assertEqual(self.ipv6_network.supernet(prefixlen_diff=2),
self.ipv6_network.supernet(new_prefix=62))
def testHosts(self):
hosts = list(self.ipv4_network.hosts())
self.assertEqual(254, len(hosts))
self.assertEqual(ipaddress.IPv4Address('1.2.3.1'), hosts[0])
self.assertEqual(ipaddress.IPv4Address('1.2.3.254'), hosts[-1])
# special case where only 1 bit is left for address
self.assertEqual([ipaddress.IPv4Address('2.0.0.0'),
ipaddress.IPv4Address('2.0.0.1')],
list(ipaddress.ip_network('2.0.0.0/31').hosts()))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4_network.subnets(prefixlen_diff=3)),
sorted(self.ipv4_network.subnets(new_prefix=27)))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(new_prefix=23))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(prefixlen_diff=3,
new_prefix=27))
self.assertEqual(sorted(self.ipv6_network.subnets(prefixlen_diff=4)),
sorted(self.ipv6_network.subnets(new_prefix=68)))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(new_prefix=63))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(prefixlen_diff=4,
new_prefix=68))
def testGetSubnets(self):
self.assertEqual(list(self.ipv4_network.subnets())[0].prefixlen, 25)
self.assertEqual(str(list(
self.ipv4_network.subnets())[0].network_address),
'1.2.3.0')
self.assertEqual(str(list(
self.ipv4_network.subnets())[1].network_address),
'1.2.3.128')
self.assertEqual(list(self.ipv6_network.subnets())[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddress.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddress.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnets()]
subnets2 = [str(x) for x in ip.subnets(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4_network.subnets(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6_network.subnets(2)]
self.assertEqual(
ipsv6,
['2001:658:22a:cafe::/66',
'2001:658:22a:cafe:4000::/66',
'2001:658:22a:cafe:8000::/66',
'2001:658:22a:cafe:c000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(9))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(65))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(65))
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ValueError,
self.ipv4_interface.network.supernet, 25)
self.assertRaises(ValueError,
self.ipv6_interface.network.supernet, 65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ValueError, list,
self.ipv4_interface.network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv4_network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv6_interface.network.subnets(-1))
self.assertRaises(ValueError, list,
self.ipv6_network.subnets(-1))
def testGetNum_Addresses(self):
self.assertEqual(self.ipv4_network.num_addresses, 256)
self.assertEqual(list(self.ipv4_network.subnets())[0].num_addresses,
128)
self.assertEqual(self.ipv4_network.supernet().num_addresses, 512)
self.assertEqual(self.ipv6_network.num_addresses, 18446744073709551616)
self.assertEqual(list(self.ipv6_network.subnets())[0].num_addresses,
9223372036854775808)
self.assertEqual(self.ipv6_network.supernet().num_addresses,
36893488147419103232)
def testContains(self):
self.assertIn(ipaddress.IPv4Interface('1.2.3.128/25'),
self.ipv4_network)
self.assertNotIn(ipaddress.IPv4Interface('1.2.4.1/24'),
self.ipv4_network)
        # We can test addresses and strings as well.
addr1 = ipaddress.IPv4Address('1.2.3.37')
self.assertIn(addr1, self.ipv4_network)
# issue 61, bad network comparison on like-ip'd network objects
# with identical broadcast addresses.
self.assertFalse(ipaddress.IPv4Network('1.1.0.0/16').__contains__(
ipaddress.IPv4Network('1.0.0.0/15')))
def testNth(self):
self.assertEqual(str(self.ipv4_network[5]), '1.2.3.5')
self.assertRaises(IndexError, self.ipv4_network.__getitem__, 256)
self.assertEqual(str(self.ipv6_network[5]),
'2001:658:22a:cafe::5')
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddress.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
def testEqual(self):
self.assertTrue(self.ipv4_interface ==
ipaddress.IPv4Interface('1.2.3.4/24'))
self.assertFalse(self.ipv4_interface ==
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertFalse(self.ipv4_interface ==
ipaddress.IPv6Interface('::1.2.3.4/24'))
self.assertFalse(self.ipv4_interface == '')
self.assertFalse(self.ipv4_interface == [])
self.assertFalse(self.ipv4_interface == 2)
self.assertTrue(self.ipv6_interface ==
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6_interface ==
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
self.assertFalse(self.ipv6_interface ==
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertFalse(self.ipv6_interface == '')
self.assertFalse(self.ipv6_interface == [])
self.assertFalse(self.ipv6_interface == 2)
def testNotEqual(self):
self.assertFalse(self.ipv4_interface !=
ipaddress.IPv4Interface('1.2.3.4/24'))
self.assertTrue(self.ipv4_interface !=
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertTrue(self.ipv4_interface !=
ipaddress.IPv6Interface('::1.2.3.4/24'))
self.assertTrue(self.ipv4_interface != '')
self.assertTrue(self.ipv4_interface != [])
self.assertTrue(self.ipv4_interface != 2)
self.assertTrue(self.ipv4_address !=
ipaddress.IPv4Address('1.2.3.5'))
self.assertTrue(self.ipv4_address != '')
self.assertTrue(self.ipv4_address != [])
self.assertTrue(self.ipv4_address != 2)
self.assertFalse(self.ipv6_interface !=
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/64'))
self.assertTrue(self.ipv6_interface !=
ipaddress.IPv6Interface('2001:658:22a:cafe:200::1/63'))
self.assertTrue(self.ipv6_interface !=
ipaddress.IPv4Interface('1.2.3.4/23'))
self.assertTrue(self.ipv6_interface != '')
self.assertTrue(self.ipv6_interface != [])
self.assertTrue(self.ipv6_interface != 2)
self.assertTrue(self.ipv6_address !=
ipaddress.IPv4Address('1.2.3.4'))
self.assertTrue(self.ipv6_address != '')
self.assertTrue(self.ipv6_address != [])
self.assertTrue(self.ipv6_address != 2)
def testSlash32Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface(
'1.2.3.4/255.255.255.255')), '1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEqual(str(ipaddress.IPv6Interface('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEqual(str(ipaddress.IPv4Interface('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
def testCollapsing(self):
# test only IP addresses including some duplicates
ip1 = ipaddress.IPv4Address('1.1.1.0')
ip2 = ipaddress.IPv4Address('1.1.1.1')
ip3 = ipaddress.IPv4Address('1.1.1.2')
ip4 = ipaddress.IPv4Address('1.1.1.3')
ip5 = ipaddress.IPv4Address('1.1.1.4')
ip6 = ipaddress.IPv4Address('1.1.1.0')
        # check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses(
[ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.1.0/30'),
ipaddress.IPv4Network('1.1.1.4/32')])
# test a mix of IP addresses and networks including some duplicates
ip1 = ipaddress.IPv4Address('1.1.1.0')
ip2 = ipaddress.IPv4Address('1.1.1.1')
ip3 = ipaddress.IPv4Address('1.1.1.2')
ip4 = ipaddress.IPv4Address('1.1.1.3')
#ip5 = ipaddress.IPv4Interface('1.1.1.4/30')
#ip6 = ipaddress.IPv4Interface('1.1.1.4/30')
        # check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.1.0/30')])
# test only IP networks
ip1 = ipaddress.IPv4Network('1.1.0.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/24')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
ip4 = ipaddress.IPv4Network('1.1.3.0/24')
ip5 = ipaddress.IPv4Network('1.1.4.0/24')
        # stored in no particular order, so collapse_addresses has to sort
        # them itself
ip6 = ipaddress.IPv4Network('1.1.0.0/22')
        # check that addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3, ip4, ip5,
ip6])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.0.0/22'),
ipaddress.IPv4Network('1.1.4.0/24')])
# test that two addresses are supernet'ed properly
collapsed = ipaddress.collapse_addresses([ip1, ip2])
self.assertEqual(list(collapsed),
[ipaddress.IPv4Network('1.1.0.0/23')])
# test same IP networks
ip_same1 = ip_same2 = ipaddress.IPv4Network('1.1.1.1/32')
self.assertEqual(list(ipaddress.collapse_addresses(
[ip_same1, ip_same2])),
[ip_same1])
# test same IP addresses
ip_same1 = ip_same2 = ipaddress.IPv4Address('1.1.1.1')
self.assertEqual(list(ipaddress.collapse_addresses(
[ip_same1, ip_same2])),
[ipaddress.ip_network('1.1.1.1/32')])
ip1 = ipaddress.IPv6Network('2001::/100')
ip2 = ipaddress.IPv6Network('2001::/120')
ip3 = ipaddress.IPv6Network('2001::/96')
# test that ipv6 addresses are subsumed properly.
collapsed = ipaddress.collapse_addresses([ip1, ip2, ip3])
self.assertEqual(list(collapsed), [ip3])
# the toejam test
addr_tuples = [
(ipaddress.ip_address('1.1.1.1'),
ipaddress.ip_address('::1')),
(ipaddress.IPv4Network('1.1.0.0/24'),
ipaddress.IPv6Network('2001::/120')),
(ipaddress.IPv4Network('1.1.0.0/32'),
ipaddress.IPv6Network('2001::/128')),
]
for ip1, ip2 in addr_tuples:
self.assertRaises(TypeError, ipaddress.collapse_addresses,
[ip1, ip2])
def testSummarizing(self):
#ip = ipaddress.ip_address
#ipnet = ipaddress.ip_network
summarize = ipaddress.summarize_address_range
ip1 = ipaddress.ip_address('1.1.1.0')
ip2 = ipaddress.ip_address('1.1.1.255')
# summarize works only for IPv4 & IPv6
class IPv7Address(ipaddress.IPv6Address):
@property
def version(self):
return 7
ip_invalid1 = IPv7Address('::1')
ip_invalid2 = IPv7Address('::1')
self.assertRaises(ValueError, list,
summarize(ip_invalid1, ip_invalid2))
# test that a summary over ip4 & ip6 fails
self.assertRaises(TypeError, list,
summarize(ip1, ipaddress.IPv6Address('::1')))
# test a /24 is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1.1.1.0/24'))
# test an IPv4 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('1.1.1.8')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1.1.1.0/29'),
ipaddress.ip_network('1.1.1.8')])
# all!
ip1 = ipaddress.IPv4Address(0)
ip2 = ipaddress.IPv4Address(ipaddress.IPv4Address._ALL_ONES)
self.assertEqual([ipaddress.IPv4Network('0.0.0.0/0')],
list(summarize(ip1, ip2)))
ip1 = ipaddress.ip_address('1::')
ip2 = ipaddress.ip_address('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
        # test an IPv6 range is summarized properly
self.assertEqual(list(summarize(ip1, ip2))[0],
ipaddress.ip_network('1::/16'))
# test an IPv6 range that isn't on a network byte boundary
ip2 = ipaddress.ip_address('2::')
self.assertEqual(list(summarize(ip1, ip2)),
[ipaddress.ip_network('1::/16'),
ipaddress.ip_network('2::/128')])
# test exception raised when first is greater than last
self.assertRaises(ValueError, list,
summarize(ipaddress.ip_address('1.1.1.0'),
ipaddress.ip_address('1.1.0.0')))
# test exception raised when first and last aren't IP addresses
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_network('1.1.1.0'),
ipaddress.ip_network('1.1.0.0')))
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_network('1.1.1.0'),
ipaddress.ip_network('1.1.0.0')))
# test exception raised when first and last are not same version
self.assertRaises(TypeError, list,
summarize(ipaddress.ip_address('::'),
ipaddress.ip_network('1.1.0.0')))
def testAddressComparison(self):
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.1'))
self.assertTrue(ipaddress.ip_address('1.1.1.1') <=
ipaddress.ip_address('1.1.1.2'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::1'))
self.assertTrue(ipaddress.ip_address('::1') <=
ipaddress.ip_address('::2'))
def testInterfaceComparison(self):
self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
ipaddress.ip_interface('1.1.1.1'))
self.assertTrue(ipaddress.ip_interface('1.1.1.1') <=
ipaddress.ip_interface('1.1.1.2'))
self.assertTrue(ipaddress.ip_interface('::1') <=
ipaddress.ip_interface('::1'))
self.assertTrue(ipaddress.ip_interface('::1') <=
ipaddress.ip_interface('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddress.IPv4Network('1.1.1.0/24')
ip2 = ipaddress.IPv4Network('1.1.1.0/32')
ip3 = ipaddress.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip1), 0)
# if addresses are the same, sort by netmask
self.assertEqual(ip1.compare_networks(ip2), -1)
self.assertEqual(ip2.compare_networks(ip1), 1)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertEqual(ip3.compare_networks(ip1), 1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddress.IPv6Network('2001:2000::/96')
ip2 = ipaddress.IPv6Network('2001:2001::/96')
ip3 = ipaddress.IPv6Network('2001:ffff:2000::/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEqual(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols.
# Should always raise a TypeError.
self.assertRaises(TypeError,
self.ipv4_network.compare_networks,
self.ipv6_network)
ipv6 = ipaddress.IPv6Interface('::/0')
ipv4 = ipaddress.IPv4Interface('0.0.0.0/0')
self.assertRaises(TypeError, ipv4.__lt__, ipv6)
self.assertRaises(TypeError, ipv4.__gt__, ipv6)
self.assertRaises(TypeError, ipv6.__lt__, ipv4)
self.assertRaises(TypeError, ipv6.__gt__, ipv4)
# Regression test for issue 19.
ip1 = ipaddress.ip_network('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddress.ip_network('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddress.ip_network('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# Regression test for issue 28.
ip1 = ipaddress.ip_network('10.10.10.0/31')
ip2 = ipaddress.ip_network('10.10.10.0')
ip3 = ipaddress.ip_network('10.10.10.2/31')
ip4 = ipaddress.ip_network('10.10.10.2')
sorted = [ip1, ip2, ip3, ip4]
unsorted = [ip2, ip4, ip1, ip3]
unsorted.sort()
self.assertEqual(sorted, unsorted)
unsorted = [ip4, ip1, ip3, ip2]
unsorted.sort()
self.assertEqual(sorted, unsorted)
self.assertIs(ip1.__lt__(ipaddress.ip_address('10.10.10.0')),
NotImplemented)
self.assertIs(ip2.__lt__(ipaddress.ip_address('10.10.10.0')),
NotImplemented)
# <=, >=
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('1.1.1.1') <=
ipaddress.ip_network('1.1.1.2'))
self.assertFalse(ipaddress.ip_network('1.1.1.2') <=
ipaddress.ip_network('1.1.1.1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::1'))
self.assertTrue(ipaddress.ip_network('::1') <=
ipaddress.ip_network('::2'))
self.assertFalse(ipaddress.ip_network('::2') <=
ipaddress.ip_network('::1'))
def testStrictNetworks(self):
self.assertRaises(ValueError, ipaddress.ip_network, '192.168.1.1/24')
self.assertRaises(ValueError, ipaddress.ip_network, '::1/120')
def testOverlaps(self):
other = ipaddress.IPv4Network('1.2.3.0/30')
other2 = ipaddress.IPv4Network('1.2.2.0/24')
other3 = ipaddress.IPv4Network('1.2.2.64/26')
self.assertTrue(self.ipv4_network.overlaps(other))
self.assertFalse(self.ipv4_network.overlaps(other2))
self.assertTrue(other2.overlaps(other3))
def testEmbeddedIpv4(self):
ipv4_string = '192.168.0.1'
ipv4 = ipaddress.IPv4Interface(ipv4_string)
v4compat_ipv6 = ipaddress.IPv6Interface('::%s' % ipv4_string)
self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
v4mapped_ipv6 = ipaddress.IPv6Interface('::ffff:%s' % ipv4_string)
self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
self.assertRaises(ipaddress.AddressValueError, ipaddress.IPv6Interface,
'2001:1.1.1.1:1.1.1.1')
# Issue 67: IPv6 with embedded IPv4 address not recognized.
def testIPv6AddressTooLarge(self):
# RFC4291 2.5.5.2
self.assertEqual(ipaddress.ip_address('::FFFF:192.0.2.1'),
ipaddress.ip_address('::FFFF:c000:201'))
# RFC4291 2.2 (part 3) x::d.d.d.d
self.assertEqual(ipaddress.ip_address('FFFF::192.0.2.1'),
ipaddress.ip_address('FFFF::c000:201'))
def testIPVersion(self):
self.assertEqual(self.ipv4_address.version, 4)
self.assertEqual(self.ipv6_address.version, 6)
def testMaxPrefixLength(self):
self.assertEqual(self.ipv4_interface.max_prefixlen, 32)
self.assertEqual(self.ipv6_interface.max_prefixlen, 128)
def testPacked(self):
self.assertEqual(self.ipv4_address.packed,
b'\x01\x02\x03\x04')
self.assertEqual(ipaddress.IPv4Interface('255.254.253.252').packed,
b'\xff\xfe\xfd\xfc')
self.assertEqual(self.ipv6_address.packed,
b'\x20\x01\x06\x58\x02\x2a\xca\xfe'
b'\x02\x00\x00\x00\x00\x00\x00\x01')
self.assertEqual(ipaddress.IPv6Interface('ffff:2:3:4:ffff::').packed,
b'\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
+ b'\x00' * 6)
self.assertEqual(ipaddress.IPv6Interface('::1:0:0:0:0').packed,
b'\x00' * 6 + b'\x00\x01' + b'\x00' * 8)
def testIpType(self):
ipv4net = ipaddress.ip_network('1.2.3.4')
ipv4addr = ipaddress.ip_address('1.2.3.4')
ipv6net = ipaddress.ip_network('::1.2.3.4')
ipv6addr = ipaddress.ip_address('::1.2.3.4')
self.assertEqual(ipaddress.IPv4Network, type(ipv4net))
self.assertEqual(ipaddress.IPv4Address, type(ipv4addr))
self.assertEqual(ipaddress.IPv6Network, type(ipv6net))
self.assertEqual(ipaddress.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
# test networks
self.assertEqual(True, ipaddress.ip_interface(
'224.1.1.1/31').is_multicast)
self.assertEqual(False, ipaddress.ip_network('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddress.ip_network('240.0.0.0').is_reserved)
self.assertEqual(True, ipaddress.ip_interface(
'192.168.1.1/17').is_private)
self.assertEqual(False, ipaddress.ip_network('192.169.0.0').is_private)
self.assertEqual(True, ipaddress.ip_network(
'10.255.255.255').is_private)
self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_private)
self.assertEqual(False, ipaddress.ip_network('11.0.0.0').is_reserved)
self.assertEqual(True, ipaddress.ip_network(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_network('172.32.0.0').is_private)
self.assertEqual(True,
ipaddress.ip_network('169.254.1.0/24').is_link_local)
self.assertEqual(True,
ipaddress.ip_interface(
'169.254.100.200/24').is_link_local)
self.assertEqual(False,
ipaddress.ip_interface(
'169.255.100.200/24').is_link_local)
self.assertEqual(True,
ipaddress.ip_network(
'127.100.200.254/32').is_loopback)
self.assertEqual(True, ipaddress.ip_network(
'127.42.0.0/16').is_loopback)
self.assertEqual(False, ipaddress.ip_network('128.0.0.0').is_loopback)
self.assertEqual(False,
ipaddress.ip_network('100.64.0.0/10').is_private)
self.assertEqual(False, ipaddress.ip_network('100.64.0.0/10').is_global)
self.assertEqual(True,
ipaddress.ip_network('192.0.2.128/25').is_private)
self.assertEqual(True,
ipaddress.ip_network('192.0.3.0/24').is_global)
# test addresses
self.assertEqual(True, ipaddress.ip_address('0.0.0.0').is_unspecified)
self.assertEqual(True, ipaddress.ip_address('224.1.1.1').is_multicast)
self.assertEqual(False, ipaddress.ip_address('240.0.0.0').is_multicast)
self.assertEqual(True, ipaddress.ip_address('240.0.0.1').is_reserved)
self.assertEqual(False,
ipaddress.ip_address('239.255.255.255').is_reserved)
self.assertEqual(True, ipaddress.ip_address('192.168.1.1').is_private)
self.assertEqual(False, ipaddress.ip_address('192.169.0.0').is_private)
self.assertEqual(True, ipaddress.ip_address(
'10.255.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('11.0.0.0').is_private)
self.assertEqual(True, ipaddress.ip_address(
'172.31.255.255').is_private)
self.assertEqual(False, ipaddress.ip_address('172.32.0.0').is_private)
self.assertEqual(True,
ipaddress.ip_address('169.254.100.200').is_link_local)
self.assertEqual(False,
ipaddress.ip_address('169.255.100.200').is_link_local)
self.assertEqual(True,
ipaddress.ip_address('127.100.200.254').is_loopback)
self.assertEqual(True, ipaddress.ip_address('127.42.0.0').is_loopback)
self.assertEqual(False, ipaddress.ip_address('128.0.0.0').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0.0.0.0').is_unspecified)
def testReservedIpv6(self):
self.assertEqual(True, ipaddress.ip_network('ffff::').is_multicast)
self.assertEqual(True, ipaddress.ip_network(2**128 - 1).is_multicast)
self.assertEqual(True, ipaddress.ip_network('ff00::').is_multicast)
self.assertEqual(False, ipaddress.ip_network('fdff::').is_multicast)
self.assertEqual(True, ipaddress.ip_network('fecf::').is_site_local)
self.assertEqual(True, ipaddress.ip_network(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_network(
'fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_network('ff00::').is_site_local)
self.assertEqual(True, ipaddress.ip_network('fc00::').is_private)
self.assertEqual(True, ipaddress.ip_network(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_network('fbff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_network('fe00::').is_private)
self.assertEqual(True, ipaddress.ip_network('fea0::').is_link_local)
self.assertEqual(True, ipaddress.ip_network(
'febf:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_network(
'fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_network('fec0::').is_link_local)
self.assertEqual(True, ipaddress.ip_interface('0:0::0:01').is_loopback)
self.assertEqual(False, ipaddress.ip_interface('::1/127').is_loopback)
self.assertEqual(False, ipaddress.ip_network('::').is_loopback)
self.assertEqual(False, ipaddress.ip_network('::2').is_loopback)
self.assertEqual(True, ipaddress.ip_network('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_network('::1').is_unspecified)
self.assertEqual(False, ipaddress.ip_network('::/127').is_unspecified)
self.assertEqual(True,
ipaddress.ip_network('2001::1/128').is_private)
self.assertEqual(True,
ipaddress.ip_network('200::1/128').is_global)
# test addresses
self.assertEqual(True, ipaddress.ip_address('ffff::').is_multicast)
self.assertEqual(True, ipaddress.ip_address(2**128 - 1).is_multicast)
self.assertEqual(True, ipaddress.ip_address('ff00::').is_multicast)
self.assertEqual(False, ipaddress.ip_address('fdff::').is_multicast)
self.assertEqual(True, ipaddress.ip_address('fecf::').is_site_local)
self.assertEqual(True, ipaddress.ip_address(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_address(
'fbf:ffff::').is_site_local)
self.assertEqual(False, ipaddress.ip_address('ff00::').is_site_local)
self.assertEqual(True, ipaddress.ip_address('fc00::').is_private)
self.assertEqual(True, ipaddress.ip_address(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_address('fbff:ffff::').is_private)
self.assertEqual(False, ipaddress.ip_address('fe00::').is_private)
self.assertEqual(True, ipaddress.ip_address('fea0::').is_link_local)
self.assertEqual(True, ipaddress.ip_address(
'febf:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_address(
'fe7f:ffff::').is_link_local)
self.assertEqual(False, ipaddress.ip_address('fec0::').is_link_local)
self.assertEqual(True, ipaddress.ip_address('0:0::0:01').is_loopback)
self.assertEqual(True, ipaddress.ip_address('::1').is_loopback)
self.assertEqual(False, ipaddress.ip_address('::2').is_loopback)
self.assertEqual(True, ipaddress.ip_address('0::0').is_unspecified)
self.assertEqual(False, ipaddress.ip_address('::1').is_unspecified)
# some generic IETF reserved addresses
self.assertEqual(True, ipaddress.ip_address('100::').is_reserved)
self.assertEqual(True, ipaddress.ip_network('4000::1/128').is_reserved)
def testIpv4Mapped(self):
self.assertEqual(
ipaddress.ip_address('::ffff:192.168.1.1').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
self.assertEqual(ipaddress.ip_address('::c0a8:101').ipv4_mapped, None)
self.assertEqual(ipaddress.ip_address('::ffff:c0a8:101').ipv4_mapped,
ipaddress.ip_address('192.168.1.1'))
def testAddrExclude(self):
addr1 = ipaddress.ip_network('10.1.1.0/24')
addr2 = ipaddress.ip_network('10.1.1.0/26')
addr3 = ipaddress.ip_network('10.2.1.0/24')
addr4 = ipaddress.ip_address('10.1.1.0')
addr5 = ipaddress.ip_network('2001:db8::0/32')
self.assertEqual(sorted(list(addr1.address_exclude(addr2))),
[ipaddress.ip_network('10.1.1.64/26'),
ipaddress.ip_network('10.1.1.128/25')])
self.assertRaises(ValueError, list, addr1.address_exclude(addr3))
self.assertRaises(TypeError, list, addr1.address_exclude(addr4))
self.assertRaises(TypeError, list, addr1.address_exclude(addr5))
self.assertEqual(list(addr1.address_exclude(addr1)), [])
def testHash(self):
self.assertEqual(hash(ipaddress.ip_interface('10.1.1.0/24')),
hash(ipaddress.ip_interface('10.1.1.0/24')))
self.assertEqual(hash(ipaddress.ip_network('10.1.1.0/24')),
hash(ipaddress.ip_network('10.1.1.0/24')))
self.assertEqual(hash(ipaddress.ip_address('10.1.1.0')),
hash(ipaddress.ip_address('10.1.1.0')))
        # issue 70: equal addresses built from str and int must hash the same
self.assertEqual(hash(ipaddress.ip_address('1.2.3.4')),
hash(ipaddress.ip_address(
int(ipaddress.ip_address('1.2.3.4')._ip))))
ip1 = ipaddress.ip_address('10.1.1.0')
ip2 = ipaddress.ip_address('1::')
dummy = {}
dummy[self.ipv4_address] = None
dummy[self.ipv6_address] = None
dummy[ip1] = None
dummy[ip2] = None
self.assertIn(self.ipv4_address, dummy)
self.assertIn(ip2, dummy)
def testIPBases(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.compressed)
net = self.ipv6_network
self.assertRaises(ValueError, net._string_from_ip_int, 2**128 + 1)
def testIPv6NetworkHelpers(self):
net = self.ipv6_network
self.assertEqual('2001:658:22a:cafe::/64', net.with_prefixlen)
self.assertEqual('2001:658:22a:cafe::/ffff:ffff:ffff:ffff::',
net.with_netmask)
self.assertEqual('2001:658:22a:cafe::/::ffff:ffff:ffff:ffff',
net.with_hostmask)
self.assertEqual('2001:658:22a:cafe::/64', str(net))
def testIPv4NetworkHelpers(self):
net = self.ipv4_network
self.assertEqual('1.2.3.0/24', net.with_prefixlen)
self.assertEqual('1.2.3.0/255.255.255.0', net.with_netmask)
self.assertEqual('1.2.3.0/0.0.0.255', net.with_hostmask)
self.assertEqual('1.2.3.0/24', str(net))
def testCopyConstructor(self):
addr1 = ipaddress.ip_network('10.1.1.0/24')
addr2 = ipaddress.ip_network(addr1)
addr3 = ipaddress.ip_interface('2001:658:22a:cafe:200::1/64')
addr4 = ipaddress.ip_interface(addr3)
addr5 = ipaddress.IPv4Address('1.1.1.1')
addr6 = ipaddress.IPv6Address('2001:658:22a:cafe:200::1')
self.assertEqual(addr1, addr2)
self.assertEqual(addr3, addr4)
self.assertEqual(addr5, ipaddress.IPv4Address(addr5))
self.assertEqual(addr6, ipaddress.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
            '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
'::1.2.3.4': '::102:304/128',
'1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
'::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
'::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
'7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
'0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
}
for uncompressed, compressed in list(test_addresses.items()):
self.assertEqual(compressed, str(ipaddress.IPv6Interface(
uncompressed)))
def testExplodeShortHandIpStr(self):
addr1 = ipaddress.IPv6Interface('2001::1')
addr2 = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
addr3 = ipaddress.IPv6Network('2001::/96')
addr4 = ipaddress.IPv4Address('192.168.178.1')
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
addr1.exploded)
self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
ipaddress.IPv6Interface('::1/128').exploded)
# issue 77
self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
addr2.exploded)
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0000/96',
addr3.exploded)
self.assertEqual('192.168.178.1', addr4.exploded)
def testReversePointer(self):
addr1 = ipaddress.IPv4Address('127.0.0.1')
addr2 = ipaddress.IPv6Address('2001:db8::1')
self.assertEqual('1.0.0.127.in-addr.arpa', addr1.reverse_pointer)
self.assertEqual('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.' +
'b.d.0.1.0.0.2.ip6.arpa',
addr2.reverse_pointer)
def testIntRepresentation(self):
self.assertEqual(16909060, int(self.ipv4_address))
self.assertEqual(42540616829182469433547762482097946625,
int(self.ipv6_address))
def testForceVersion(self):
self.assertEqual(ipaddress.ip_network(1).version, 4)
self.assertEqual(ipaddress.IPv6Network(1).version, 6)
def testWithStar(self):
self.assertEqual(self.ipv4_interface.with_prefixlen, "1.2.3.4/24")
self.assertEqual(self.ipv4_interface.with_netmask,
"1.2.3.4/255.255.255.0")
self.assertEqual(self.ipv4_interface.with_hostmask,
"1.2.3.4/0.0.0.255")
self.assertEqual(self.ipv6_interface.with_prefixlen,
'2001:658:22a:cafe:200::1/64')
self.assertEqual(self.ipv6_interface.with_netmask,
'2001:658:22a:cafe:200::1/ffff:ffff:ffff:ffff::')
        # this probably doesn't make much sense, but it's included for
        # compatibility with ipv4
self.assertEqual(self.ipv6_interface.with_hostmask,
'2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
# V4 - make sure we're empty
self.assertNotIn('network_address', self.ipv4_network._cache)
self.assertNotIn('broadcast_address', self.ipv4_network._cache)
self.assertNotIn('hostmask', self.ipv4_network._cache)
# V4 - populate and test
self.assertEqual(self.ipv4_network.network_address,
ipaddress.IPv4Address('1.2.3.0'))
self.assertEqual(self.ipv4_network.broadcast_address,
ipaddress.IPv4Address('1.2.3.255'))
self.assertEqual(self.ipv4_network.hostmask,
ipaddress.IPv4Address('0.0.0.255'))
# V4 - check we're cached
self.assertIn('broadcast_address', self.ipv4_network._cache)
self.assertIn('hostmask', self.ipv4_network._cache)
# V6 - make sure we're empty
self.assertNotIn('broadcast_address', self.ipv6_network._cache)
self.assertNotIn('hostmask', self.ipv6_network._cache)
# V6 - populate and test
self.assertEqual(self.ipv6_network.network_address,
ipaddress.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(self.ipv6_interface.network.network_address,
ipaddress.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(
self.ipv6_network.broadcast_address,
ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6_network.hostmask,
ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
self.assertEqual(
self.ipv6_interface.network.broadcast_address,
ipaddress.IPv6Address('2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6_interface.network.hostmask,
ipaddress.IPv6Address('::ffff:ffff:ffff:ffff'))
# V6 - check we're cached
self.assertIn('broadcast_address', self.ipv6_network._cache)
self.assertIn('hostmask', self.ipv6_network._cache)
self.assertIn('broadcast_address', self.ipv6_interface.network._cache)
self.assertIn('hostmask', self.ipv6_interface.network._cache)
def testTeredo(self):
# stolen from wikipedia
server = ipaddress.IPv4Address('65.54.227.120')
client = ipaddress.IPv4Address('192.0.2.45')
teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
self.assertEqual((server, client),
ipaddress.ip_address(teredo_addr).teredo)
bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
self.assertFalse(ipaddress.ip_address(bad_addr).teredo)
        # issue 77
teredo_addr = ipaddress.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
self.assertEqual((ipaddress.IPv4Address('94.245.121.253'),
ipaddress.IPv4Address('95.26.244.94')),
teredo_addr.teredo)
def testsixtofour(self):
sixtofouraddr = ipaddress.ip_address('2002:ac1d:2d64::1')
bad_addr = ipaddress.ip_address('2000:ac1d:2d64::1')
self.assertEqual(ipaddress.IPv4Address('172.29.45.100'),
sixtofouraddr.sixtofour)
self.assertFalse(bad_addr.sixtofour)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -7,343,013,277,948,015,000 | 43.173475 | 80 | 0.585574 | false |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/reportlab-3.2.0/docs/userguide/ch7_custom.py | 16 | 2670 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/docs/userguide/ch7_custom.py
from tools.docco.rl_doc_utils import *
heading1("Writing your own $Flowable$ Objects")
disc("""
Flowables are intended to be an open standard for creating
reusable report content, and you can easily create your
own objects. We hope that over time we will build up
a library of contributions, giving reportlab users a
rich selection of charts, graphics and other "report
widgets" they can use in their own reports. This section
shows you how to create your own flowables.""")
todo("""we should put the Figure class in the
standard library, as it is a very useful base.""")
heading2("A very simple $Flowable$")
disc("""
Recall the $hand$ function from the $pdfgen$ section of this user guide which
generated a drawing of a hand as a closed figure composed from Bezier curves.
""")
illust(examples.hand, "a hand")
disc("""
To embed this or any other drawing in a Platypus flowable we must define a
subclass of $Flowable$
with at least a $wrap$ method and a $draw$ method.
""")
eg(examples.testhandannotation)
disc("""
The $wrap$ method must provide the size of the drawing -- it is used by
the Platypus mainloop to decide whether this element fits in the space remaining
on the current frame. The $draw$ method performs the drawing of the object after
the Platypus mainloop has translated the $(0,0)$ origin to an appropriate location
in an appropriate frame.
""")
disc("""
Below are some example uses of the $HandAnnotation$ flowable.
""")
from reportlab.lib.colors import blue, pink, yellow, cyan, brown
from reportlab.lib.units import inch
handnote()
disc("""The default.""")
handnote(size=inch)
disc("""Just one inch high.""")
handnote(xoffset=3*inch, size=inch, strokecolor=blue, fillcolor=cyan)
disc("""One inch high and shifted to the left with blue and cyan.""")
heading2("Modifying a Built in $Flowable$")
disc("""To modify an existing flowable, you should create a derived class
and override the methods you need to change to get the desired behaviour""")
disc("""As an example to create a rotated image you need to override the wrap
and draw methods of the existing Image class""")
import os
from reportlab.platypus import *
I = '../images/replogo.gif'
EmbeddedCode("""
class RotatedImage(Image):
def wrap(self,availWidth,availHeight):
h, w = Image.wrap(self,availHeight,availWidth)
return w, h
def draw(self):
self.canv.rotate(90)
Image.draw(self)
I = RotatedImage('%s')
I.hAlign = 'CENTER'
""" % I,'I')
| mit | 826,627,433,552,622,300 | 31.962963 | 117 | 0.741948 | false |
Northrend/mxnet | example/sparse/matrix_factorization/data.py | 9 | 2524 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
from mxnet.test_utils import DummyIter
def get_movielens_data(data_dir, prefix):
if not os.path.exists(os.path.join(data_dir, "ml-10M100K")):
mx.test_utils.get_zip_data(data_dir,
"http://files.grouplens.org/datasets/movielens/%s.zip" % prefix,
prefix + ".zip")
assert os.path.exists(os.path.join(data_dir, "ml-10M100K"))
os.system("cd data/ml-10M100K; chmod +x allbut.pl; sh split_ratings.sh; cd -;")
def get_movielens_iter(filename, batch_size, dummy_iter):
"""Not particularly fast code to parse the text file and load into NDArrays.
return two data iters, one for train, the other for validation.
"""
print("Preparing data iterators for " + filename + " ... ")
user = []
item = []
score = []
with open(filename, 'r') as f:
num_samples = 0
for line in f:
tks = line.strip().split('::')
if len(tks) != 4:
continue
num_samples += 1
user.append((tks[0]))
item.append((tks[1]))
score.append((tks[2]))
if dummy_iter and num_samples > batch_size * 10:
break
# convert to ndarrays
user = mx.nd.array(user, dtype='int32')
item = mx.nd.array(item)
score = mx.nd.array(score)
# prepare data iters
data_train = {'user':user, 'item':item}
label_train = {'score':score}
iter_train = mx.io.NDArrayIter(data=data_train,label=label_train,
batch_size=batch_size, shuffle=True)
iter_train = DummyIter(iter_train) if dummy_iter else iter_train
return mx.io.PrefetchingIter(iter_train)
| apache-2.0 | -3,642,792,981,543,736,300 | 39.709677 | 99 | 0.635499 | false |
google-coral/pycoral | pycoral/learn/backprop/softmax_regression.py | 1 | 6303 | # Lint as: python3
# pylint:disable=g-doc-args,g-short-docstring-punctuation,g-no-space-after-docstring-summary
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A softmax regression model for on-device backpropagation of the last layer."""
from pycoral.pybind import _pywrap_coral
class SoftmaxRegression:
"""An implementation of the softmax regression function (multinominal logistic
regression) that operates as the last layer of your classification model, and
allows for on-device training with backpropagation (for this layer only).
The input for this layer must be an image embedding, which should be the
output of your embedding extractor (the backbone of your model). Once given
here, the input is fed to a fully-connected layer where weights and bias are
applied, and then passed to the softmax function to receive the final
probability distribution based on the number of classes for your model:
training/inference input (image embedding) --> fully-connected layer -->
softmax function
When you're conducting training with :func:`train_with_sgd`, the process uses
a cross-entropy loss function to measure the error and then update the weights
of the fully-connected layer (backpropagation).
When you're satisfied with the inference accuracy, call
:func:`serialize_model` to create a new model in `bytes` with this
retrained layer appended to your embedding extractor. You can then run
inferences with this new model as usual (using TensorFlow Lite interpreter
API).
.. note::
This last layer (FC + softmax) in the retrained model always runs on the
host CPU instead of the Edge TPU. As long as the rest of your embedding
extractor model is compiled for the Edge TPU, then running this last layer
on the CPU should not significantly affect the inference speed.
"""
def __init__(self,
feature_dim=None,
num_classes=None,
weight_scale=0.01,
reg=0.0):
"""For more detail, see the `Stanford CS231 explanation of the softmax
classifier <http://cs231n.github.io/linear-classify/#softmax>`_.
Args:
feature_dim (int): The dimension of the input feature (length of the
feature vector).
num_classes (int): The number of output classes.
weight_scale (float): A weight factor for computing new weights. The
backpropagated weights are drawn from standard normal distribution, then
multiplied by this number to keep the scale small.
reg (float): The regularization strength.
"""
self.model = _pywrap_coral.SoftmaxRegressionModelWrapper(
feature_dim, num_classes, weight_scale, reg)
def serialize_model(self, in_model_path):
"""Appends learned weights to your TensorFlow Lite model and serializes it.
Beware that learned weights and biases are quantized from float32 to uint8.
Args:
in_model_path (str): Path to the embedding extractor model (``.tflite``
file).
Returns:
The TF Lite model with new weights, as a `bytes` object.
"""
return self.model.AppendLayersToEmbeddingExtractor(in_model_path)
def get_accuracy(self, mat_x, labels):
"""Calculates the model's accuracy (percentage correct).
The calculation is on performing inferences on the given data and labels.
Args:
mat_x (:obj:`numpy.array`): The input data (image embeddings) to test,
as a matrix of shape ``NxD``, where ``N`` is number of inputs to test
and ``D`` is the dimension of the input feature (length of the feature
vector).
labels (:obj:`numpy.array`): An array of the correct label indices that
correspond to the test data passed in ``mat_x`` (class label index in
one-hot vector).
Returns:
The accuracy (the percent correct) as a float.
"""
return self.model.GetAccuracy(mat_x, labels)
def train_with_sgd(self,
data,
num_iter,
learning_rate,
batch_size=100,
print_every=100):
"""Trains your model using stochastic gradient descent (SGD).
The training data must be structured in a dictionary as specified in the
``data`` argument below. Notably, the training/validation images must be
passed as image embeddings, not as the original image input. That is, run
the images through your embedding extractor (the backbone of your graph) and
use the resulting image embeddings here.
Args:
data (dict): A dictionary that maps ``'data_train'`` to an array of
training image embeddings, ``'labels_train'`` to an array of training
labels, ``'data_val'`` to an array of validation image embeddings, and
``'labels_val'`` to an array of validation labels.
num_iter (int): The number of iterations to train.
learning_rate (float): The learning rate (step size) to use in training.
batch_size (int): The number of training examples to use in each
iteration.
print_every (int): The number of iterations for which to print the loss,
and training/validation accuracy. For example, ``20`` prints the stats
for every 20 iterations. ``0`` disables printing.
"""
train_config = _pywrap_coral.TrainConfigWrapper(num_iter, batch_size,
print_every)
training_data = _pywrap_coral.TrainingDataWrapper(data['data_train'],
data['data_val'],
data['labels_train'],
data['labels_val'])
self.model.Train(training_data, train_config, learning_rate)
| apache-2.0 | 7,578,411,613,842,141,000 | 43.076923 | 92 | 0.676503 | false |
pixelrebel/st2 | st2common/st2common/util/system_info.py | 10 | 1118 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
__all__ = [
'get_host_info',
'get_process_info'
]
def get_host_info():
host_info = {
'hostname': socket.gethostname()
}
return host_info
def get_process_info():
process_info = {
'hostname': socket.gethostname(),
'pid': os.getpid()
}
return process_info
| apache-2.0 | 4,820,010,311,632,731,000 | 29.216216 | 74 | 0.709302 | false |
j5shi/Thruster | pylibs/ctypes/test/test_arrays.py | 1 | 4390 | import unittest
from ctypes import *
formats = "bBhHiIlLqQfd"
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
c_long, c_ulonglong, c_float, c_double, c_longdouble
class ArrayTestCase(unittest.TestCase):
def test_simple(self):
# create classes holding simple numeric types, and check
# various properties.
init = range(15, 25)
for fmt in formats:
alen = len(init)
int_array = ARRAY(fmt, alen)
ia = int_array(*init)
# length of instance ok?
self.assertEqual(len(ia), alen)
# slot values ok?
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, init)
# change the items
from operator import setitem
new_values = range(42, 42+alen)
[setitem(ia, n, new_values[n]) for n in range(alen)]
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, new_values)
# are the items initialized to 0?
ia = int_array()
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, [0] * len(init))
# Too many initializers should be caught
self.assertRaises(IndexError, int_array, *range(alen*2))
CharArray = ARRAY(c_char, 3)
ca = CharArray("a", "b", "c")
# Should this work? It doesn't:
# CharArray("abc")
self.assertRaises(TypeError, CharArray, "abc")
self.assertEqual(ca[0], "a")
self.assertEqual(ca[1], "b")
self.assertEqual(ca[2], "c")
self.assertEqual(ca[-3], "a")
self.assertEqual(ca[-2], "b")
self.assertEqual(ca[-1], "c")
self.assertEqual(len(ca), 3)
# slicing is now supported, but not extended slicing (3-argument)!
from operator import getslice, delitem
self.assertRaises(TypeError, getslice, ca, 0, 1, -1)
# cannot delete items
self.assertRaises(TypeError, delitem, ca, 0)
def test_numeric_arrays(self):
alen = 5
numarray = ARRAY(c_int, alen)
na = numarray()
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0] * alen)
na = numarray(*[c_int()] * alen)
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0]*alen)
na = numarray(1, 2, 3, 4, 5)
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
def test_classcache(self):
self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4))
self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3))
def test_from_address(self):
# Failed with 0.9.8, reported by JUrner
p = create_string_buffer("foo")
sz = (c_char * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
try:
create_unicode_buffer
except NameError:
pass
else:
def test_from_addressW(self):
p = create_unicode_buffer("foo")
sz = (c_wchar * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
def test_cache(self):
# Array types are cached internally in the _ctypes extension,
# in a WeakValueDictionary. Make sure the array type is
# removed from the cache when the itemtype goes away. This
# test will not fail, but will show a leak in the testsuite.
# Create a new type:
class my_int(c_int):
pass
# Create a new array type based on it:
t1 = my_int * 1
t2 = my_int * 1
self.assertIs(t1, t2)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 2,558,275,818,208,471,000 | 30.761194 | 74 | 0.530752 | false |
ScottBuchanan/eden | modules/tests/volunteer/create_volunteer_programme.py | 27 | 2099 | """ Sahana Eden Automated Test - HRM001 Create a Volunteer Programme
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class CreateVolunteerProgramme(SeleniumUnitTest):
def test_hrm001_create_volunteer_programme(self):
"""
@case: HRM001
@description: Create a Volunteer Programme
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
print "\n"
self.login(account="admin", nexturl="vol/programme/create")
self.create("hrm_programme",
[( "name",
"SAR"
),
( "comments",
"Comment/Description of the vol programme goes here."),
]
)
| mit | -2,989,896,398,113,434,000 | 40.176471 | 110 | 0.661744 | false |
pyKun/rally | rally/plugins/common/runners/rps.py | 3 | 8341 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import multiprocessing
import threading
import time
from rally.common import log as logging
from rally.common import utils
from rally import consts
from rally.task import runner
LOG = logging.getLogger(__name__)
def _worker_process(queue, iteration_gen, timeout, rps, times,
max_concurrent, context, cls, method_name,
args, aborted, info):
"""Start scenario within threads.
    Spawn N threads per second. Each thread runs the scenario once, and appends
    the result to the queue. A maximum of max_concurrent threads will be run
    concurrently.
:param queue: queue object to append results
:param iteration_gen: next iteration number generator
:param timeout: operation's timeout
:param rps: number of scenario iterations to be run per one second
:param times: total number of scenario iterations to be run
:param max_concurrent: maximum worker concurrency
:param context: scenario context object
:param cls: scenario class
:param method_name: scenario method name
:param args: scenario args
:param aborted: multiprocessing.Event that aborts load generation if
the flag is set
    :param info: info about the total process count and this worker's
                 process counter
"""
pool = collections.deque()
start = time.time()
sleep = 1.0 / rps
runner._log_worker_info(times=times, rps=rps, timeout=timeout,
cls=cls, method_name=method_name, args=args)
time.sleep(
(sleep * info["processes_counter"]) / info["processes_to_start"])
i = 0
while i < times and not aborted.is_set():
scenario_context = runner._get_scenario_context(context)
scenario_args = (next(iteration_gen), cls, method_name,
scenario_context, args)
worker_args = (queue, scenario_args)
thread = threading.Thread(target=runner._worker_thread,
args=worker_args)
i += 1
thread.start()
pool.append(thread)
time_gap = time.time() - start
real_rps = i / time_gap if time_gap else "Infinity"
LOG.debug("Worker: %s rps: %s (requested rps: %s)" %
(i, real_rps, rps))
# try to join latest thread(s) until it finished, or until time to
# start new thread (if we have concurrent slots available)
while i / (time.time() - start) > rps or len(pool) >= max_concurrent:
if pool:
pool[0].join(0.001)
if not pool[0].isAlive():
pool.popleft()
else:
time.sleep(0.001)
while pool:
thr = pool.popleft()
thr.join()
@runner.configure(name="rps")
class RPSScenarioRunner(runner.ScenarioRunner):
"""Scenario runner that does the job with specified frequency.
Every single benchmark scenario iteration is executed with specified
frequency (runs per second) in a pool of processes. The scenario will be
launched for a fixed number of times in total (specified in the config).
An example of a rps scenario is booting 1 VM per second. This
execution type is thus very helpful in understanding the maximal load that
a certain cloud can handle.
"""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"properties": {
"type": {
"type": "string"
},
"times": {
"type": "integer",
"minimum": 1
},
"rps": {
"type": "number",
"exclusiveMinimum": True,
"minimum": 0
},
"timeout": {
"type": "number",
},
"max_concurrency": {
"type": "integer",
"minimum": 1
},
"max_cpu_count": {
"type": "integer",
"minimum": 1
}
},
"additionalProperties": False
}
def _run_scenario(self, cls, method_name, context, args):
"""Runs the specified benchmark scenario with given arguments.
Every single benchmark scenario iteration is executed with specified
frequency (runs per second) in a pool of processes. The scenario will
be launched for a fixed number of times in total (specified in the
config).
:param cls: The Scenario class where the scenario is implemented
:param method_name: Name of the method that implements the scenario
:param context: Benchmark context that contains users, admin & other
information, that was created before benchmark started.
:param args: Arguments to call the scenario method with
:returns: List of results fore each single scenario iteration,
where each result is a dictionary
"""
times = self.config["times"]
timeout = self.config.get("timeout", 0) # 0 means no timeout
iteration_gen = utils.RAMInt()
cpu_count = multiprocessing.cpu_count()
max_cpu_used = min(cpu_count,
self.config.get("max_cpu_count", cpu_count))
processes_to_start = min(max_cpu_used, times,
self.config.get("max_concurrency", times))
rps_per_worker = float(self.config["rps"]) / processes_to_start
times_per_worker, times_overhead = divmod(times, processes_to_start)
# Determine concurrency per worker
concurrency_per_worker, concurrency_overhead = divmod(
self.config.get("max_concurrency", times), processes_to_start)
self._log_debug_info(times=times, timeout=timeout,
max_cpu_used=max_cpu_used,
processes_to_start=processes_to_start,
rps_per_worker=rps_per_worker,
times_per_worker=times_per_worker,
times_overhead=times_overhead,
concurrency_per_worker=concurrency_per_worker,
concurrency_overhead=concurrency_overhead)
result_queue = multiprocessing.Queue()
def worker_args_gen(times_overhead, concurrency_overhead):
"""Generate arguments for process worker.
Remainder of threads per process division is distributed to
process workers equally - one thread per each process worker
until the remainder equals zero. The same logic is applied
to concurrency overhead.
:param times_overhead: remaining number of threads to be
distributed to workers
:param concurrency_overhead: remaining number of maximum
concurrent threads to be distributed
to workers
"""
while True:
yield (result_queue, iteration_gen, timeout, rps_per_worker,
times_per_worker + (times_overhead and 1),
concurrency_per_worker + (concurrency_overhead and 1),
context, cls, method_name, args, self.aborted)
if times_overhead:
times_overhead -= 1
if concurrency_overhead:
concurrency_overhead -= 1
process_pool = self._create_process_pool(
processes_to_start, _worker_process,
worker_args_gen(times_overhead, concurrency_overhead))
self._join_processes(process_pool, result_queue)
| apache-2.0 | 7,961,433,140,420,070,000 | 38.530806 | 79 | 0.591536 | false |
hryamzik/ansible | lib/ansible/modules/network/cnos/cnos_factory.py | 10 | 5299 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Reset to factory settings of Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_factory
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Reset the switch's startup configuration to default (factory) on devices running Lenovo CNOS
description:
- This module allows you to reset a switch's startup configuration. The method provides a way to reset the
startup configuration to its factory settings. This is helpful when you want to move the switch to another
topology as a new network device.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
     For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_factory.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_factory. These are written in the main.yml file of the tasks directory.
---
- name: Test Reset to factory
cnos_factory:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['ansible_ssh_user'] }}"
password: "{{ hostvars[inventory_hostname]['ansible_ssh_pass'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_factory_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Switch Startup Config is Reset to factory settings"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import time
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "save erase \n"
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
if not HAS_PARAMIKO:
module.fail_json(msg='paramiko is required for this module')
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# cnos.debugOutput(cliCommand)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand, "[n]", 2, remote_conn)
output = output + cnos.waitForDeviceResponse("y" + "\n", "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Switch Startup Config is Reset to factory settings ")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,922,102,385,127,597,000 | 33.633987 | 139 | 0.701264 | false |
LarsMichelsen/pmatic | doc/conf.py | 2 | 8509 | # -*- coding: utf-8 -*-
#
# pmatic documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 6 14:59:43 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinxcontrib.images',
]
# config for sphinxcontrib.images
images_config = {
"default_image_width" : "auto",
"default_show_title" : True,
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pmatic'
copyright = u'2016, Lars Michelsen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pmaticdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pmatic.tex', u'pmatic Documentation',
u'Lars Michelsen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pmatic', u'pmatic Documentation',
[u'Lars Michelsen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pmatic', u'pmatic Documentation',
u'Lars Michelsen', 'pmatic', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autodoc_member_order = "bysource"
intersphinx_mapping = {'python': ('https://docs.python.org/2.7', None)}
| gpl-2.0 | -8,543,716,869,542,141,000 | 30.054745 | 79 | 0.704548 | false |
j00bar/ansible | lib/ansible/modules/cloud/ovirt/ovirt_vmpools_facts.py | 26 | 3381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vmpools_facts
short_description: Retrieve facts about one or more oVirt vmpools
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve facts about one or more oVirt vmpools."
notes:
- "This module creates a new top-level C(ovirt_vmpools) fact, which
contains a list of vmpools."
options:
pattern:
description:
- "Search term which is accepted by oVirt search backend."
- "For example to search vmpool X: name=X"
extends_documentation_fragment: ovirt_facts
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather facts about all vm pools which names start with C(centos):
- ovirt_vmpools_facts:
pattern: name=centos*
- debug:
    var: ovirt_vm_pools
'''
RETURN = '''
ovirt_vm_pools:
description: "List of dictionaries describing the vmpools. Vm pool attribues are mapped to dictionary keys,
all vmpools attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm_pool."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_facts_full_argument_spec,
)
def main():
argument_spec = ovirt_facts_full_argument_spec(
pattern=dict(default='', required=False),
)
module = AnsibleModule(argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
vmpools_service = connection.system_service().vm_pools_service()
vmpools = vmpools_service.list(search=module.params['pattern'])
module.exit_json(
changed=False,
ansible_facts=dict(
ovirt_vm_pools=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in vmpools
],
),
)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
| gpl-3.0 | 453,567,739,846,116,900 | 30.598131 | 136 | 0.649216 | false |
doismellburning/django | tests/utils_tests/test_dateparse.py | 10 | 4611 | from __future__ import unicode_literals
from datetime import date, time, datetime, timedelta
import unittest
from django.utils.dateparse import parse_date, parse_time, parse_datetime, parse_duration
from django.utils.timezone import get_fixed_timezone
class DateParseTests(unittest.TestCase):
def test_parse_date(self):
# Valid inputs
self.assertEqual(parse_date('2012-04-23'), date(2012, 4, 23))
self.assertEqual(parse_date('2012-4-9'), date(2012, 4, 9))
# Invalid inputs
self.assertEqual(parse_date('20120423'), None)
self.assertRaises(ValueError, parse_date, '2012-04-56')
def test_parse_time(self):
# Valid inputs
self.assertEqual(parse_time('09:15:00'), time(9, 15))
self.assertEqual(parse_time('10:10'), time(10, 10))
self.assertEqual(parse_time('10:20:30.400'), time(10, 20, 30, 400000))
self.assertEqual(parse_time('4:8:16'), time(4, 8, 16))
# Invalid inputs
self.assertEqual(parse_time('091500'), None)
self.assertRaises(ValueError, parse_time, '09:15:90')
def test_parse_datetime(self):
# Valid inputs
self.assertEqual(parse_datetime('2012-04-23T09:15:00'),
datetime(2012, 4, 23, 9, 15))
self.assertEqual(parse_datetime('2012-4-9 4:8:16'),
datetime(2012, 4, 9, 4, 8, 16))
self.assertEqual(parse_datetime('2012-04-23T09:15:00Z'),
datetime(2012, 4, 23, 9, 15, 0, 0, get_fixed_timezone(0)))
self.assertEqual(parse_datetime('2012-4-9 4:8:16-0320'),
datetime(2012, 4, 9, 4, 8, 16, 0, get_fixed_timezone(-200)))
self.assertEqual(parse_datetime('2012-04-23T10:20:30.400+02:30'),
datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(150)))
self.assertEqual(parse_datetime('2012-04-23T10:20:30.400+02'),
datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(120)))
self.assertEqual(parse_datetime('2012-04-23T10:20:30.400-02'),
datetime(2012, 4, 23, 10, 20, 30, 400000, get_fixed_timezone(-120)))
# Invalid inputs
self.assertEqual(parse_datetime('20120423091500'), None)
self.assertRaises(ValueError, parse_datetime, '2012-04-56T09:15:90')
class DurationParseTests(unittest.TestCase):
def test_seconds(self):
self.assertEqual(parse_duration('30'), timedelta(seconds=30))
def test_minutes_seconds(self):
self.assertEqual(parse_duration('15:30'), timedelta(minutes=15, seconds=30))
self.assertEqual(parse_duration('5:30'), timedelta(minutes=5, seconds=30))
def test_hours_minutes_seconds(self):
self.assertEqual(parse_duration('10:15:30'), timedelta(hours=10, minutes=15, seconds=30))
self.assertEqual(parse_duration('1:15:30'), timedelta(hours=1, minutes=15, seconds=30))
self.assertEqual(parse_duration('100:200:300'), timedelta(hours=100, minutes=200, seconds=300))
def test_days(self):
self.assertEqual(parse_duration('4 15:30'), timedelta(days=4, minutes=15, seconds=30))
self.assertEqual(parse_duration('4 10:15:30'), timedelta(days=4, hours=10, minutes=15, seconds=30))
def test_fractions_of_seconds(self):
self.assertEqual(parse_duration('15:30.1'), timedelta(minutes=15, seconds=30, milliseconds=100))
self.assertEqual(parse_duration('15:30.01'), timedelta(minutes=15, seconds=30, milliseconds=10))
self.assertEqual(parse_duration('15:30.001'), timedelta(minutes=15, seconds=30, milliseconds=1))
self.assertEqual(parse_duration('15:30.0001'), timedelta(minutes=15, seconds=30, microseconds=100))
self.assertEqual(parse_duration('15:30.00001'), timedelta(minutes=15, seconds=30, microseconds=10))
self.assertEqual(parse_duration('15:30.000001'), timedelta(minutes=15, seconds=30, microseconds=1))
def test_negative(self):
self.assertEqual(parse_duration('-4 15:30'), timedelta(days=-4, minutes=15, seconds=30))
def test_iso_8601(self):
self.assertEqual(parse_duration('P4Y'), None)
self.assertEqual(parse_duration('P4M'), None)
self.assertEqual(parse_duration('P4W'), None)
self.assertEqual(parse_duration('P4D'), timedelta(days=4))
self.assertEqual(parse_duration('P0.5D'), timedelta(hours=12))
self.assertEqual(parse_duration('PT5H'), timedelta(hours=5))
self.assertEqual(parse_duration('PT5M'), timedelta(minutes=5))
self.assertEqual(parse_duration('PT5S'), timedelta(seconds=5))
self.assertEqual(parse_duration('PT0.000005S'), timedelta(microseconds=5))
| bsd-3-clause | 8,716,992,072,791,329,000 | 51.397727 | 107 | 0.664932 | false |
LaoZhongGu/kbengine | kbe/res/scripts/common/Lib/sre_parse.py | 4 | 28518 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
import sys
from sre_constants import *
from _sre import MAXREPEAT
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = set("0123456789")
OCTDIGITS = set("01234567")
HEXDIGITS = set("0123456789abcdefABCDEF")
WHITESPACE = set(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = 1
seqtypes = (tuple, list)
for op, av in self.data:
print(level*" " + op, end=' '); nl = 0
if op == "in":
# member sublanguage
print(); nl = 1
for op, a in av:
print((level+1)*" " + op, a)
elif op == "branch":
print(); nl = 1
i = 0
for a in av[1]:
if i > 0:
print(level*" " + "or")
a.dump(level+1); nl = 1
i = i + 1
elif isinstance(av, seqtypes):
for a in av:
if isinstance(a, SubPattern):
if not nl: print()
a.dump(level+1); nl = 1
else:
print(a, end=' ') ; nl = 0
else:
print(av, end=' ') ; nl = 0
if not nl: print()
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0
UNITCODES = (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY)
REPEATCODES = (MIN_REPEAT, MAX_REPEAT)
for op, av in self.data:
if op is BRANCH:
i = sys.maxsize
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in REPEATCODES:
i, j = av[2].getwidth()
lo = lo + int(i) * av[0]
hi = hi + int(j) * av[1]
elif op in UNITCODES:
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxsize)), int(min(hi, sys.maxsize))
return self.width
class Tokenizer:
def __init__(self, string):
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index:self.index+1]
# Special case for the str8, since indexing returns a integer
# XXX This is only needed for test_bug_926075 in test_re.py
if char and isinstance(char, bytes):
char = chr(char[0])
if char == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error("bogus escape (end of line)")
if isinstance(self.string, bytes):
c = chr(c)
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name[1:]:
if not isident(char) and not isdigit(char):
return False
return True
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] == IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[2:]
if len(escape) != 2:
raise error("bogus escape: %s" % repr("\\" + escape))
return LITERAL, int(escape, 16) & 0xff
elif c in OCTDIGITS:
# octal escape (up to three digits)
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[1:]
return LITERAL, int(escape, 8) & 0xff
elif c in DIGITS:
raise error("bogus escape: %s" % repr(escape))
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
if len(escape) != 4:
raise ValueError
return LITERAL, int(escape[2:], 16) & 0xff
elif c == "0":
# octal escape
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, int(escape[1:], 8) & 0xff
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise error("cannot refer to open group")
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error("bogus escape: %s" % repr(escape))
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
while 1:
itemsappend(_parse(source, state))
if sourcematch("|"):
continue
if not nested:
break
if not source.next or sourcematch(")", 0):
break
else:
raise error("pattern not properly closed")
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
setappend = set.append
for item in items:
setappend(item[0])
subpatternappend((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup):
item_yes = _parse(source, state)
if source.match("|"):
item_no = _parse(source, state)
if source.match("|"):
raise error("conditional backref with more than two branches")
else:
item_no = None
if source.next and not source.match(")", 0):
raise error("pattern not properly closed")
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
_PATTERNENDERS = set("|)")
_ASSERTCHARS = set("=!<")
_LOOKBEHINDASSERTCHARS = set("=!")
_REPEATCODES = set([MIN_REPEAT, MAX_REPEAT])
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
PATTERNENDERS = _PATTERNENDERS
ASSERTCHARS = _ASSERTCHARS
LOOKBEHINDASSERTCHARS = _LOOKBEHINDASSERTCHARS
REPEATCODES = _REPEATCODES
while 1:
if source.next in PATTERNENDERS:
break # end of subpattern
this = sourceget()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = sourceget()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpatternappend((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = sourceget()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error("unexpected end of regular expression")
if sourcematch("-"):
# potential range
this = sourceget()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error("bad character range")
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error("bad character range")
setappend((RANGE, (lo, hi)))
else:
raise error("unexpected end of regular expression")
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, ord(this)))
continue
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if sourcematch(","):
while source.next in DIGITS:
hi = hi + sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise error("bad repeat interval")
else:
raise error("not supported")
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] == AT):
raise error("nothing to repeat")
if item[0][0] in REPEATCODES:
raise error("multiple repeat")
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
group = 1
name = None
condgroup = None
if sourcematch("?"):
group = 0
# options
if sourcematch("P"):
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ">":
break
name = name + char
group = 1
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
elif sourcematch("="):
# named backreference
name = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
name = name + char
if not name:
raise error("missing group name")
if not isname(name):
raise error("bad character in group name")
gid = state.groupdict.get(name)
if gid is None:
raise error("unknown group name")
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
raise error("unknown specifier: ?P%s" % char)
elif sourcematch(":"):
# non-capturing group
group = 2
elif sourcematch("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
sourceget()
if not sourcematch(")"):
raise error("unbalanced parenthesis")
continue
elif source.next in ASSERTCHARS:
# lookahead assertions
char = sourceget()
dir = 1
if char == "<":
if source.next not in LOOKBEHINDASSERTCHARS:
raise error("syntax error")
dir = -1 # lookbehind
char = sourceget()
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif sourcematch("("):
# conditional backreference group
condname = ""
while 1:
char = sourceget()
if char is None:
raise error("unterminated name")
if char == ")":
break
condname = condname + char
group = 2
if not condname:
raise error("missing group name")
if isname(condname):
condgroup = state.groupdict.get(condname)
if condgroup is None:
raise error("unknown group name")
else:
try:
condgroup = int(condname)
except ValueError:
raise error("bad character in group name")
else:
# flags
if not source.next in FLAGS:
raise error("unexpected end of pattern")
while source.next in FLAGS:
state.flags = state.flags | FLAGS[sourceget()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
if condgroup:
p = _parse_sub_cond(source, state, condgroup)
else:
p = _parse_sub(source, state)
if not sourcematch(")"):
raise error("unbalanced parenthesis")
if group is not None:
state.closegroup(group)
subpatternappend((SUBPATTERN, (group, p)))
else:
while 1:
char = sourceget()
if char is None:
raise error("unexpected end of pattern")
if char == ")":
break
raise error("unknown extension")
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
else:
raise error("parser error")
return subpattern
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("can't use UNICODE flag with a bytes pattern")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
tail = source.get()
if tail == ")":
raise error("unbalanced parenthesis")
elif tail:
raise error("bogus characters at end of regular expression")
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
p = []
a = p.append
def literal(literal, p=p, pappend=a):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
pappend((LITERAL, literal))
sep = source[:0]
if isinstance(sep, str):
makechar = chr
else:
makechar = chr
while 1:
this = sget()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
c = this[1:2]
if c == "g":
name = ""
if s.match("<"):
while 1:
char = sget()
if char is None:
raise error("unterminated group name")
if char == ">":
break
name = name + char
if not name:
raise error("missing group name")
try:
index = int(name)
if index < 0:
raise error("negative group number")
except ValueError:
if not isname(name):
raise error("bad character in group name")
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError("unknown group name")
a((MARK, index))
elif c == "0":
if s.next in OCTDIGITS:
this = this + sget()
if s.next in OCTDIGITS:
this = this + sget()
literal(makechar(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this = this + sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this = this + sget()
isoctal = True
literal(makechar(int(this[1:], 8) & 0xff))
if not isoctal:
a((MARK, int(this[1:])))
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
groupsappend = groups.append
literals = [None] * len(p)
if isinstance(source, str):
encode = lambda x: x
else:
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
encode = lambda x: x.encode('latin1')
for c, s in p:
if c is MARK:
groupsappend((i, s))
# literal[i] is already None
else:
literals[i] = encode(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise error("unmatched group")
except IndexError:
raise error("invalid group reference")
return sep.join(literals)
| lgpl-3.0 | 6,969,117,105,192,253,000 | 33.483676 | 81 | 0.460481 | false |
brahmaroutu/test-infra | gubernator/github/admin.py | 22 | 4483 | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import cPickle as pickle
import logging
import os
import webapp2
from google.appengine.api import urlfetch
from google.appengine.ext import deferred
from google.appengine.ext import ndb
import models
import handlers
# ndb model.query likes to use == True
# pylint: disable=singleton-comparison
class RecomputeOpenPRs(object):
keys_only = True
@staticmethod
def query():
return models.GHIssueDigest.query(
models.GHIssueDigest.is_open == True,
models.GHIssueDigest.is_pr == True
)
@staticmethod
def handle_entity(entity):
repo, number = entity.id().split(' ')
handlers.update_issue_digest(repo, number, always_put=True)
return {'puts': 1}
@ndb.toplevel
def migrate(migration, cursor=None, last_parent=None, stop=False):
entities, next_cursor, more = migration.query().fetch_page(
10, start_cursor=cursor, keys_only=migration.keys_only)
counters = collections.Counter()
for entity in entities:
changes = migration.handle_entity(entity)
counters.update(changes)
summary = ', '.join('%s: %d' % x for x in sorted(counters.items()))
if entities:
logging.info('fetched %d. %s. (%r-%r)',
len(entities), summary, entities[0], entities[-1])
if stop:
return
if more and next_cursor:
deferred.defer(migrate, migration, cursor=next_cursor, last_parent=last_parent)
class Digest(webapp2.RequestHandler):
def get(self):
results = models.GHIssueDigest.query(
models.GHIssueDigest.is_open == True)
self.response.headers['content-type'] = 'text/plain'
self.response.write(pickle.dumps(list(results), pickle.HIGHEST_PROTOCOL))
class AdminDash(webapp2.RequestHandler):
def get(self):
self.response.write("""
<form action="/admin/reprocess" method="post">
<button>Reprocess Open Issues/PRs</button><input type="checkbox" name="background">Background
</form>
<form action="/admin/digest_sync" method="post">
<button>Download GHIssueDigest from production</button>
</form>
""")
def check_csrf(self):
# https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet
# #Checking_The_Referer_Header
origin = self.request.headers.get('origin') + '/'
expected = self.request.host_url + '/'
if not (origin and origin == expected):
logging.error('csrf check failed for %s, origin: %r', self.request.url, origin)
self.abort(403)
class Reprocessor(AdminDash):
def post(self):
self.check_csrf()
migration = RecomputeOpenPRs()
if self.request.get('background'):
deferred.defer(migrate, migration)
self.response.write('running.')
else:
migrate(migration, stop=True)
class DigestSync(AdminDash):
def post(self):
if not os.environ['SERVER_SOFTWARE'].startswith('Development/'):
self.abort(400)
# For local development, download GHIssueDigests from the production
# server.
result = urlfetch.fetch(
'https://github-dot-k8s-gubernator.appspot.com/digest', deadline=60)
if result.status_code != 200:
self.abort(result.status_code)
body = result.content
self.response.headers['content-type'] = 'text/plain'
self.response.write('%s\n' % len(body))
self.response.write(repr(body[:8]))
results = pickle.loads(body)
for res in results:
res.key = ndb.Key(models.GHIssueDigest, res.key.id())
self.response.write('%s\n' % res.key)
res.put()
app = webapp2.WSGIApplication([
(r'/digest', Digest),
(r'/admin/?', AdminDash),
(r'/admin/reprocess', Reprocessor),
(r'/admin/digest_sync', DigestSync),
], debug=True)
| apache-2.0 | -2,501,444,223,143,768,000 | 31.722628 | 98 | 0.656257 | false |
caseyrollins/osf.io | api/base/metrics.py | 1 | 5367 | import re
from datetime import timedelta
import waffle
from django.utils import timezone
from api.base.exceptions import InvalidQueryStringError
from osf import features
class MetricsViewMixin(object):
"""Mixin for views that expose metrics via django-elasticsearch-metrics.
Enables metrics to be requested with a query parameter, like so: ::
/v2/myview?metrics[downloads]=monthly
Any subclass of this mixin MUST do the following:
* Use a serializer_class that subclasses MetricsSerializerMixin
    * Define metric_map as a class variable. It should be a dict mapping a metric name
("downloads") to a Metric class (PreprintDownload)
* For list views: implement `get_annotated_queryset_with_metrics`
* For detail views: implement `add_metric_to_object`
"""
# Adapted from FilterMixin.QUERY_PATTERN
METRICS_QUERY_PATTERN = re.compile(r'^metrics\[(?P<metric_name>((?:,*\s*\w+)*))\]$')
TIMEDELTA_MAP = {
'daily': timedelta(hours=24),
'weekly': timedelta(days=7),
        'monthly': timedelta(days=30),
'yearly': timedelta(days=365),
}
VALID_METRIC_PERIODS = {
'daily',
'weekly',
'monthly',
'yearly',
'total',
}
@property
def metric_map(self):
        raise NotImplementedError('MetricsViewMixin subclasses must define a metric_map class variable.')
def get_annotated_queryset_with_metrics(self, queryset, metric_class, metric_name, after):
"""Return a queryset annotated with metrics. Use for list endpoints that expose metrics."""
raise NotImplementedError('MetricsViewMixin subclasses must define get_annotated_queryset_with_metrics().')
def add_metric_to_object(self, obj, metric_class, metric_name, after):
"""Set an attribute for a metric on obj. Use for detail endpoints that expose metrics.
Return the modified object.
"""
raise NotImplementedError('MetricsViewMixin subclasses must define add_metric_to_object().')
@property
def metrics_requested(self):
return (
waffle.switch_is_active(features.ELASTICSEARCH_METRICS) and
bool(self.parse_metric_query_params(self.request.query_params))
)
# Adapted from FilterMixin.parse_query_params
# TODO: Should we get rid of query_params argument and use self.request.query_params instead?
def parse_metric_query_params(self, query_params):
"""Parses query parameters to a dict usable for fetching metrics.
:param dict query_params:
        :return dict mapping each requested metric name to the period requested for it, e.g. {
            <metric_name>: <[daily|weekly|monthly|yearly|total]>,
        }
"""
query = {}
for key, value in query_params.iteritems():
match = self.METRICS_QUERY_PATTERN.match(key)
if match:
match_dict = match.groupdict()
metric_name = match_dict['metric_name']
query[metric_name] = value
return query
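    # Illustrative example: a request such as
    #   GET /v2/preprints/?metrics[downloads]=monthly&metrics[views]=total
    # parses to {'downloads': 'monthly', 'views': 'total'}; each key must then be
    # present in the view's metric_map and each value in VALID_METRIC_PERIODS
    # (validated in _add_metrics below).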
def _add_metrics(self, queryset_or_obj, method):
"""Parse the ?metric[METRIC]=PERIOD query param, validate it, and
run ``method`` for each each requested object.
This is used to share code between add_metric_to_object and get_metrics_queryset.
"""
metrics_requested = self.parse_metric_query_params(self.request.query_params)
if metrics_requested:
metric_map = self.metric_map
for metric, period in metrics_requested.items():
if metric not in metric_map:
raise InvalidQueryStringError("Invalid metric in query string: '{}'".format(metric), parameter='metrics')
if period not in self.VALID_METRIC_PERIODS:
raise InvalidQueryStringError("Invalid period for metric: '{}'".format(period), parameter='metrics')
metric_class = metric_map[metric]
if period == 'total':
after = None
else:
after = timezone.now() - self.TIMEDELTA_MAP[period]
queryset_or_obj = method(queryset_or_obj, metric_class, metric, after)
return queryset_or_obj
def add_metrics_to_object(self, obj):
"""Helper method used for detail views."""
return self._add_metrics(obj, method=self.add_metric_to_object)
def get_metrics_queryset(self, queryset):
"""Helper method used for list views."""
return self._add_metrics(queryset, method=self.get_annotated_queryset_with_metrics)
# Override get_default_queryset for convenience
def get_default_queryset(self):
queryset = super(MetricsViewMixin, self).get_default_queryset()
return self.get_metrics_queryset(queryset)
class MetricsSerializerMixin(object):
@property
def available_metrics(self):
raise NotImplementedError(
'MetricSerializerMixin subclasses must define an available_metrics (set) class variable.',
)
# Override JSONAPISerializer
def get_meta(self, obj):
meta = super(MetricsSerializerMixin, self).get_meta(obj)
for metric in self.available_metrics:
if hasattr(obj, metric):
meta = meta or {'metrics': {}}
meta['metrics'][metric] = getattr(obj, metric)
return meta
| apache-2.0 | -7,549,455,710,052,056,000 | 39.659091 | 125 | 0.641327 | false |
rakuten-nlp/category2vec | word2vec.py | 2 | 42117 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via word2vec's "skip-gram and CBOW models", using either
hierarchical softmax or negative sampling [1]_ [2]_.
The training algorithms were originally ported from the C package https://code.google.com/p/word2vec/
and extended with additional functionality.
For a blog tutorial on gensim word2vec, with an interactive web app trained on GoogleNews, visit http://radimrehurek.com/2014/02/word2vec-tutorial/
**Install Cython with `pip install cython` to use optimized word2vec training** (70x speedup [3]_).
Initialize a model with e.g.::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Word2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Word2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the model. Some of them
are already built-in::
>>> model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> model.similarity('woman', 'man')
0.73723527
>>> model['computer'] # raw numpy vector of a word
array([-0.00449447, -0.00310097, 0.02421786, ...], dtype=float32)
and so on.
If you're finished training a model (=no more updates, only querying), you can do
>>> model.init_sims(replace=True)
to trim unneeded model memory = use (much) less RAM.
.. [1] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [2] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [3] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
import logging
import sys
import os
import heapq
import time
from copy import deepcopy
import threading
try:
from queue import Queue
except ImportError:
from Queue import Queue
from numpy import exp, dot, zeros, outer, random, dtype, get_include, \
uint32, seterr, array, uint8, vstack, argsort, fromstring, sqrt, newaxis, ndarray, empty, sum as np_sum
logger = logging.getLogger("word2vec")
# from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
import utils, matutils # utility fnc for pickling, common scipy operations etc
from six import iteritems, itervalues, string_types
from six.moves import xrange
try:
from word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION, IS_DOUBLE
except ImportError:
try:
# try to compile and use the faster cython version
import pyximport
models_dir = os.path.dirname(__file__) or os.getcwd()
pyximport.install(setup_args={"include_dirs": [models_dir, get_include()]})
from word2vec_inner import train_sentence_sg, train_sentence_cbow, FAST_VERSION, IS_DOUBLE
except:
# failed... fall back to plain numpy (20-80x slower training than the above)
FAST_VERSION = -1
IS_DOUBLE = False
def train_sentence_sg(model, sentence, alpha, work=None):
"""
Update skip-gram model by training on a single sentence.
The sentence is a list of Vocab objects (or None, where the corresponding
word is not in the vocabulary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
if model.negative:
# precompute negative labels
labels = zeros(model.negative + 1)
labels[0] = 1.0
for pos, word in enumerate(sentence):
if word is None:
continue # OOV word in the input sentence => skip
reduced_window = random.randint(model.window) # `b` in the original word2vec code
# now go over all words from the (reduced) window, predicting each one in turn
start = max(0, pos - model.window + reduced_window)
for pos2, word2 in enumerate(sentence[start : pos + model.window + 1 - reduced_window], start):
# don't train on OOV words and on the `word` itself
if word2 and not (pos2 == pos):
l1 = model.syn0[word2.index]
neu1e = zeros(l1.shape)
if model.hs:
# work on the entire tree at once, to push as much work into numpy's C routines as possible (performance)
l2a = deepcopy(model.syn1[word.point]) # 2d matrix, codelen x layer1_size
fa = 1.0 / (1.0 + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1 - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error (for this reason l2a is deepcopied)
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.table[random.randint(model.table.shape[0])]
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (labels - fb) * alpha # vector of error gradients multiplied by the learning rate
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
model.syn0[word2.index] += neu1e # learn input -> hidden
return len([word for word in sentence if word is not None])
def train_sentence_cbow(model, sentence, alpha, work=None, neu1=None):
"""
Update CBOW model by training on a single sentence.
The sentence is a list of Vocab objects (or None, where the corresponding
word is not in the vocabulary. Called internally from `Word2Vec.train()`.
This is the non-optimized, Python version. If you have cython installed, gensim
will use the optimized version from word2vec_inner instead.
"""
if model.negative:
# precompute negative labels
labels = zeros(model.negative + 1)
labels[0] = 1.
for pos, word in enumerate(sentence):
if word is None:
continue # OOV word in the input sentence => skip
reduced_window = random.randint(model.window) # `b` in the original word2vec code
start = max(0, pos - model.window + reduced_window)
window_pos = enumerate(sentence[start : pos + model.window + 1 - reduced_window], start)
word2_indices = [word2.index for pos2, word2 in window_pos if (word2 is not None and pos2 != pos)]
l1 = np_sum(model.syn0[word2_indices], axis=0) # 1 x layer1_size
if word2_indices and model.cbow_mean:
l1 /= len(word2_indices)
neu1e = zeros(l1.shape)
if model.hs:
l2a = model.syn1[word.point] # 2d matrix, codelen x layer1_size
fa = 1. / (1. + exp(-dot(l1, l2a.T))) # propagate hidden -> output
ga = (1. - word.code - fa) * alpha # vector of error gradients multiplied by the learning rate
model.syn1[word.point] += outer(ga, l1) # learn hidden -> output
neu1e += dot(ga, l2a) # save error
if model.negative:
# use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
word_indices = [word.index]
while len(word_indices) < model.negative + 1:
w = model.table[random.randint(model.table.shape[0])]
if w != word.index:
word_indices.append(w)
l2b = model.syn1neg[word_indices] # 2d matrix, k+1 x layer1_size
fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
gb = (labels - fb) * alpha # vector of error gradients multiplied by the learning rate
model.syn1neg[word_indices] += outer(gb, l1) # learn hidden -> output
neu1e += dot(gb, l2b) # save error
model.syn0[word2_indices] += neu1e # learn input -> hidden, here for all words in the window separately
return len([word for word in sentence if word is not None])
if IS_DOUBLE:
from numpy import float64 as REAL
else:
from numpy import float32 as REAL
class Vocab(object):
"""A single vocabulary item, used internally for constructing binary trees (incl. both word leaves and inner nodes)."""
def __init__(self, **kwargs):
self.count = 0
self.__dict__.update(kwargs)
def __lt__(self, other): # used for sorting in a priority queue
return self.count < other.count
def __str__(self):
vals = ['%s:%r' % (key, self.__dict__[key]) for key in sorted(self.__dict__) if not key.startswith('_')]
return "<" + ', '.join(vals) + ">"
class Word2Vec(utils.SaveLoad):
"""
Class for training, using and evaluating neural networks described in https://code.google.com/p/word2vec/
The model can be stored/loaded via its `save()` and `load()` methods, or stored/loaded in a format
compatible with the original word2vec implementation via `save_word2vec_format()` and `load_word2vec_format()`.
"""
def __init__(self, sentences=None, size=100, alpha=0.025, window=5, min_count=5,
sample=0, seed=1, workers=1, min_alpha=0.0001, sg=1, hs=1, negative=0, cbow_mean=0):
"""
Initialize the model from an iterable of `sentences`. Each sentence is a
list of words (unicode strings) that will be used for training.
The `sentences` iterable can be simply a list, but for larger corpora,
consider an iterable that streams the sentences directly from disk/network.
See :class:`BrownCorpus`, :class:`Text8Corpus` or :class:`LineSentence` in
this module for such examples.
If you don't supply `sentences`, the model is left uninitialized -- use if
you plan to initialize it in some other way.
`sg` defines the training algorithm. By default (`sg=1`), skip-gram is used. Otherwise, `cbow` is employed.
`size` is the dimensionality of the feature vectors.
`window` is the maximum distance between the current and predicted word within a sentence.
`alpha` is the initial learning rate (will linearly drop to zero as training progresses).
`seed` = for the random number generator.
`min_count` = ignore all words with total frequency lower than this.
`sample` = threshold for configuring which higher-frequency words are randomly downsampled;
default is 0 (off), useful value is 1e-5.
`workers` = use this many worker threads to train the model (=faster training with multicore machines)
        `hs` = if 1 (default), hierarchical softmax will be used for model training (else set to 0)
`negative` = if > 0, negative sampling will be used, the int for negative
specifies how many "noise words" should be drawn (usually between 5-20)
`cbow_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
Only applies when cbow is used.
"""
self.vocab = {} # mapping from a word (string) to a Vocab object
self.index2word = [] # map from a word's matrix index (int) to word (string)
self.sg = int(sg)
self.table = None # for negative sampling --> this needs a lot of RAM! consider setting back to None before saving
self.layer1_size = int(size)
if size % 4 != 0:
logger.warning("consider setting layer size to a multiple of 4 for greater performance")
self.alpha = float(alpha)
self.window = int(window)
self.seed = seed
self.min_count = min_count
self.sample = sample
self.workers = workers
self.min_alpha = min_alpha
self.hs = hs
self.negative = negative
self.cbow_mean = int(cbow_mean)
if sentences is not None:
self.build_vocab(sentences)
self.train(sentences)
def make_table(self, table_size=100000000, power=0.75):
"""
Create a table using stored vocabulary word counts for drawing random words in the negative
sampling training routines.
Called internally from `build_vocab()`.
"""
logger.info("constructing a table with noise distribution from %i words" % len(self.vocab))
# table (= list of words) of noise distribution for negative sampling
vocab_size = len(self.index2word)
self.table = zeros(table_size, dtype=uint32)
if not vocab_size:
logger.warning("empty vocabulary in word2vec, is this intended?")
return
# compute sum of all power (Z in paper)
train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))
# go through the whole table and fill it up with the word indexes proportional to a word's count**power
widx = 0
# normalize count^0.75 by Z
d1 = self.vocab[self.index2word[widx]].count**power / train_words_pow
for tidx in xrange(table_size):
self.table[tidx] = widx
if 1.0 * tidx / table_size > d1:
widx += 1
d1 += self.vocab[self.index2word[widx]].count**power / train_words_pow
if widx >= vocab_size:
widx = vocab_size - 1
def create_binary_tree(self):
"""
Create a binary Huffman tree using stored vocabulary word counts. Frequent words
will have shorter binary codes. Called internally from `build_vocab()`.
"""
logger.info("constructing a huffman tree from %i words" % len(self.vocab))
# build the huffman tree
heap = list(itervalues(self.vocab))
heapq.heapify(heap)
for i in xrange(len(self.vocab) - 1):
min1, min2 = heapq.heappop(heap), heapq.heappop(heap)
heapq.heappush(heap, Vocab(count=min1.count + min2.count, index=i + len(self.vocab), left=min1, right=min2))
# recurse over the tree, assigning a binary code to each vocabulary word
if heap:
max_depth, stack = 0, [(heap[0], [], [])]
while stack:
node, codes, points = stack.pop()
if node.index < len(self.vocab):
# leaf node => store its path from the root
node.code, node.point = codes, points
max_depth = max(len(codes), max_depth)
else:
# inner node => continue recursion
points = array(list(points) + [node.index - len(self.vocab)], dtype=uint32)
stack.append((node.left, array(list(codes) + [0], dtype=uint8), points))
stack.append((node.right, array(list(codes) + [1], dtype=uint8), points))
logger.info("built huffman tree with maximum node depth %i" % max_depth)
def precalc_sampling(self):
"""Precalculate each vocabulary item's threshold for sampling"""
if self.sample:
logger.info("frequent-word downsampling, threshold %g; progress tallies will be approximate" % (self.sample))
total_words = sum(v.count for v in itervalues(self.vocab))
threshold_count = float(self.sample) * total_words
for v in itervalues(self.vocab):
prob = (sqrt(v.count / threshold_count) + 1) * (threshold_count / v.count) if self.sample else 1.0
v.sample_probability = min(prob, 1.0)
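        # Worked example (illustrative): with sample=1e-5 and a 100M-word corpus,
        # threshold_count = 1000; a word occurring 1,000,000 times gets
        # prob = (sqrt(1e6 / 1000) + 1) * (1000 / 1e6) ~= 0.033, so only about 3% of
        # its occurrences are kept, while rare words keep sample_probability 1.0.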
def build_vocab(self, sentences):
"""
Build vocabulary from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
logger.info("collecting all words and their counts")
sentence_no, vocab = -1, {}
total_words = 0
for sentence_no, sentence in enumerate(sentences):
if sentence_no % 10000 == 0:
logger.info("PROGRESS: at sentence #%i, processed %i words and %i word types" %
(sentence_no, total_words, len(vocab)))
if isinstance(sentence,tuple): sentence = sentence[0] #10/11/2014 added
for word in sentence:
total_words += 1
if word in vocab:
vocab[word].count += 1
else:
vocab[word] = Vocab(count=1)
logger.info("collected %i word types from a corpus of %i words and %i sentences" %
(len(vocab), total_words, sentence_no + 1))
# assign a unique index to each word
self.vocab, self.index2word = {}, []
for word, v in iteritems(vocab):
if v.count >= self.min_count:
v.index = len(self.vocab)
self.index2word.append(word)
self.vocab[word] = v
logger.info("total %i word types after removing those with count<%s" % (len(self.vocab), self.min_count))
if self.hs:
# add info about each word's Huffman encoding
self.create_binary_tree()
if self.negative:
# build the table for drawing random words (for negative sampling)
self.make_table()
# precalculate downsampling thresholds
self.precalc_sampling()
self.reset_weights()
def train(self, sentences, total_words=None, word_count=0, chunksize=100):
"""
Update the model's neural weights from a sequence of sentences (can be a once-only generator stream).
Each sentence must be a list of unicode strings.
"""
if FAST_VERSION < 0:
import warnings
warnings.warn("Cython compilation failed, training will be slow. Do you have Cython installed? `pip install cython`")
logger.info("training model with %i workers on %i vocabulary and %i features, "
"using 'skipgram'=%s 'hierarchical softmax'=%s 'subsample'=%s and 'negative sampling'=%s and optimization=%i" %
(self.workers, len(self.vocab), self.layer1_size, self.sg, self.hs, self.sample, self.negative,FAST_VERSION))
if not self.vocab:
raise RuntimeError("you must first build vocabulary before training the model")
start, next_report = time.time(), [1.0]
word_count = [word_count]
total_words = total_words or int(sum(v.count * v.sample_probability for v in itervalues(self.vocab)))
jobs = Queue(maxsize=2 * self.workers) # buffer ahead only a limited number of jobs.. this is the reason we can't simply use ThreadPool :(
lock = threading.Lock() # for shared state (=number of words trained so far, log reports...)
def worker_train():
"""Train the model, lifting lists of sentences from the jobs queue."""
work = zeros(self.layer1_size, dtype=REAL) # each thread must have its own work memory
neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
while True:
job = jobs.get()
if job is None: # data finished, exit
break
# update the learning rate before every job
alpha = max(self.min_alpha, self.alpha * (1 - 1.0 * word_count[0] / total_words))
# how many words did we train on? out-of-vocabulary (unknown) words do not count
if self.sg:
job_words = sum(train_sentence_sg(self, sentence, alpha, work) for sentence in job)
else:
job_words = sum(train_sentence_cbow(self, sentence, alpha, work, neu1) for sentence in job)
with lock:
word_count[0] += job_words
elapsed = time.time() - start
if elapsed >= next_report[0]:
logger.info("PROGRESS: at %.2f%% words, alpha %.05f, %.0f words/s" %
(100.0 * word_count[0] / total_words, alpha, word_count[0] / elapsed if elapsed else 0.0))
next_report[0] = elapsed + 1.0 # don't flood the log, wait at least a second between progress reports
workers = [threading.Thread(target=worker_train) for _ in xrange(self.workers)]
for thread in workers:
thread.daemon = True # make interrupting the process with ctrl+c easier
thread.start()
def prepare_sentences():
for sentence in sentences:
# avoid calling random_sample() where prob >= 1, to speed things up a little:
sampled = [self.vocab[word] for word in sentence
if word in self.vocab and (self.vocab[word].sample_probability >= 1.0 or self.vocab[word].sample_probability >= random.random_sample())]
yield sampled
# convert input strings to Vocab objects (eliding OOV/downsampled words), and start filling the jobs queue
for job_no, job in enumerate(utils.grouper(prepare_sentences(), chunksize)):
logger.debug("putting job #%i in the queue, qsize=%i" % (job_no, jobs.qsize()))
jobs.put(job)
logger.info("reached the end of input; waiting to finish %i outstanding jobs" % jobs.qsize())
for _ in xrange(self.workers):
jobs.put(None) # give the workers heads up that they can finish -- no more work!
for thread in workers:
thread.join()
elapsed = time.time() - start
logger.info("training on %i words took %.1fs, %.0f words/s" %
(word_count[0], elapsed, word_count[0] / elapsed if elapsed else 0.0))
return word_count[0]
def reset_weights(self):
"""Reset all projection weights to an initial (untrained) state, but keep the existing vocabulary."""
logger.info("resetting layer weights")
random.seed(self.seed)
self.syn0 = matutils.zeros_aligned((len(self.vocab), self.layer1_size), dtype=REAL)
# randomize weights vector by vector, rather than materializing a huge random matrix in RAM at once
for i in xrange(len(self.vocab)):
self.syn0[i] = (random.rand(self.layer1_size) - 0.5) / self.layer1_size
if self.hs:
self.syn1 = matutils.zeros_aligned((len(self.vocab), self.layer1_size), dtype=REAL)
if self.negative:
self.syn1neg = matutils.zeros_aligned((len(self.vocab), self.layer1_size), dtype=REAL)
self.syn0norm = None
def save_word2vec_format(self, fname, fvocab=None, binary=False):
"""
Store the input-hidden weight matrix in the same format used by the original
C word2vec-tool, for compatibility.
"""
if fvocab is not None:
logger.info("Storing vocabulary in %s" % (fvocab))
with utils.smart_open(fvocab, 'wb') as vout:
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
logger.info("storing %sx%s projection weights into %s" % (len(self.vocab), self.layer1_size, fname))
assert (len(self.vocab), self.layer1_size) == self.syn0.shape
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8("%s %s\n" % self.syn0.shape))
# store in sorted order: most frequent words at the top
for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
row = self.syn0[vocab.index]
if binary:
fout.write(utils.to_utf8(word) + b" " + row.tostring())
else:
fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
@classmethod
def load_word2vec_format(cls, fname, fvocab=None, binary=False, norm_only=True):
"""
Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
`binary` is a boolean indicating whether the data is in binary word2vec format.
`norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
Word counts are read from `fvocab` filename, if set (this is the file generated
by `-save-vocab` flag of the original C tool).
"""
counts = None
if fvocab is not None:
logger.info("loading word counts from %s" % (fvocab))
counts = {}
with utils.smart_open(fvocab) as fin:
for line in fin:
word, count = utils.to_unicode(line).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s" % (fname))
with utils.smart_open(fname) as fin:
header = utils.to_unicode(fin.readline())
vocab_size, layer1_size = map(int, header.split()) # throws for invalid file format
result = Word2Vec(size=layer1_size)
result.syn0 = zeros((vocab_size, layer1_size), dtype=REAL)
if binary:
binary_len = dtype(REAL).itemsize * layer1_size
for line_no in xrange(vocab_size):
# mixed text and binary: read text first, then binary
word = []
while True:
ch = fin.read(1)
if ch == b' ':
break
if ch != b'\n': # ignore newlines in front of words (some binary files have newline, some don't)
word.append(ch)
word = utils.to_unicode(b''.join(word))
if counts is None:
result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
elif word in counts:
result.vocab[word] = Vocab(index=line_no, count=counts[word])
else:
logger.warning("vocabulary file is incomplete")
result.vocab[word] = Vocab(index=line_no, count=None)
result.index2word.append(word)
result.syn0[line_no] = fromstring(fin.read(binary_len), dtype=REAL)
else:
for line_no, line in enumerate(fin):
parts = utils.to_unicode(line).split()
if len(parts) != layer1_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % (line_no))
word, weights = parts[0], map(REAL, parts[1:])
if counts is None:
result.vocab[word] = Vocab(index=line_no, count=vocab_size - line_no)
elif word in counts:
result.vocab[word] = Vocab(index=line_no, count=counts[word])
else:
logger.warning("vocabulary file is incomplete")
result.vocab[word] = Vocab(index=line_no, count=None)
result.index2word.append(word)
result.syn0[line_no] = weights
logger.info("loaded %s matrix from %s" % (result.syn0.shape, fname))
result.init_sims(norm_only)
return result
def most_similar(self, positive=[], negative=[], topn=10):
"""
Find the top-N most similar words. Positive words contribute positively towards the
similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words, and corresponds to the `word-analogy` and
`distance` scripts in the original word2vec implementation.
Example::
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [(word, 1.0) if isinstance(word, string_types + (ndarray,))
else word for word in positive]
negative = [(word, -1.0) if isinstance(word, string_types + (ndarray,))
else word for word in negative]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
elif word in self.vocab:
mean.append(weight * self.syn0norm[self.vocab[word].index])
all_words.add(self.vocab[word].index)
else:
raise KeyError("word '%s' not in vocabulary" % word)
if not mean:
raise ValueError("cannot compute similarity with no input")
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
dists = dot(self.syn0norm, mean)
if not topn:
return dists
best = argsort(dists)[::-1][:topn + len(all_words)]
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def doesnt_match(self, words):
"""
Which word from the given list doesn't go with the others?
Example::
>>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
"""
self.init_sims()
words = [word for word in words if word in self.vocab] # filter out OOV words
logger.debug("using words %s" % words)
if not words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack(self.syn0norm[self.vocab[word].index] for word in words).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, words))[0][1]
def __getitem__(self, word):
"""
Return a word's representations in vector space, as a 1D numpy array.
Example::
>>> trained_model['woman']
array([ -1.40128313e-02, ...]
"""
return self.syn0[self.vocab[word].index]
def __contains__(self, word):
return word in self.vocab
def similarity(self, w1, w2):
"""
Compute cosine similarity between two words.
Example::
>>> trained_model.similarity('woman', 'man')
0.73723527
>>> trained_model.similarity('woman', 'woman')
1.0
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of word weight vectors")
if replace:
for i in xrange(self.syn0.shape[0]):
self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
self.syn0norm = self.syn0
if hasattr(self, 'syn1'):
del self.syn1
else:
self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
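    # Hedged usage sketch (assumes an already trained `model`): trade trainability for
    # memory by normalizing in place, then use the model for queries only.
    #
    #     model.init_sims(replace=True)
    #     model.similarity('woman', 'man')   # still fine
    #     # model.train(...) would no longer be valid after replace=True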
def accuracy(self, questions, restrict_vocab=30000):
"""
Compute accuracy of the model. `questions` is a filename where lines are
4-tuples of words, split into sections by ": SECTION NAME" lines.
See https://code.google.com/p/word2vec/source/browse/trunk/questions-words.txt for an example.
The accuracy is reported (=printed to log and returned as a list) for each
section separately, plus there's one aggregate summary at the end.
Use `restrict_vocab` to ignore all questions containing a word whose frequency
is not in the top-N most frequent words (default top 30,000).
This method corresponds to the `compute-accuracy` script of the original C word2vec.
"""
ok_vocab = dict(sorted(iteritems(self.vocab),
key=lambda item: -item[1].count)[:restrict_vocab])
ok_index = set(v.index for v in itervalues(ok_vocab))
def log_accuracy(section):
correct, incorrect = section['correct'], section['incorrect']
if correct + incorrect > 0:
logger.info("%s: %.1f%% (%i/%i)" %
(section['section'], 100.0 * correct / (correct + incorrect),
correct, correct + incorrect))
sections, section = [], None
for line_no, line in enumerate(utils.smart_open(questions)):
# TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
line = utils.to_unicode(line)
if line.startswith(': '):
# a new section starts => store the old section
if section:
sections.append(section)
log_accuracy(section)
section = {'section': line.lstrip(': ').strip(), 'correct': 0, 'incorrect': 0}
else:
if not section:
raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
try:
a, b, c, expected = [word.lower() for word in line.split()] # TODO assumes vocabulary preprocessing uses lowercase, too...
                except:
                    logger.info("skipping invalid line #%i in %s" % (line_no, questions))
                    continue
if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
logger.debug("skipping line #%i with OOV words: %s" % (line_no, line))
continue
ignore = set(self.vocab[v].index for v in [a, b, c]) # indexes of words to ignore
predicted = None
# find the most likely prediction, ignoring OOV words and input words
for index in argsort(self.most_similar(positive=[b, c], negative=[a], topn=False))[::-1]:
if index in ok_index and index not in ignore:
predicted = self.index2word[index]
if predicted != expected:
logger.debug("%s: expected %s, predicted %s" % (line.strip(), expected, predicted))
break
section['correct' if predicted == expected else 'incorrect'] += 1
if section:
# store the last section, too
sections.append(section)
log_accuracy(section)
total = {'section': 'total', 'correct': sum(s['correct'] for s in sections), 'incorrect': sum(s['incorrect'] for s in sections)}
log_accuracy(total)
sections.append(total)
return sections
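    # Hedged usage sketch (the file path is illustrative): run the analogy evaluation;
    # each entry is a section dict, and the aggregate 'total' section is appended last.
    #
    #     sections = model.accuracy('questions-words.txt', restrict_vocab=30000)
    #     total = sections[-1]
    #     print(total['correct'], total['incorrect'])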
def __str__(self):
return "Word2Vec(vocab=%s, size=%s, alpha=%s)" % (len(self.index2word), self.layer1_size, self.alpha)
def save(self, *args, **kwargs):
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm']) # don't bother storing the cached normalized vectors
super(Word2Vec, self).save(*args, **kwargs)
class BrownCorpus(object):
"""Iterate over sentences from the Brown corpus (part of NLTK data)."""
def __init__(self, dirname):
self.dirname = dirname
def __iter__(self):
for fname in os.listdir(self.dirname):
fname = os.path.join(self.dirname, fname)
if not os.path.isfile(fname):
continue
for line in utils.smart_open(fname):
line = utils.to_unicode(line)
# each file line is a single sentence in the Brown corpus
# each token is WORD/POS_TAG
token_tags = [t.split('/') for t in line.split() if len(t.split('/')) == 2]
# ignore words with non-alphabetic tags like ",", "!" etc (punctuation, weird stuff)
words = ["%s/%s" % (token.lower(), tag[:2]) for token, tag in token_tags if tag[:2].isalpha()]
if not words: # don't bother sending out empty sentences
continue
yield words
class Text8Corpus(object):
"""Iterate over sentences from the "text8" corpus, unzipped from http://mattmahoney.net/dc/text8.zip ."""
def __init__(self, fname):
self.fname = fname
def __iter__(self):
# the entire corpus is one gigantic line -- there are no sentence marks at all
# so just split the sequence of tokens arbitrarily: 1 sentence = 1000 tokens
sentence, rest, max_sentence_length = [], b'', 1000
with utils.smart_open(self.fname) as fin:
while True:
text = rest + fin.read(8192) # avoid loading the entire file (=1 line) into RAM
if text == rest: # EOF
sentence.extend(rest.split()) # return the last chunk of words, too (may be shorter/longer)
if sentence:
yield sentence
break
last_token = text.rfind(b' ') # the last token may have been split in two... keep it for the next iteration
words, rest = (utils.to_unicode(text[:last_token]).split(), text[last_token:].strip()) if last_token >= 0 else ([], text)
sentence.extend(words)
while len(sentence) >= max_sentence_length:
yield sentence[:max_sentence_length]
sentence = sentence[max_sentence_length:]
class LineSentence(object):
"""Simple format: one sentence = one line; words already preprocessed and separated by whitespace."""
def __init__(self, source):
"""
`source` can be either a string or a file object.
Example::
sentences = LineSentence('myfile.txt')
Or for compressed files::
sentences = LineSentence('compressed_text.txt.bz2')
sentences = LineSentence('compressed_text.txt.gz')
"""
self.source = source
def __iter__(self):
"""Iterate through the lines in the source."""
try:
# Assume it is a file-like object and try treating it as such
# Things that don't have seek will trigger an exception
self.source.seek(0)
for line in self.source:
yield utils.to_unicode(line).split()
except AttributeError:
# If it didn't work like a file, use it as a string filename
with utils.smart_open(self.source) as fin:
for line in fin:
yield utils.to_unicode(line).split()
# Example: ./word2vec.py ~/workspace/word2vec/text8 ~/workspace/word2vec/questions-words.txt ./text8
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.info("running %s" % " ".join(sys.argv))
logging.info("using optimization %s" % FAST_VERSION)
# check and process cmdline input
program = os.path.basename(sys.argv[0])
if len(sys.argv) < 2:
print(globals()['__doc__'] % locals())
sys.exit(1)
seterr(all='raise') # don't ignore numpy errors
if len(sys.argv) > 1:
input_file = sys.argv[1]
model = Word2Vec(LineSentence(input_file), size=100, window=5, min_count=5, workers=8)
model.save(input_file + '.model')
model.save_word2vec_format(input_file + '.vec')
else:
pass
program = os.path.basename(sys.argv[0])
logging.info("finished running %s" % program)
| lgpl-3.0 | 5,257,621,125,906,808,000 | 46.482525 | 167 | 0.586485 | false |
yutiansut/QUANTAXIS | QUANTAXIS_Test/QAFactor_Test/(日线)KDJ测试.py | 2 | 3982 | import jqdatasdk
import pandas as pd
import QUANTAXIS as QA
from QUANTAXIS.QAFactor.analyze import FactorAnalyzer
from QUANTAXIS.QAFactor.data import DataApi
import QUANTAXIS.QAFactor.preprocess as preprocess
import QUANTAXIS.QAFactor.tears as tears
import QUANTAXIS.QAFactor.utils as utils
jqdatasdk.auth("JoinQuant username", "JoinQuant password")
# Factor stock pool
code_list = QA.QA_fetch_stock_block_adv().get_block(
    "白酒").index.get_level_values("code").tolist()  # "白酒" is the liquor-sector block name
# Date range of the data
start_date = '2018-01-01'
end_date = '2019-09-30'
# Daily bar data
# Note: the data is needed here as a DataFrame
stock_data = QA.QA_fetch_stock_day_adv(code=code_list,
start=start_date,
end=end_date).to_qfq().data
# Raw factor generation
kdj = stock_data.groupby("code").apply(
QA.QAIndicator.QA_indicator_KDJ).droplevel(level=-1)
k_value = kdj["KDJ_K"]
d_value = kdj["KDJ_D"]
j_value = kdj["KDJ_J"]
# Raw factor preprocessing
# 1. Use the D value as the factor under test
factor = d_value
# Factor formatting:
# - reorder the factor index levels to (date first, then stock code)
# - rename the index levels to ['datetime', 'code']
factor = preprocess.QA_fmt_factor(factor)
# Listing-date filter: the JoinQuant data source is used here, so the local JoinQuant
# data must have been imported in advance.
# QA_fetch_get_factor_start_date does two things:
# 1. formats the factor
# 2. attaches each stock's listing date to the factor stock pool
factor_data = preprocess.QA_fetch_get_factor_start_date(factor)
factor_data = factor_data.reset_index()
factor_data = factor_data.loc[(factor_data.datetime - factor_data.start_date
) > pd.Timedelta("200D")].set_index(
["datetime", "code"])
factor = factor_data['factor']
# Outlier handling; "MAD" is used by default
# QA_winsorize_factor defaults to the "MAD" winsorization method, applied within each date group
factor = preprocess.QA_winsorize_factor(factor)
# The factor data's date index is passed to DataApi so it can prepare the industry and weight data
factor_time_range = factor.index.remove_unused_levels().get_level_values(
"datetime").tolist()
# Load the price/volume, industry and weight data.
# Industry data currently comes from the JoinQuant source, while weight data uses the local
# QUANTAXIS data directly. Since QA computes weights from forward-adjusted (qfq) prices,
# computing forward returns from backward-adjusted (hfq) prices may be inconsistent, so it
# is recommended to compute forward returns from forward-adjusted data as well.
# Note: if the single-factor processing was already done per industry, using the JoinQuant
# industry data directly is not recommended -- different industry classification schemes can
# differ a lot. DataApi also supports passing industry data in directly: besides industry_cls,
# industry_data can be given, and weight handling likewise accepts weight_data.
# Method 1: use an industry data source supported by JoinQuant
# dataapi = QA_data.DataApi(jq_username="JoinQuant username",
#                           jq_password="JoinQuant password",
# factor_time_range=factor_time_range,
# industry_cls="sw_l1",
# weight_cls="mktcap",
# detailed=True,
# frequence='1d')
# # Method 2: pass industry data in directly
industry_data = pd.Series(index=factor.index, data='白酒')  # single-industry label ("白酒" = liquor sector)
dataapi = DataApi(jq_username="JoinQuant username",
                  jq_password="JoinQuant password",
factor_time_range=factor_time_range,
industry_cls=None,
industry_data=industry_data,
weight_cls="mktcap",
detailed=True,
frequence='1d')
analyzer = FactorAnalyzer(factor=factor, **dataapi.apis, max_loss=0.9)
# The processed factor data: factor values, industry, weights, forward returns and quantiles
clean_factor = analyzer.clean_factor_data
# Display the statistical summary based on the processed factor data
analyzer.create_summary_tear_sheet()
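# Hedged follow-up sketch (assumes clean_factor is a pandas DataFrame, as produced by
# alphalens-style analyzers; the output file name is illustrative):
#     print(clean_factor.head())
#     clean_factor.to_csv("kdj_d_clean_factor.csv")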
| mit | -6,047,993,971,646,748,000 | 31.215054 | 76 | 0.642523 | false |
halvertoluke/edx-platform | lms/djangoapps/instructor_task/subtasks.py | 52 | 27541 | """
This module contains celery task functions for handling the management of subtasks.
"""
from time import time
import json
from uuid import uuid4
import psutil
from contextlib import contextmanager
import logging
from celery.states import SUCCESS, READY_STATES, RETRY
import dogstats_wrapper as dog_stats_api
from django.db import transaction, DatabaseError
from django.core.cache import cache
from instructor_task.models import InstructorTask, PROGRESS, QUEUING
from util.db import outer_atomic
TASK_LOG = logging.getLogger('edx.celery.task')
# Lock expiration should be long enough to allow a subtask to complete.
SUBTASK_LOCK_EXPIRE = 60 * 10 # Lock expires in 10 minutes
# Number of times to retry if a subtask update encounters a lock on the InstructorTask.
# (These are recursive retries, so don't make this number too large.)
MAX_DATABASE_LOCK_RETRIES = 5
class DuplicateTaskException(Exception):
"""Exception indicating that a task already exists or has already completed."""
pass
def _get_number_of_subtasks(total_num_items, items_per_task):
"""
Determines number of subtasks that would be generated by _generate_items_for_subtask.
This needs to be calculated before the query is executed so that the list of all subtasks can be
stored in the InstructorTask before any subtasks are started.
The number of subtask_id values returned by this should match the number of chunks returned
by the generate_items_for_subtask generator.
"""
num_subtasks, remainder = divmod(total_num_items, items_per_task)
if remainder:
num_subtasks += 1
return num_subtasks
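# Hedged worked example: with 105 items and at most 25 items per subtask,
# divmod(105, 25) == (4, 5), so the non-zero remainder adds one more chunk:
#     _get_number_of_subtasks(105, 25) == 5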
@contextmanager
def track_memory_usage(metric, course_id):
"""
Context manager to track how much memory (in bytes) a given process uses.
Metrics will look like: 'course_email.subtask_generation.memory.rss'
or 'course_email.subtask_generation.memory.vms'.
"""
memory_types = ['rss', 'vms']
process = psutil.Process()
baseline_memory_info = process.get_memory_info()
baseline_usages = [getattr(baseline_memory_info, memory_type) for memory_type in memory_types]
yield
for memory_type, baseline_usage in zip(memory_types, baseline_usages):
total_memory_info = process.get_memory_info()
total_usage = getattr(total_memory_info, memory_type)
memory_used = total_usage - baseline_usage
dog_stats_api.increment(
metric + "." + memory_type,
memory_used,
tags=["course_id:{}".format(course_id)],
)
def _generate_items_for_subtask(
item_querysets, # pylint: disable=bad-continuation
item_fields,
total_num_items,
items_per_task,
total_num_subtasks,
course_id,
):
"""
Generates a chunk of "items" that should be passed into a subtask.
Arguments:
`item_querysets` : a list of query sets, each of which defines the "items" that should be passed to subtasks.
`item_fields` : the fields that should be included in the dict that is returned.
These are in addition to the 'pk' field.
`total_num_items` : the result of summing the count of each queryset in `item_querysets`.
        `items_per_task` : maximum number of items to include in each subtask.
        `total_num_subtasks` : the number of subtasks to create, as computed by `_get_number_of_subtasks`.
`course_id` : course_id of the course. Only needed for the track_memory_usage context manager.
Returns: yields a list of dicts, where each dict contains the fields in `item_fields`, plus the 'pk' field.
Warning: if the algorithm here changes, the _get_number_of_subtasks() method should similarly be changed.
"""
num_items_queued = 0
all_item_fields = list(item_fields)
all_item_fields.append('pk')
num_subtasks = 0
items_for_task = []
with track_memory_usage('course_email.subtask_generation.memory', course_id):
for queryset in item_querysets:
for item in queryset.values(*all_item_fields).iterator():
if len(items_for_task) == items_per_task and num_subtasks < total_num_subtasks - 1:
yield items_for_task
num_items_queued += items_per_task
items_for_task = []
num_subtasks += 1
items_for_task.append(item)
# yield remainder items for task, if any
if items_for_task:
yield items_for_task
num_items_queued += len(items_for_task)
# Note, depending on what kind of DB is used, it's possible for the queryset
# we iterate over to change in the course of the query. Therefore it's
# possible that there are more (or fewer) items queued than were initially
# calculated. It also means it's possible that the last task contains
# more items than items_per_task allows. We expect this to be a small enough
# number as to be negligible.
if num_items_queued != total_num_items:
TASK_LOG.info("Number of items generated by chunking %s not equal to original total %s", num_items_queued, total_num_items)
class SubtaskStatus(object):
"""
Create and return a dict for tracking the status of a subtask.
SubtaskStatus values are:
'task_id' : id of subtask. This is used to pass task information across retries.
'attempted' : number of attempts -- should equal succeeded plus failed
'succeeded' : number that succeeded in processing
'skipped' : number that were not processed.
'failed' : number that failed during processing
'retried_nomax' : number of times the subtask has been retried for conditions that
should not have a maximum count applied
'retried_withmax' : number of times the subtask has been retried for conditions that
should have a maximum count applied
'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)
Object is not JSON-serializable, so to_dict and from_dict methods are provided so that
it can be passed as a serializable argument to tasks (and be reconstituted within such tasks).
In future, we may want to include specific error information
indicating the reason for failure.
Also, we should count up "not attempted" separately from attempted/failed.
"""
def __init__(self, task_id, attempted=None, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):
"""Construct a SubtaskStatus object."""
self.task_id = task_id
if attempted is not None:
self.attempted = attempted
else:
self.attempted = succeeded + failed
self.succeeded = succeeded
self.failed = failed
self.skipped = skipped
self.retried_nomax = retried_nomax
self.retried_withmax = retried_withmax
self.state = state if state is not None else QUEUING
@classmethod
def from_dict(cls, d):
"""Construct a SubtaskStatus object from a dict representation."""
options = dict(d)
task_id = options['task_id']
del options['task_id']
return SubtaskStatus.create(task_id, **options)
@classmethod
def create(cls, task_id, **options):
"""Construct a SubtaskStatus object."""
return cls(task_id, **options)
def to_dict(self):
"""
Output a dict representation of a SubtaskStatus object.
Use for creating a JSON-serializable representation for use by tasks.
"""
return self.__dict__
def increment(self, succeeded=0, failed=0, skipped=0, retried_nomax=0, retried_withmax=0, state=None):
"""
Update the result of a subtask with additional results.
Kwarg arguments are incremented to the existing values.
The exception is for `state`, which if specified is used to override the existing value.
"""
self.attempted += (succeeded + failed)
self.succeeded += succeeded
self.failed += failed
self.skipped += skipped
self.retried_nomax += retried_nomax
self.retried_withmax += retried_withmax
if state is not None:
self.state = state
def get_retry_count(self):
"""Returns the number of retries of any kind."""
return self.retried_nomax + self.retried_withmax
def __repr__(self):
"""Return print representation of a SubtaskStatus object."""
return 'SubtaskStatus<%r>' % (self.to_dict(),)
def __unicode__(self):
"""Return unicode version of a SubtaskStatus object representation."""
return unicode(repr(self))
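# Hedged usage sketch (the task id below is made up): a SubtaskStatus round-trips
# through its dict form, so it can be passed to tasks as a JSON-serializable argument.
#
#     status = SubtaskStatus.create('subtask-1234')
#     status.increment(succeeded=10, failed=2, state=SUCCESS)
#     restored = SubtaskStatus.from_dict(status.to_dict())
#     assert restored.attempted == 12 and restored.get_retry_count() == 0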
def initialize_subtask_info(entry, action_name, total_num, subtask_id_list):
"""
Store initial subtask information to InstructorTask object.
The InstructorTask's "task_output" field is initialized. This is a JSON-serialized dict.
Counters for 'attempted', 'succeeded', 'failed', 'skipped' keys are initialized to zero,
as is the 'duration_ms' value. A 'start_time' is stored for later duration calculations,
and the total number of "things to do" is set, so the user can be told how much needs to be
done overall. The `action_name` is also stored, to help with constructing more readable
task_progress messages.
The InstructorTask's "subtasks" field is also initialized. This is also a JSON-serialized dict.
Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
subtasks. 'Total' is set here to the total number, while the other three are initialized to zero.
Once the counters for 'succeeded' and 'failed' match the 'total', the subtasks are done and
the InstructorTask's "status" will be changed to SUCCESS.
The "subtasks" field also contains a 'status' key, that contains a dict that stores status
information for each subtask. The value for each subtask (keyed by its task_id)
is its subtask status, as defined by SubtaskStatus.to_dict().
This information needs to be set up in the InstructorTask before any of the subtasks start
running. If not, there is a chance that the subtasks could complete before the parent task
is done creating subtasks. Doing so also simplifies the save() here, as it avoids the need
for locking.
Monitoring code should assume that if an InstructorTask has subtask information, that it should
rely on the status stored in the InstructorTask object, rather than status stored in the
corresponding AsyncResult.
"""
task_progress = {
'action_name': action_name,
'attempted': 0,
'failed': 0,
'skipped': 0,
'succeeded': 0,
'total': total_num,
'duration_ms': int(0),
'start_time': time()
}
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = PROGRESS
# Write out the subtasks information.
num_subtasks = len(subtask_id_list)
    # Note that it may not be necessary to store the initial value with all those zeroes!
# Write out as a dict, so it will go more smoothly into json.
subtask_status = {subtask_id: (SubtaskStatus.create(subtask_id)).to_dict() for subtask_id in subtask_id_list}
subtask_dict = {
'total': num_subtasks,
'succeeded': 0,
'failed': 0,
'status': subtask_status
}
entry.subtasks = json.dumps(subtask_dict)
# and save the entry immediately, before any subtasks actually start work:
entry.save_now()
return task_progress
# pylint: disable=bad-continuation
def queue_subtasks_for_query(
entry,
action_name,
create_subtask_fcn,
item_querysets,
item_fields,
items_per_task,
total_num_items,
):
"""
Generates and queues subtasks to each execute a chunk of "items" generated by a queryset.
Arguments:
`entry` : the InstructorTask object for which subtasks are being queued.
`action_name` : a past-tense verb that can be used for constructing readable status messages.
`create_subtask_fcn` : a function of two arguments that constructs the desired kind of subtask object.
Arguments are the list of items to be processed by this subtask, and a SubtaskStatus
object reflecting initial status (and containing the subtask's id).
`item_querysets` : a list of query sets that define the "items" that should be passed to subtasks.
`item_fields` : the fields that should be included in the dict that is returned.
These are in addition to the 'pk' field.
`items_per_task` : maximum size of chunks to break each query chunk into for use by a subtask.
`total_num_items` : total amount of items that will be put into subtasks
Returns: the task progress as stored in the InstructorTask object.
"""
task_id = entry.task_id
# Calculate the number of tasks that will be created, and create a list of ids for each task.
total_num_subtasks = _get_number_of_subtasks(total_num_items, items_per_task)
subtask_id_list = [str(uuid4()) for _ in range(total_num_subtasks)]
# Update the InstructorTask with information about the subtasks we've defined.
TASK_LOG.info(
"Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.",
task_id,
entry.id,
total_num_subtasks,
total_num_items,
)
# Make sure this is committed to database before handing off subtasks to celery.
with outer_atomic():
progress = initialize_subtask_info(entry, action_name, total_num_items, subtask_id_list)
# Construct a generator that will return the recipients to use for each subtask.
# Pass in the desired fields to fetch for each recipient.
item_list_generator = _generate_items_for_subtask(
item_querysets,
item_fields,
total_num_items,
items_per_task,
total_num_subtasks,
entry.course_id,
)
# Now create the subtasks, and start them running.
TASK_LOG.info(
"Task %s: creating %s subtasks to process %s items.",
task_id,
total_num_subtasks,
total_num_items,
)
num_subtasks = 0
for item_list in item_list_generator:
subtask_id = subtask_id_list[num_subtasks]
num_subtasks += 1
subtask_status = SubtaskStatus.create(subtask_id)
new_subtask = create_subtask_fcn(item_list, subtask_status)
new_subtask.apply_async()
# Subtasks have been queued so no exceptions should be raised after this point.
# Return the task progress as stored in the InstructorTask object.
return progress
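# Hedged usage sketch (`entry`, `recipient_qs` and `_make_email_subtask` are illustrative
# names, not part of this module):
#
#     progress = queue_subtasks_for_query(
#         entry,
#         action_name='emailed',
#         create_subtask_fcn=lambda to_list, subtask_status: _make_email_subtask(to_list, subtask_status),
#         item_querysets=[recipient_qs],
#         item_fields=['email', 'first_name'],
#         items_per_task=100,
#         total_num_items=recipient_qs.count(),
#     )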
def _acquire_subtask_lock(task_id):
"""
Mark the specified task_id as being in progress.
This is used to make sure that the same task is not worked on by more than one worker
at the same time. This can occur when tasks are requeued by Celery in response to
loss of connection to the task broker. Most of the time, such duplicate tasks are
run sequentially, but they can overlap in processing as well.
Returns true if the task_id was not already locked; false if it was.
"""
# cache.add fails if the key already exists
key = "subtask-{}".format(task_id)
succeeded = cache.add(key, 'true', SUBTASK_LOCK_EXPIRE)
if not succeeded:
TASK_LOG.warning("task_id '%s': already locked. Contains value '%s'", task_id, cache.get(key))
return succeeded
def _release_subtask_lock(task_id):
"""
Unmark the specified task_id as being no longer in progress.
This is most important to permit a task to be retried.
"""
# According to Celery task cookbook, "Memcache delete is very slow, but we have
# to use it to take advantage of using add() for atomic locking."
key = "subtask-{}".format(task_id)
cache.delete(key)
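# Hedged sketch of the locking discipline these helpers expect (mirrors how
# check_subtask_is_valid() and update_subtask_status() below use them):
#
#     if not _acquire_subtask_lock(task_id):
#         raise DuplicateTaskException('task %s is already being worked on' % task_id)
#     try:
#         pass  # perform the subtask's work here
#     finally:
#         _release_subtask_lock(task_id)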
def check_subtask_is_valid(entry_id, current_task_id, new_subtask_status):
"""
Confirms that the current subtask is known to the InstructorTask and hasn't already been completed.
Problems can occur when the parent task has been run twice, and results in duplicate
    subtasks being created for the same InstructorTask entry.  This may happen when Celery
loses its connection to its broker, and any current tasks get requeued.
If a parent task gets requeued, then the same InstructorTask may have a different set of
subtasks defined (to do the same thing), so the subtasks from the first queuing would not
be known to the InstructorTask. We return an exception in this case.
If a subtask gets requeued, then the first time the subtask runs it should run fine to completion.
However, we want to prevent it from running again, so we check here to see what the existing
subtask's status is. If it is complete, we raise an exception. We also take a lock on the task,
so that we can detect if another worker has started work but has not yet completed that work.
The other worker is allowed to finish, and this raises an exception.
Raises a DuplicateTaskException exception if it's not a task that should be run.
If this succeeds, it requires that update_subtask_status() is called to release the lock on the
task.
"""
# Confirm that the InstructorTask actually defines subtasks.
entry = InstructorTask.objects.get(pk=entry_id)
if len(entry.subtasks) == 0:
format_str = "Unexpected task_id '{}': unable to find subtasks of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, entry, new_subtask_status)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.nosubtasks', tags=[entry.course_id])
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask knows about this particular subtask.
subtask_dict = json.loads(entry.subtasks)
subtask_status_info = subtask_dict['status']
if current_task_id not in subtask_status_info:
format_str = "Unexpected task_id '{}': unable to find status for subtask of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, entry, new_subtask_status)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.unknown', tags=[entry.course_id])
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask doesn't think that this subtask has already been
# performed successfully.
subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])
subtask_state = subtask_status.state
if subtask_state in READY_STATES:
format_str = "Unexpected task_id '{}': already completed - status {} for subtask of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.completed', tags=[entry.course_id])
raise DuplicateTaskException(msg)
# Confirm that the InstructorTask doesn't think that this subtask is already being
# retried by another task.
if subtask_state == RETRY:
# Check to see if the input number of retries is less than the recorded number.
# If so, then this is an earlier version of the task, and a duplicate.
new_retry_count = new_subtask_status.get_retry_count()
current_retry_count = subtask_status.get_retry_count()
if new_retry_count < current_retry_count:
format_str = "Unexpected task_id '{}': already retried - status {} for subtask of instructor task '{}': rejecting task {}"
msg = format_str.format(current_task_id, subtask_status, entry, new_subtask_status)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.retried', tags=[entry.course_id])
raise DuplicateTaskException(msg)
# Now we are ready to start working on this. Try to lock it.
# If it fails, then it means that another worker is already in the
# middle of working on this.
if not _acquire_subtask_lock(current_task_id):
format_str = "Unexpected task_id '{}': already being executed - for subtask of instructor task '{}'"
msg = format_str.format(current_task_id, entry)
TASK_LOG.warning(msg)
dog_stats_api.increment('instructor_task.subtask.duplicate.locked', tags=[entry.course_id])
raise DuplicateTaskException(msg)
def update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count=0):
"""
Update the status of the subtask in the parent InstructorTask object tracking its progress.
Because select_for_update is used to lock the InstructorTask object while it is being updated,
multiple subtasks updating at the same time may time out while waiting for the lock.
The actual update operation is surrounded by a try/except/else that permits the update to be
retried if the transaction times out.
The subtask lock acquired in the call to check_subtask_is_valid() is released here, only when
the attempting of retries has concluded.
"""
try:
_update_subtask_status(entry_id, current_task_id, new_subtask_status)
except DatabaseError:
# If we fail, try again recursively.
retry_count += 1
if retry_count < MAX_DATABASE_LOCK_RETRIES:
TASK_LOG.info("Retrying to update status for subtask %s of instructor task %d with status %s: retry %d",
current_task_id, entry_id, new_subtask_status, retry_count)
dog_stats_api.increment('instructor_task.subtask.retry_after_failed_update')
update_subtask_status(entry_id, current_task_id, new_subtask_status, retry_count)
else:
TASK_LOG.info("Failed to update status after %d retries for subtask %s of instructor task %d with status %s",
retry_count, current_task_id, entry_id, new_subtask_status)
dog_stats_api.increment('instructor_task.subtask.failed_after_update_retries')
raise
finally:
# Only release the lock on the subtask when we're done trying to update it.
# Note that this will be called each time a recursive call to update_subtask_status()
# returns. Fortunately, it's okay to release a lock that has already been released.
_release_subtask_lock(current_task_id)
@transaction.atomic
def _update_subtask_status(entry_id, current_task_id, new_subtask_status):
"""
Update the status of the subtask in the parent InstructorTask object tracking its progress.
Uses select_for_update to lock the InstructorTask object while it is being updated.
    The operation is surrounded by a try/except/else that permits the manual transaction to be
committed on completion, or rolled back on error.
The InstructorTask's "task_output" field is updated. This is a JSON-serialized dict.
Accumulates values for 'attempted', 'succeeded', 'failed', 'skipped' from `new_subtask_status`
into the corresponding values in the InstructorTask's task_output. Also updates the 'duration_ms'
value with the current interval since the original InstructorTask started. Note that this
value is only approximate, since the subtask may be running on a different server than the
original task, so is subject to clock skew.
The InstructorTask's "subtasks" field is also updated. This is also a JSON-serialized dict.
Keys include 'total', 'succeeded', 'retried', 'failed', which are counters for the number of
subtasks. 'Total' is expected to have been set at the time the subtasks were created.
The other three counters are incremented depending on the value of `status`. Once the counters
for 'succeeded' and 'failed' match the 'total', the subtasks are done and the InstructorTask's
"status" is changed to SUCCESS.
The "subtasks" field also contains a 'status' key, that contains a dict that stores status
information for each subtask. At the moment, the value for each subtask (keyed by its task_id)
is the value of the SubtaskStatus.to_dict(), but could be expanded in future to store information
about failure messages, progress made, etc.
"""
TASK_LOG.info("Preparing to update status for subtask %s for instructor task %d with status %s",
current_task_id, entry_id, new_subtask_status)
try:
entry = InstructorTask.objects.select_for_update().get(pk=entry_id)
subtask_dict = json.loads(entry.subtasks)
subtask_status_info = subtask_dict['status']
if current_task_id not in subtask_status_info:
# unexpected error -- raise an exception
format_str = "Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'"
msg = format_str.format(current_task_id, entry_id)
TASK_LOG.warning(msg)
raise ValueError(msg)
# Update status:
subtask_status_info[current_task_id] = new_subtask_status.to_dict()
# Update the parent task progress.
# Set the estimate of duration, but only if it
# increases. Clock skew between time() returned by different machines
# may result in non-monotonic values for duration.
task_progress = json.loads(entry.task_output)
start_time = task_progress['start_time']
prev_duration = task_progress['duration_ms']
new_duration = int((time() - start_time) * 1000)
task_progress['duration_ms'] = max(prev_duration, new_duration)
# Update counts only when subtask is done.
# In future, we can make this more responsive by updating status
# between retries, by comparing counts that change from previous
# retry.
new_state = new_subtask_status.state
if new_subtask_status is not None and new_state in READY_STATES:
for statname in ['attempted', 'succeeded', 'failed', 'skipped']:
task_progress[statname] += getattr(new_subtask_status, statname)
# Figure out if we're actually done (i.e. this is the last task to complete).
# This is easier if we just maintain a counter, rather than scanning the
# entire new_subtask_status dict.
if new_state == SUCCESS:
subtask_dict['succeeded'] += 1
elif new_state in READY_STATES:
subtask_dict['failed'] += 1
num_remaining = subtask_dict['total'] - subtask_dict['succeeded'] - subtask_dict['failed']
# If we're done with the last task, update the parent status to indicate that.
# At present, we mark the task as having succeeded. In future, we should see
# if there was a catastrophic failure that occurred, and figure out how to
# report that here.
if num_remaining <= 0:
entry.task_state = SUCCESS
entry.subtasks = json.dumps(subtask_dict)
entry.task_output = InstructorTask.create_output_for_success(task_progress)
TASK_LOG.debug("about to save....")
entry.save()
TASK_LOG.info("Task output updated to %s for subtask %s of instructor task %d",
entry.task_output, current_task_id, entry_id)
except Exception:
TASK_LOG.exception("Unexpected error while updating InstructorTask.")
dog_stats_api.increment('instructor_task.subtask.update_exception')
raise
| agpl-3.0 | -5,704,662,597,960,257,000 | 45.758913 | 134 | 0.683563 | false |
kumarsaurabh20/NGShelper | NGS/get_split_position.py | 1 | 2754 | ##
# pysam.cigar: the tuple [ (0,3), (1,5), (0,2) ] refers to an alignment with 3 matches, 5 insertions and another 2 matches.
# @ args min_gap is a parameter that specifies the length of the softclip/split
# in order for the function get_split_positions to return it as a valid split.
# e.g. min_gap =3 returns all breakpoints for where the unaligned part is longer than 2 base pairs.
def get_split_positions(read, min_gap):
"""Parse cigar string for detection of break points in a read
Break point is a position where a read is aligned to a reference,
but the position in the read following this position is no longer aligned.
Break points can be caused by softclipping (ends are not aligned),
or by an alignment that has split the read up into disjoint segments aligned on different places.
"""
cigar = read.cigar
# Cigar string is a list of tuples:
if len(read.cigar) <= 1:
return [] # no break points = empty list of break point positions
##
# read has break points if cigar string is longer than 1
# This is a list with the breakpoint tuples
list_of_break_point_positions = []
# set the current position on the genome
if cigar[0][0] == 0:
current_pos = int(read.positions[0])
else:
current_pos = int(read.positions[0]) - cigar[0][1]
# Search for breakpoints in cigar and get the corresponding position on the genome
i = 0
for info_tuple in cigar:
# If current segment in cigar string is aligned.
if info_tuple[0] == 0:
# Special case when at first segment:
if i == 0 and cigar[1][1] >= min_gap: # first end-split
list_of_break_point_positions.append((current_pos + info_tuple[1] , True))
# Special case when at last segment:
elif i == len(cigar) - 1 and cigar[i - 1][1] >= min_gap:
list_of_break_point_positions.append((current_pos, False))
# Internal segments:
elif cigar[i - 1][1] >= min_gap and cigar[i + 1][1] >= min_gap:
if cigar[i - 1][1] >= min_gap:
list_of_break_point_positions.append((current_pos, False))
if cigar[i + 1][1] >= min_gap:
list_of_break_point_positions.append((current_pos + info_tuple[1] - 1, True))
i += 1
current_pos += info_tuple[1]
return(list_of_break_point_positions)
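# Hedged worked example (the read object is illustrative): for a read whose first aligned
# reference position is 100 and whose cigar is [(4, 5), (0, 20), (4, 3)] (5S 20M 3S),
# get_split_positions(read, 3) walks the cigar and returns [(100, False), (119, True)]:
# one breakpoint at the first aligned base (left soft clip) and one at the last
# aligned base (right soft clip).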
if __name__ == "__main__":
from svest.util import bamio
import sys
bamfile = bamio.open_bam_file(sys.argv[1])
for read in bamfile:
#print read.cigar
if not read.is_unmapped and len(read.cigar) > 1:
#print read.cigar
print get_split_positions(read, 0)
| gpl-3.0 | -7,358,448,829,402,011,000 | 36.739726 | 123 | 0.619826 | false |
tommo/gii | support/waf/waflib/Tools/tex.py | 2 | 12261 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
"""
TeX/LaTeX/PDFLaTeX/XeLaTeX support
Example::
def configure(conf):
conf.load('tex')
if not conf.env.LATEX:
conf.fatal('The program LaTex is required')
def build(bld):
bld(
features = 'tex',
type = 'latex', # pdflatex or xelatex
source = 'document.ltx', # mandatory, the source
outs = 'ps', # 'pdf' or 'ps pdf'
deps = 'crossreferencing.lst', # to give dependencies directly
prompt = 1, # 0 for the batch mode
)
To configure with a special program use::
$ PDFLATEX=luatex waf configure
"""
import os, re
from waflib import Utils, Task, Errors, Logs
from waflib.TaskGen import feature, before_method
re_bibunit = re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M)
def bibunitscan(self):
"""
Parse the inputs and try to find the *bibunit* dependencies
:return: list of bibunit files
:rtype: list of :py:class:`waflib.Node.Node`
"""
node = self.inputs[0]
nodes = []
if not node: return nodes
code = node.read()
for match in re_bibunit.finditer(code):
path = match.group('file')
if path:
for k in ['', '.bib']:
# add another loop for the tex include paths?
Logs.debug('tex: trying %s%s' % (path, k))
fi = node.parent.find_resource(path + k)
if fi:
nodes.append(fi)
# no break, people are crazy
else:
Logs.debug('tex: could not find %s' % path)
Logs.debug("tex: found the following bibunit files: %s" % nodes)
return nodes
exts_deps_tex = ['', '.ltx', '.tex', '.bib', '.pdf', '.png', '.eps', '.ps']
"""List of typical file extensions included in latex files"""
exts_tex = ['.ltx', '.tex']
"""List of typical file extensions that contain latex"""
re_tex = re.compile(r'\\(?P<type>include|bibliography|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M)
"""Regexp for expressions that may include latex files"""
g_bibtex_re = re.compile('bibdata', re.M)
"""Regexp for bibtex files"""
class tex(Task.Task):
"""
Compile a tex/latex file.
.. inheritance-diagram:: waflib.Tools.tex.latex waflib.Tools.tex.xelatex waflib.Tools.tex.pdflatex
"""
bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False)
bibtex_fun.__doc__ = """
Execute the program **bibtex**
"""
makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False)
makeindex_fun.__doc__ = """
Execute the program **makeindex**
"""
def exec_command(self, cmd, **kw):
"""
Override :py:meth:`waflib.Task.Task.exec_command` to execute the command without buffering (latex may prompt for inputs)
:return: the return code
:rtype: int
"""
bld = self.generator.bld
try:
if not kw.get('cwd', None):
kw['cwd'] = bld.cwd
except AttributeError:
bld.cwd = kw['cwd'] = bld.variant_dir
return Utils.subprocess.Popen(cmd, **kw).wait()
def scan_aux(self, node):
"""
A recursive regex-based scanner that finds included auxiliary files.
"""
nodes = [node]
re_aux = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M)
def parse_node(node):
code = node.read()
for match in re_aux.finditer(code):
path = match.group('file')
found = node.parent.find_or_declare(path)
if found and found not in nodes:
Logs.debug('tex: found aux node ' + found.abspath())
nodes.append(found)
parse_node(found)
parse_node(node)
return nodes
def scan(self):
"""
A recursive regex-based scanner that finds latex dependencies. It uses :py:attr:`waflib.Tools.tex.re_tex`
Depending on your needs you might want:
* to change re_tex::
from waflib.Tools import tex
tex.re_tex = myregex
* or to change the method scan from the latex tasks::
from waflib.Task import classes
classes['latex'].scan = myscanfunction
"""
node = self.inputs[0]
nodes = []
names = []
seen = []
if not node: return (nodes, names)
def parse_node(node):
if node in seen:
return
seen.append(node)
code = node.read()
global re_tex
for match in re_tex.finditer(code):
for path in match.group('file').split(','):
if path:
add_name = True
found = None
for k in exts_deps_tex:
Logs.debug('tex: trying %s%s' % (path, k))
found = node.parent.find_resource(path + k)
for tsk in self.generator.tasks:
if not found or found in tsk.outputs:
break
else:
nodes.append(found)
add_name = False
for ext in exts_tex:
if found.name.endswith(ext):
parse_node(found)
break
# no break, people are crazy
if add_name:
names.append(path)
parse_node(node)
for x in nodes:
x.parent.get_bld().mkdir()
Logs.debug("tex: found the following : %s and names %s" % (nodes, names))
return (nodes, names)
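	# Hedged example of the customization described in the docstring above (the extra
	# \subfile pattern is illustrative); this would typically live in a project wscript:
	#
	#     import re
	#     from waflib.Tools import tex
	#     tex.re_tex = re.compile(
	#         r'\\(?P<type>include|bibliography|putbib|includegraphics|input|import'
	#         r'|bringin|lstinputlisting|subfile)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}', re.M)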
def check_status(self, msg, retcode):
"""
Check an exit status and raise an error with a particular message
:param msg: message to display if the code is non-zero
:type msg: string
:param retcode: condition
:type retcode: boolean
"""
if retcode != 0:
raise Errors.WafError("%r command exit status %r" % (msg, retcode))
def bibfile(self):
"""
Parse the *.aux* files to find bibfiles to process.
If yes, execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun`
"""
for aux_node in self.aux_nodes:
try:
ct = aux_node.read()
except (OSError, IOError):
				Logs.error('Error reading %s' % aux_node.abspath())
continue
if g_bibtex_re.findall(ct):
Logs.warn('calling bibtex')
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS': self.TEXINPUTS, 'BSTINPUTS': self.TEXINPUTS})
self.env.SRCFILE = aux_node.name[:-4]
self.check_status('error when calling bibtex', self.bibtex_fun())
def bibunits(self):
"""
Parse the *.aux* file to find bibunit files. If there are bibunit files,
execute :py:meth:`waflib.Tools.tex.tex.bibtex_fun`.
"""
try:
bibunits = bibunitscan(self)
except OSError:
Logs.error('error bibunitscan')
else:
if bibunits:
fn = ['bu' + str(i) for i in xrange(1, len(bibunits) + 1)]
if fn:
Logs.warn('calling bibtex on bibunits')
for f in fn:
self.env.env = {'BIBINPUTS': self.TEXINPUTS, 'BSTINPUTS': self.TEXINPUTS}
self.env.SRCFILE = f
self.check_status('error when calling bibtex', self.bibtex_fun())
def makeindex(self):
"""
Look on the filesystem if there is a *.idx* file to process. If yes, execute
:py:meth:`waflib.Tools.tex.tex.makeindex_fun`
"""
try:
idx_path = self.idx_node.abspath()
os.stat(idx_path)
except OSError:
Logs.warn('index file %s absent, not calling makeindex' % idx_path)
else:
Logs.warn('calling makeindex')
self.env.SRCFILE = self.idx_node.name
self.env.env = {}
self.check_status('error when calling makeindex %s' % idx_path, self.makeindex_fun())
def bibtopic(self):
"""
Additional .aux files from the bibtopic package
"""
p = self.inputs[0].parent.get_bld()
if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')):
self.aux_nodes += p.ant_glob('*[0-9].aux')
def run(self):
"""
Runs the TeX build process.
It may require multiple passes, depending on the usage of cross-references,
bibliographies, content susceptible of needing such passes.
The appropriate TeX compiler is called until the *.aux* files stop changing.
Makeindex and bibtex are called if necessary.
"""
env = self.env
if not env['PROMPT_LATEX']:
env.append_value('LATEXFLAGS', '-interaction=batchmode')
env.append_value('PDFLATEXFLAGS', '-interaction=batchmode')
env.append_value('XELATEXFLAGS', '-interaction=batchmode')
fun = self.texfun
node = self.inputs[0]
srcfile = node.abspath()
texinputs = self.env.TEXINPUTS or ''
self.TEXINPUTS = node.parent.get_bld().abspath() + os.pathsep + node.parent.get_src().abspath() + os.pathsep + texinputs + os.pathsep
# important, set the cwd for everybody
self.cwd = self.inputs[0].parent.get_bld().abspath()
Logs.warn('first pass on %s' % self.__class__.__name__)
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'TEXINPUTS': self.TEXINPUTS})
self.env.SRCFILE = srcfile
self.check_status('error when calling latex', fun())
self.aux_nodes = self.scan_aux(node.change_ext('.aux'))
self.idx_node = node.change_ext('.idx')
self.bibtopic()
self.bibfile()
self.bibunits()
self.makeindex()
hash = ''
for i in range(10):
# prevent against infinite loops - one never knows
# watch the contents of file.aux and stop if file.aux does not change anymore
prev_hash = hash
try:
hashes = [Utils.h_file(x.abspath()) for x in self.aux_nodes]
hash = Utils.h_list(hashes)
except (OSError, IOError):
Logs.error('could not read aux.h')
pass
if hash and hash == prev_hash:
break
# run the command
Logs.warn('calling %s' % self.__class__.__name__)
self.env.env = {}
self.env.env.update(os.environ)
self.env.env.update({'TEXINPUTS': self.TEXINPUTS})
self.env.SRCFILE = srcfile
self.check_status('error when calling %s' % self.__class__.__name__, fun())
class latex(tex):
texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False)
class pdflatex(tex):
texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False)
class xelatex(tex):
texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False)
class dvips(Task.Task):
run_str = '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}'
color = 'BLUE'
after = ['latex', 'pdflatex', 'xelatex']
class dvipdf(Task.Task):
run_str = '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}'
color = 'BLUE'
after = ['latex', 'pdflatex', 'xelatex']
class pdf2ps(Task.Task):
run_str = '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}'
color = 'BLUE'
after = ['latex', 'pdflatex', 'xelatex']
@feature('tex')
@before_method('process_source')
def apply_tex(self):
"""
Create :py:class:`waflib.Tools.tex.tex` objects, and dvips/dvipdf/pdf2ps tasks if necessary (outs='ps', etc).
"""
if not getattr(self, 'type', None) in ['latex', 'pdflatex', 'xelatex']:
self.type = 'pdflatex'
tree = self.bld
outs = Utils.to_list(getattr(self, 'outs', []))
# prompt for incomplete files (else the batchmode is used)
self.env['PROMPT_LATEX'] = getattr(self, 'prompt', 1)
deps_lst = []
if getattr(self, 'deps', None):
deps = self.to_list(self.deps)
for filename in deps:
n = self.path.find_resource(filename)
if not n:
self.bld.fatal('Could not find %r for %r' % (filename, self))
if not n in deps_lst:
deps_lst.append(n)
for node in self.to_nodes(self.source):
if self.type == 'latex':
task = self.create_task('latex', node, node.change_ext('.dvi'))
elif self.type == 'pdflatex':
task = self.create_task('pdflatex', node, node.change_ext('.pdf'))
elif self.type == 'xelatex':
task = self.create_task('xelatex', node, node.change_ext('.pdf'))
task.env = self.env
# add the manual dependencies
if deps_lst:
try:
lst = tree.node_deps[task.uid()]
for n in deps_lst:
if not n in lst:
lst.append(n)
except KeyError:
tree.node_deps[task.uid()] = deps_lst
v = dict(os.environ)
p = node.parent.abspath() + os.pathsep + self.path.abspath() + os.pathsep + self.path.get_bld().abspath() + os.pathsep + v.get('TEXINPUTS', '') + os.pathsep
v['TEXINPUTS'] = p
if self.type == 'latex':
if 'ps' in outs:
tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps'))
tsk.env.env = dict(v)
if 'pdf' in outs:
tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf'))
tsk.env.env = dict(v)
elif self.type == 'pdflatex':
if 'ps' in outs:
self.create_task('pdf2ps', task.outputs, node.change_ext('.ps'))
self.source = []
def configure(self):
"""
Try to find the programs tex, latex and others. Do not raise any error if they
are not found.
"""
v = self.env
for p in 'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'.split():
try:
self.find_program(p, var=p.upper())
except self.errors.ConfigurationError:
pass
v['DVIPSFLAGS'] = '-Ppdf'
| mit | 3,294,641,004,977,396,000 | 27.447796 | 158 | 0.646277 | false |
reviewboard/rbintegrations | rbintegrations/basechat/integration.py | 1 | 23215 | """Integration for chat integrations"""
from __future__ import unicode_literals
import logging
from djblets.db.query import get_object_or_none
from djblets.util.templatetags.djblets_utils import user_displayname
from reviewboard.accounts.models import Trophy
from reviewboard.admin.server import build_server_url
from reviewboard.extensions.hooks import SignalHook
from reviewboard.integrations import Integration
from reviewboard.reviews.models import (BaseComment, Comment,
FileAttachmentComment,
GeneralComment,
ReviewRequest,
ScreenshotComment)
from reviewboard.reviews.signals import (review_request_closed,
review_request_published,
review_request_reopened,
review_published,
reply_published)
from reviewboard.site.urlresolvers import local_site_reverse
class BaseChatIntegration(Integration):
"""Integrates Review Board with chat applications.
This will handle updating chat channels when review requests are posted,
changed, or closed, and when there's new activity on the review request.
"""
def initialize(self):
"""Initialize the integration hooks."""
hooks = (
(review_request_closed, self._on_review_request_closed),
(review_request_published, self._on_review_request_published),
(review_request_reopened, self._on_review_request_reopened),
(review_published, self._on_review_published),
(reply_published, self._on_reply_published),
)
for signal, handler in hooks:
SignalHook(self, signal, handler)
def notify(self, title, title_link, fallback_text, local_site,
review_request, event_name=None, fields={}, pre_text=None,
body=None, color=None, thumb_url=None, image_url=None):
"""Send a webhook notification to chat application.
This will post the given message to any channels configured to
receive it.
Args:
title (unicode):
The title for the message.
title_link (unicode):
The link for the title of the message.
fallback_text (unicode):
The non-rich fallback text to display in the chat, for use in
IRC and other services.
fields (dict):
The fields comprising the rich message to display in chat.
local_site (reviewboard.site.models.LocalSite):
The Local Site for the review request or review emitting
the message. Only integration configurations matching this
Local Site will be processed.
review_request (reviewboard.reviews.models.ReviewRequest):
The review request the notification is bound to.
event_name (unicode):
The name of the event triggering this notification.
pre_text (unicode, optional):
Text to display before the rest of the message.
body (unicode, optional):
The body of the message.
color (unicode, optional):
A color string or RGB hex value for the message.
thumb_url (unicode, optional):
URL of an image to show on the side of the message.
image_url (unicode, optional):
URL of an image to show in the message.
"""
raise NotImplementedError(
'%s must implement notify' % type(self).__name__)
def notify_review_or_reply(self, user, review, pre_text, fallback_text,
event_name, first_comment=None, **kwargs):
"""Notify chat application for any new posted reviews or replies.
This performs the common work of notifying configured channels
when there's a review or a reply.
Args:
user (django.contrib.auth.models.User):
The user who posted the review or reply.
review (reviewboard.reviews.models.Review):
The review or reply that was posted.
pre_text (unicode, optional):
Text to show before the message attachments.
fallback_text (unicode, optional):
Text to show in the fallback text, before the review URL and
after the review request ID.
event_name (unicode):
The name of the event triggering this notification.
first_comment (reviewboard.reviews.models.BaseComment, optional):
The first comment in a review, to generate the body message
from. This is optional, and will be computed if needed.
**kwargs (dict):
Other keyword arguments to pass to :py:meth:`notify`.
"""
review_request = review.review_request
review_url = build_server_url(review.get_absolute_url())
fallback_text = '#%s: %s: %s' % (review_request.display_id,
fallback_text, review_url)
if review.body_top:
body = review.body_top
# This is silly to show twice.
if review.ship_it and body == 'Ship It!':
body = ''
else:
if not first_comment:
for comment_cls in (Comment, FileAttachmentComment,
ScreenshotComment, GeneralComment):
try:
first_comment = (
comment_cls.objects
.filter(review=review)
.only('text')
)[0]
break
except IndexError:
pass
if first_comment:
body = first_comment.text
self.notify(title=self.get_review_request_title(review_request),
title_link=review_url,
fallback_text=fallback_text,
pre_text=pre_text,
body=body,
local_site=review.review_request.local_site,
review_request=review_request,
event_name=event_name,
**kwargs)
def notify_review_request(self, review_request, fallback_text, event_name,
**kwargs):
"""Notify chat application for a review request update.
This performs the common work of notifying configured channels
when there's a new review request or update to a review request.
Args:
review_request (reviewboard.reviews.models.ReviewRequest):
The review request.
fallback_text (unicode, optional):
Text to show in the fallback text, before the review URL and
after the review request ID.
event_name (unicode):
The name of the event triggering this notification.
**kwargs (dict):
Other keyword arguments to pass to :py:meth:`notify`.
"""
review_request_url = self.get_review_request_url(review_request)
fallback_text = '#%s: %s: %s' % (review_request.display_id,
fallback_text,
review_request_url)
self.notify(title=self.get_review_request_title(review_request),
title_link=review_request_url,
fallback_text=fallback_text,
review_request=review_request,
event_name=event_name,
**kwargs)
def format_link(self, path, text):
"""Format the given URL and text to be shown in a Slack message.
This will combine together the parts of the URL (method, domain, path)
and format it using Slack's URL syntax.
Args:
path (unicode):
The path on the Review Board server.
text (unicode):
The text for the link.
Returns:
unicode:
The link for use in Slack.
"""
raise NotImplementedError(
'%s must implement format_link' % type(self).__name__)
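    # Hedged sketch of a concrete subclass (the service name, Slack-style link markup and
    # payload fields are illustrative; a real integration would also handle configuration
    # and webhook delivery):
    #
    #     class ExampleChatIntegration(BaseChatIntegration):
    #         name = 'Example Chat'
    #
    #         def format_link(self, path, text):
    #             return '<%s|%s>' % (build_server_url(path), text)
    #
    #         def notify(self, title, title_link, fallback_text, local_site,
    #                    review_request, event_name=None, fields={}, pre_text=None,
    #                    body=None, color=None, thumb_url=None, image_url=None):
    #             payload = {'title': title, 'title_link': title_link,
    #                        'text': pre_text or fallback_text, 'fields': fields}
    #             # POST `payload` to each configured webhook URL for `local_site` here.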
def get_user_text_url(self, user, local_site):
"""Return the URL to a user page.
Args:
user (django.contrib.auth.models.User):
The user being linked to.
local_site (reviewboard.site.models.LocalSite):
The local site for the link, if any.
Returns:
unicode:
The URL to the user page.
"""
# This doesn't use user.get_absolute_url because that won't include
# site roots or local site names.
return local_site_reverse(
'user',
local_site=local_site,
kwargs={'username': user.username})
def get_user_text_link(self, user, local_site):
"""Return the chat application-formatted link to a user page.
Args:
user (django.contrib.auth.models.User):
The user being linked to.
local_site (reviewboard.site.models.LocalSite):
The local site for the link, if any.
Returns:
unicode:
The formatted link to the user page.
"""
return self.format_link(self.get_user_text_url(user, local_site),
user.get_full_name() or user.username)
def get_review_request_title(self, review_request):
"""Return the title for a review request message.
Args:
review_request (reviewboard.reviews.models.ReviewRequest):
The review request.
Returns:
unicode:
The title for the message.
"""
return '#%s: %s' % (review_request.display_id, review_request.summary)
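        # e.g. a review request with display_id=42 and summary "Fix login
        # crash" yields the title '#42: Fix login crash'.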
def get_review_request_text_link(self, review_request):
"""Return the chat application-formatted link to a review request.
Args:
review_request (reviewboard.reviews.models.ReviewRequest):
The review request being linked to.
Returns:
unicode:
The formatted link to the review request.
"""
return self.format_link(review_request.get_absolute_url(),
review_request.summary)
def get_review_request_url(self, review_request):
"""Return the absolute URL to a review request.
Returns:
unicode:
The absolute URL to the review request.
"""
return build_server_url(review_request.get_absolute_url())
def _on_review_request_closed(self, user, review_request, close_type,
description=None, **kwargs):
"""Handler for when review requests are closed.
This will send a notification to any configured channels when
a review request is closed.
Args:
user (django.contrib.auth.models.User):
The user who closed the review request.
review_request (reviewboard.reviews.models.ReviewRequest):
The review request that was closed.
close_type (unicode):
The close type.
            description (unicode, optional):
                The close message.
**kwargs (dict):
Additional keyword arguments passed to the handler.
"""
if not user:
user = review_request.submitter
user_link = self.get_user_text_link(user, review_request.local_site)
if close_type == ReviewRequest.DISCARDED:
pre_text = 'Discarded by %s' % user_link
fallback_text = 'Discarded by %s' % user_displayname(user)
elif close_type == ReviewRequest.SUBMITTED:
pre_text = 'Closed as completed by %s' % user_link
fallback_text = 'Closed as completed by %s' % \
user_displayname(user)
else:
logging.error('Tried to notify on review_request_closed for '
                          'review request pk=%d with unknown close type "%s"',
review_request.pk, close_type)
return
self.notify_review_request(review_request,
fallback_text=fallback_text,
body=description,
pre_text=pre_text,
local_site=review_request.local_site,
event_name='review_request_closed')
def _on_review_request_published(self, user, review_request, changedesc,
**kwargs):
"""Handler for when review requests are published.
This will send a notification to any configured channels when
a review request is published.
Args:
user (django.contrib.auth.models.User):
The user who published the review request.
review_request (reviewboard.reviews.models.ReviewRequest):
The review request that was published.
changedesc (reviewboard.changedescs.models.ChangeDescription):
The change description for the update, if any.
**kwargs (dict):
Additional keyword arguments passed to the handler.
"""
user_link = self.get_user_text_link(user, review_request.local_site)
fields = []
if changedesc:
fallback_text = 'New update from %s' % user_displayname(user)
pre_text = 'New update from %s' % user_link
# This might be empty, which is fine. We won't show an update
# at that point.
body = changedesc.text
else:
fallback_text = 'New review request from %s' % \
user_displayname(user)
pre_text = 'New review request from %s' % user_link
body = None
fields.append({
'short': False,
'title': 'Description',
'value': review_request.description,
})
# Link to the diff in the update, if any.
diffset = review_request.get_latest_diffset()
if diffset:
diff_url = local_site_reverse(
'view-diff-revision',
local_site=review_request.local_site,
kwargs={
'review_request_id': review_request.display_id,
'revision': diffset.revision,
})
fields.append({
'short': True,
'title': 'Diff',
'value': self.format_link(diff_url,
'Revision %s' % diffset.revision),
})
if review_request.repository:
fields.append({
'short': True,
'title': 'Repository',
'value': review_request.repository.name,
})
if review_request.branch:
fields.append({
'short': True,
'title': 'Branch',
'value': review_request.branch,
})
# See if there are any new interesting file attachments to show.
# These will only show up if the file is accessible.
attachment = None
if changedesc:
# Only show new files added in this change.
try:
new_files = changedesc.fields_changed['files']['added']
except KeyError:
new_files = []
for file_info in new_files:
if (len(file_info) >= 3 and
file_info[1].endswith(self.VALID_IMAGE_URL_EXTS)):
# This one wins. Show it.
attachment = get_object_or_none(
review_request.file_attachments,
pk=file_info[2])
break
else:
# This is a new review request, so show the first valid image
# we can find.
for attachment in review_request.file_attachments.all():
if attachment.filename.endswith(self.VALID_IMAGE_URL_EXTS):
# This one wins. Show it.
break
else:
attachment = None
if attachment:
image_url = attachment.get_absolute_url()
else:
image_url = None
# Find any trophies we may want to show in the update.
trophies = Trophy.objects.get_trophies(review_request)
trophy_url = None
if trophies:
# For now, due to the need to look up resources from a stable
# location, we're only supporting certain trophies. First one
# wins.
for trophy in trophies:
try:
trophy_url = self.TROPHY_URLS[trophy.category]
break
except KeyError:
pass
self.notify_review_request(review_request,
fallback_text=fallback_text,
body=body,
pre_text=pre_text,
fields=fields,
thumb_url=trophy_url,
image_url=image_url,
local_site=review_request.local_site,
event_name='review_request_published')
def _on_review_request_reopened(self, user, review_request, **kwargs):
"""Handler for when review requests are reopened.
This will send a notification to any configured channels when
a review request is reopened.
Args:
user (django.contrib.auth.models.User):
The user who reopened the review request.
review_request (reviewboard.reviews.models.ReviewRequest):
                The review request that was reopened.
**kwargs (dict):
Additional keyword arguments passed to the handler.
"""
if not user:
user = review_request.submitter
user_link = self.get_user_text_link(user, review_request.local_site)
pre_text = 'Reopened by %s' % user_link
fallback_text = 'Reopened by %s' % user_displayname(user)
self.notify_review_request(review_request,
fallback_text=fallback_text,
pre_text=pre_text,
body=review_request.description,
local_site=review_request.local_site,
event_name='review_request_reopened')
def _on_review_published(self, user, review, **kwargs):
"""Handler for when a review is published.
This will send a notification to any configured channels when
a review is published.
Args:
user (django.contrib.auth.models.User):
The user who published the review.
review (reviewboard.reviews.models.Review):
The review that was published.
**kwargs (dict):
Additional keyword arguments passed to the handler.
"""
open_issues = 0
first_comment = None
for comment in review.get_all_comments():
if not first_comment:
first_comment = comment
if (comment.issue_opened and
comment.issue_status == BaseComment.OPEN):
open_issues += 1
if open_issues == 1:
issue_text = '1 issue'
else:
issue_text = '%d issues' % open_issues
user_link = self.get_user_text_link(user,
review.review_request.local_site)
pre_text = 'New review from %s' % user_link
# There doesn't seem to be any image support inside the text fields,
# but the :white_check_mark: emoji shows a green box with a check-mark
# in it, and the :warning: emoji is a yellow exclamation point, which
# are close enough.
if review.ship_it:
if open_issues:
fields = [{
'title': 'Fix it, then Ship it!',
'value': ':warning: %s' % issue_text,
'short': True,
}]
extra_text = ' (Fix it, then Ship it!)'
color = 'warning'
else:
fields = [{
'title': 'Ship it!',
'value': ':white_check_mark:',
'short': True,
}]
extra_text = ' (Ship it!)'
color = 'good'
elif open_issues:
fields = [{
'title': 'Open Issues',
'value': ':warning: %s' % issue_text,
'short': True,
}]
extra_text = ' (%s)' % issue_text
color = 'warning'
else:
fields = []
extra_text = ''
color = None
fallback_text = 'New review from %s%s' % (
user_displayname(user), extra_text)
self.notify_review_or_reply(user=user,
review=review,
pre_text=pre_text,
fallback_text=fallback_text,
first_comment=first_comment,
fields=fields,
color=color,
event_name='review_published')
def _on_reply_published(self, user, reply, **kwargs):
"""Handler for when a reply to a review is published.
This will send a notification to any configured channels when
a reply to a review is published.
Args:
user (django.contrib.auth.models.User):
The user who published the reply.
            reply (reviewboard.reviews.models.Review):
The reply that was published.
**kwargs (dict):
Additional keyword arguments passed to the handler.
"""
user_link = self.get_user_text_link(user,
reply.review_request.local_site)
pre_text = 'New reply from %s' % user_link
fallback_text = 'New reply from %s' % user_displayname(user)
self.notify_review_or_reply(user=user,
review=reply,
fallback_text=fallback_text,
pre_text=pre_text,
event_name='reply_published')
| mit | -313,616,565,035,736,900 | 36.809446 | 79 | 0.525393 | false |
biocommons/hgvs-eval | hgvseval/tests/test_hgvs.py | 1 | 1235 | import requests
import json
from urlparse import urljoin
def test_info(endpoint):
"""Tests the info endpoint for the given service."""
url = urljoin(endpoint, 'info')
print "Testing HGVS service info endpoint on: {}".format(url)
res = requests.get(url)
print res.json()
assert 1 == 1
def test_criteria(endpoint, criteria):
"""Criteria contains the params necessary to run each HGVS eval test.
criteria: {
'name': 'c_ins_to_dup',
'tags': 'hgvs_c|ins|dup',
'feature': 'formatting',
'output_accepted': 'NM_005813.3:c.2673dupA|NM_005813.3:c.2673dup',
'output_preferred': 'NM_005813.3:c.2673dup',
'input': 'NM_005813.3:c.2673insA',
'operation': 'rewrite',
'test_source': '',
'data': 'RefSeq',
'varnomen_reference': '',
'description': 'Tests ...'
}
"""
url = urljoin(endpoint, criteria['operation'])
print "Testing HGVS operation on: {}".format(url)
res = requests.post(
url,
data=json.dumps({
'ac': criteria['output_accepted'].split(':')[0],
'hgvsString': criteria['input']
}))
print res.json()
| apache-2.0 | 387,591,595,150,731,500 | 29.875 | 78 | 0.558704 | false |
randynobx/ansible | lib/ansible/modules/net_tools/dnsmadeeasy.py | 13 | 24115 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dnsmadeeasy
version_added: "1.3"
short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
description:
- >
Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
options:
account_key:
description:
- Account API Key.
required: true
default: null
account_secret:
description:
- Account Secret Key.
required: true
default: null
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
resolution
required: true
default: null
record_name:
description:
- Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless
of the state argument.
required: false
default: null
record_type:
description:
- Record type.
required: false
choices: [ 'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ]
default: null
record_value:
description:
- >
Record value. HTTPRED: <redirection URL>, MX: <priority> <target name>, NS: <name server>, PTR: <target name>,
SRV: <priority> <weight> <port> <target name>, TXT: <text value>"
- >
If record_value is not specified; no changes will be made and the record will be returned in 'result'
(in other words, this module can be used to fetch a record's current id, type, and ttl)
required: false
default: null
record_ttl:
description:
- record's "Time to live". Number of seconds the record remains cached in DNS servers.
required: false
default: 1800
state:
description:
- whether the record should exist or not
required: true
choices: [ 'present', 'absent' ]
default: null
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
monitor:
description:
- If C(yes), add or change the monitor. This is applicable only for A records.
required: true
default: 'no'
choices: ['yes', 'no']
version_added: 2.4
systemDescription:
description:
- Description used by the monitor.
required: true
default: ''
version_added: 2.4
maxEmails:
description:
- Number of emails sent to the contact list by the monitor.
required: true
default: 1
version_added: 2.4
protocol:
description:
- Protocol used by the monitor.
required: true
default: 'HTTP'
choices: ['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']
version_added: 2.4
port:
description:
- Port used by the monitor.
required: true
default: 80
version_added: 2.4
sensitivity:
description:
      - Number of checks the monitor performs before a failover occurs where Low = 8, Medium = 5, and High = 3.
required: true
default: 'Medium'
choices: ['Low', 'Medium', 'High']
version_added: 2.4
contactList:
description:
- Name or id of the contact list that the monitor will notify.
- The default C('') means the Account Owner.
required: true
default: ''
version_added: 2.4
httpFqdn:
description:
- The fully qualified domain name used by the monitor.
required: false
version_added: 2.4
httpFile:
description:
- The file at the Fqdn that the monitor queries for HTTP or HTTPS.
required: false
version_added: 2.4
httpQueryString:
description:
- The string in the httpFile that the monitor queries for HTTP or HTTPS.
required: False
version_added: 2.4
failover:
description:
- If C(yes), add or change the failover. This is applicable only for A records.
required: true
default: 'no'
choices: ['yes', 'no']
version_added: 2.4
autoFailover:
description:
- If true, fallback to the primary IP address is manual after a failover.
- If false, fallback to the primary IP address is automatic after a failover.
required: true
default: 'no'
choices: ['yes', 'no']
version_added: 2.4
ip1:
description:
- Primary IP address for the failover.
- Required if adding or changing the monitor or failover.
required: false
version_added: 2.4
ip2:
description:
- Secondary IP address for the failover.
- Required if adding or changing the failover.
required: false
version_added: 2.4
ip3:
description:
- Tertiary IP address for the failover.
required: false
version_added: 2.4
ip4:
description:
- Quaternary IP address for the failover.
required: false
version_added: 2.4
ip5:
description:
- Quinary IP address for the failover.
required: false
version_added: 2.4
notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
seconds of actual time by using NTP.
- This module returns record(s) and monitor(s) in the "result" element when 'state' is set to 'present'.
    These values can be registered and used in your playbooks.
- Only A records can have a monitor or failover.
- To add failover, the 'failover', 'autoFailover', 'port', 'protocol', 'ip1', and 'ip2' options are required.
- To add monitor, the 'monitor', 'port', 'protocol', 'maxEmails', 'systemDescription', and 'ip1' options are required.
- The monitor and the failover will share 'port', 'protocol', and 'ip1' options.
requirements: [ hashlib, hmac ]
author: "Brice Burgess (@briceburg)"
'''
EXAMPLES = '''
# fetch my.com domain records
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
register: response
# create / ensure the presence of a record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
# update the previously created record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_value: 192.0.2.23
# fetch a specific record
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
register: response
# delete a record / ensure it is absent
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: absent
record_name: test
# Add a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
ip3: 127.0.0.4
ip4: 127.0.0.5
ip5: 127.0.0.6
# Add a monitor
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: yes
ip1: 127.0.0.2
protocol: HTTP # default
port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: my contact list
# Add a monitor with http options
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: yes
ip1: 127.0.0.2
protocol: HTTP # default
port: 80 # default
maxEmails: 1
systemDescription: Monitor Test A record
contactList: 1174 # contact list id
httpFqdn: http://my.com
httpFile: example
httpQueryString: some string
# Add a monitor and a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: True
ip1: 127.0.0.2
ip2: 127.0.0.3
monitor: yes
protocol: HTTPS
port: 443
maxEmails: 1
systemDescription: monitoring my.com status
contactList: emergencycontacts
# Remove a failover
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
failover: no
# Remove a monitor
- dnsmadeeasy:
account_key: key
account_secret: secret
domain: my.com
state: present
record_name: test
record_type: A
record_value: 127.0.0.1
monitor: no
'''
# ============================================
# DNSMadeEasy module specific support methods.
#
from ansible.module_utils.six.moves.urllib.parse import urlencode
IMPORT_ERROR = None
try:
import json
from time import strftime, gmtime
import hashlib
import hmac
except ImportError as e:
    IMPORT_ERROR = str(e)
class DME2:
def __init__(self, apikey, secret, domain, module):
self.module = module
self.api = apikey
self.secret = secret
self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
self.domain = str(domain)
self.domain_map = None # ["domain_name"] => ID
self.record_map = None # ["record_name"] => ID
self.records = None # ["record_ID"] => <record>
self.all_records = None
self.contactList_map = None # ["contactList_name"] => ID
# Lookup the domain ID if passed as a domain name vs. ID
if not self.domain.isdigit():
self.domain = self.getDomainByName(self.domain)['id']
self.record_url = 'dns/managed/' + str(self.domain) + '/records'
self.monitor_url = 'monitor'
self.contactList_url = 'contactList'
def _headers(self):
currTime = self._get_date()
hashstring = self._create_hash(currTime)
headers = {'x-dnsme-apiKey': self.api,
'x-dnsme-hmac': hashstring,
'x-dnsme-requestDate': currTime,
'content-type': 'application/json'}
return headers
def _get_date(self):
return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
def _create_hash(self, rightnow):
return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest()
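        # For reference, every request built by _headers() carries three
        # authentication headers derived from the account credentials:
        #
        #   x-dnsme-apiKey:      <account_key>
        #   x-dnsme-requestDate: e.g. 'Tue, 02 May 2017 12:00:00 GMT'
        #   x-dnsme-hmac:        HMAC-SHA1 hex digest of that date string,
        #                        keyed with the account secret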
def query(self, resource, method, data=None):
url = self.baseurl + resource
if data and not isinstance(data, basestring):
data = urlencode(data)
response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
if info['status'] not in (200, 201, 204):
self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
try:
return json.load(response)
except Exception:
return {}
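        # e.g. self.query('dns/managed', 'GET') returns the decoded JSON
        # payload, or an empty dict for responses with no body (such as a
        # 204 on DELETE).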
def getDomain(self, domain_id):
if not self.domain_map:
self._instMap('domain')
return self.domains.get(domain_id, False)
def getDomainByName(self, domain_name):
if not self.domain_map:
self._instMap('domain')
return self.getDomain(self.domain_map.get(domain_name, 0))
def getDomains(self):
return self.query('dns/managed', 'GET')['data']
def getRecord(self, record_id):
if not self.record_map:
self._instMap('record')
return self.records.get(record_id, False)
# Try to find a single record matching this one.
# How we do this depends on the type of record. For instance, there
# can be several MX records for a single record_name while there can
# only be a single CNAME for a particular record_name. Note also that
# there can be several records with different types for a single name.
def getMatchingRecord(self, record_name, record_type, record_value):
# Get all the records if not already cached
if not self.all_records:
self.all_records = self.getRecords()
if record_type in ["A", "AAAA", "CNAME", "ANAME", "HTTPRED", "PTR"]:
for result in self.all_records:
if result['name'] == record_name and result['type'] == record_type:
return result
return False
elif record_type in ["MX", "NS", "TXT", "SRV"]:
for result in self.all_records:
if record_type == "MX":
value = record_value.split(" ")[1]
elif record_type == "SRV":
value = record_value.split(" ")[3]
else:
value = record_value
if result['name'] == record_name and result['type'] == record_type and result['value'] == value:
return result
return False
else:
raise Exception('record_type not yet supported')
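        # Matching example (illustrative): for an MX record_value of
        # '10 mail.example.com' only the target ('mail.example.com') is
        # compared against the stored value, and for an SRV record_value of
        # '0 5 5060 sip.example.com' the fourth token is used.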
def getRecords(self):
return self.query(self.record_url, 'GET')['data']
def _instMap(self, type):
#@TODO cache this call so it's executed only once per ansible execution
map = {}
results = {}
# iterate over e.g. self.getDomains() || self.getRecords()
for result in getattr(self, 'get' + type.title() + 's')():
map[result['name']] = result['id']
results[result['id']] = result
# e.g. self.domain_map || self.record_map
setattr(self, type + '_map', map)
setattr(self, type + 's', results) # e.g. self.domains || self.records
def prepareRecord(self, data):
return json.dumps(data, separators=(',', ':'))
def createRecord(self, data):
        #@TODO update the cache w/ resultant record + id when implemented
return self.query(self.record_url, 'POST', data)
def updateRecord(self, record_id, data):
        #@TODO update the cache w/ resultant record + id when implemented
return self.query(self.record_url + '/' + str(record_id), 'PUT', data)
def deleteRecord(self, record_id):
        #@TODO remove record from the cache when implemented
return self.query(self.record_url + '/' + str(record_id), 'DELETE')
def getMonitor(self, record_id):
return self.query(self.monitor_url + '/' + str(record_id), 'GET')
def updateMonitor(self, record_id, data):
return self.query(self.monitor_url + '/' + str(record_id), 'PUT', data)
def prepareMonitor(self, data):
return json.dumps(data, separators=(',', ':'))
def getContactList(self, contact_list_id):
if not self.contactList_map:
self._instMap('contactList')
return self.contactLists.get(contact_list_id, False)
def getContactlists(self):
return self.query(self.contactList_url, 'GET')['data']
def getContactListByName(self, name):
if not self.contactList_map:
self._instMap('contactList')
return self.getContactList(self.contactList_map.get(name, 0))
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
account_key=dict(required=True),
account_secret=dict(required=True, no_log=True),
domain=dict(required=True),
state=dict(required=True, choices=['present', 'absent']),
record_name=dict(required=False),
record_type=dict(required=False, choices=[
'A', 'AAAA', 'CNAME', 'ANAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
record_value=dict(required=False),
record_ttl=dict(required=False, default=1800, type='int'),
monitor=dict(default='no', type='bool'),
systemDescription=dict(default=''),
maxEmails=dict(default=1, type='int'),
protocol=dict(default='HTTP', choices=['TCP', 'UDP', 'HTTP', 'DNS', 'SMTP', 'HTTPS']),
port=dict(default=80, type='int'),
sensitivity=dict(default='Medium', choices=['Low', 'Medium', 'High']),
contactList=dict(default=None),
httpFqdn=dict(required=False),
httpFile=dict(required=False),
httpQueryString=dict(required=False),
failover=dict(default='no', type='bool'),
autoFailover=dict(default='no', type='bool'),
ip1=dict(required=False),
ip2=dict(required=False),
ip3=dict(required=False),
ip4=dict(required=False),
ip5=dict(required=False),
validate_certs = dict(default='yes', type='bool'),
),
required_together=(
['record_value', 'record_ttl', 'record_type']
),
required_if=[
['failover', True, ['autoFailover', 'port', 'protocol', 'ip1', 'ip2']],
['monitor', True, ['port', 'protocol', 'maxEmails', 'systemDescription', 'ip1']]
]
)
if IMPORT_ERROR:
module.fail_json(msg="Import Error: " + IMPORT_ERROR)
protocols = dict(TCP=1, UDP=2, HTTP=3, DNS=4, SMTP=5, HTTPS=6)
sensitivities = dict(Low=8, Medium=5, High=3)
DME = DME2(module.params["account_key"], module.params[
"account_secret"], module.params["domain"], module)
state = module.params["state"]
record_name = module.params["record_name"]
record_type = module.params["record_type"]
record_value = module.params["record_value"]
# Follow Keyword Controlled Behavior
if record_name is None:
domain_records = DME.getRecords()
if not domain_records:
module.fail_json(
msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
module.exit_json(changed=False, result=domain_records)
# Fetch existing record + Build new one
current_record = DME.getMatchingRecord(record_name, record_type, record_value)
new_record = {'name': record_name}
for i in ["record_value", "record_type", "record_ttl"]:
if not module.params[i] is None:
new_record[i[len("record_"):]] = module.params[i]
# Special handling for mx record
if new_record["type"] == "MX":
new_record["mxLevel"] = new_record["value"].split(" ")[0]
new_record["value"] = new_record["value"].split(" ")[1]
# Special handling for SRV records
if new_record["type"] == "SRV":
new_record["priority"] = new_record["value"].split(" ")[0]
new_record["weight"] = new_record["value"].split(" ")[1]
new_record["port"] = new_record["value"].split(" ")[2]
new_record["value"] = new_record["value"].split(" ")[3]
# Fetch existing monitor if the A record indicates it should exist and build the new monitor
current_monitor = dict()
new_monitor = dict()
if current_record and current_record['type'] == 'A':
current_monitor = DME.getMonitor(current_record['id'])
# Build the new monitor
for i in ['monitor', 'systemDescription', 'protocol', 'port', 'sensitivity', 'maxEmails',
'contactList', 'httpFqdn', 'httpFile', 'httpQueryString',
'failover', 'autoFailover', 'ip1', 'ip2', 'ip3', 'ip4', 'ip5']:
if module.params[i] is not None:
if i == 'protocol':
# The API requires protocol to be a numeric in the range 1-6
new_monitor['protocolId'] = protocols[module.params[i]]
elif i == 'sensitivity':
# The API requires sensitivity to be a numeric of 8, 5, or 3
new_monitor[i] = sensitivities[module.params[i]]
elif i == 'contactList':
# The module accepts either the name or the id of the contact list
contact_list_id = module.params[i]
if not contact_list_id.isdigit() and contact_list_id != '':
contact_list = DME.getContactListByName(contact_list_id)
if not contact_list:
module.fail_json(msg="Contact list {} does not exist".format(contact_list_id))
contact_list_id = contact_list.get('id', '')
new_monitor['contactListId'] = contact_list_id
else:
# The module option names match the API field names
new_monitor[i] = module.params[i]
# Compare new record against existing one
record_changed = False
if current_record:
for i in new_record:
if str(current_record[i]) != str(new_record[i]):
record_changed = True
new_record['id'] = str(current_record['id'])
monitor_changed = False
if current_monitor:
for i in new_monitor:
if str(current_monitor.get(i)) != str(new_monitor[i]):
monitor_changed = True
# Follow Keyword Controlled Behavior
if state == 'present':
# return the record if no value is specified
if not "value" in new_record:
if not current_record:
module.fail_json(
msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, module.params['domain']))
module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
# create record and monitor as the record does not exist
if not current_record:
record = DME.createRecord(DME.prepareRecord(new_record))
monitor = DME.updateMonitor(record['id'], DME.prepareMonitor(new_monitor))
module.exit_json(changed=True, result=dict(record=record, monitor=monitor))
# update the record
updated = False
if record_changed:
DME.updateRecord(current_record['id'], DME.prepareRecord(new_record))
updated = True
if monitor_changed:
DME.updateMonitor(current_monitor['recordId'], DME.prepareMonitor(new_monitor))
updated = True
if updated:
module.exit_json(changed=True, result=dict(record=new_record, monitor=new_monitor))
# return the record (no changes)
module.exit_json(changed=False, result=dict(record=current_record, monitor=current_monitor))
elif state == 'absent':
changed = False
# delete the record (and the monitor/failover) if it exists
if current_record:
DME.deleteRecord(current_record['id'])
module.exit_json(changed=True)
# record does not exist, return w/o change.
module.exit_json(changed=False)
else:
module.fail_json(
msg="'%s' is an unknown value for the state argument" % state)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 | 5,070,027,334,570,364,000 | 31.809524 | 149 | 0.613809 | false |
mohamedhagag/community-addons | openeducat_erp/op_assignment/op_assignment.py | 1 | 2507 | # -*- coding: utf-8 -*-
###############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2009-TODAY Tech-Receptives(<http://www.techreceptives.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import models, fields, api
class OpAssignment(models.Model):
_name = 'op.assignment'
name = fields.Char('Name', size=16, required=True)
course_id = fields.Many2one('op.course', 'Course', required=True)
standard_id = fields.Many2one('op.standard', 'Standard', required=True)
division_id = fields.Many2one('op.division', 'Division')
subject_id = fields.Many2one('op.subject', 'Subject', required=True)
faculty_id = fields.Many2one('op.faculty', 'Faculty', required=True)
marks = fields.Float('Marks')
description = fields.Text('Description', required=True)
type = fields.Many2one('op.exam.type', 'Type', required=True)
state = fields.Selection(
[('d', 'Draft'), ('p', 'Publish'), ('f', 'Finished')], 'State',
required=True, default='d')
issued_date = fields.Datetime('Issued Date', required=True)
submission_date = fields.Datetime('Submission Date', required=True)
allocation_ids = fields.Many2many('op.student', string='Allocated To')
assignment_sub_line = fields.One2many(
'op.assignment.sub.line', 'assignment_id', 'Submissions')
reviewer = fields.Many2one('op.faculty', 'Reviewer')
@api.one
def act_draft(self):
        # Reminder: delete this method as it is not used.
self.state = 'd'
@api.one
def act_publish(self):
self.state = 'p'
@api.one
def act_finish(self):
self.state = 'f'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,984,240,560,048,603,000 | 40.098361 | 79 | 0.633426 | false |
olivierdalang/QGIS | tests/src/python/test_qgscoordinateoperationwidget.py | 15 | 13325 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for TestQgsCoordinateOperationWidget
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '19/12/2019'
__copyright__ = 'Copyright 2019, The QGIS Project'
import qgis # NOQA
import os
from qgis.core import (
QgsProjUtils,
QgsDatumTransform,
QgsCoordinateReferenceSystem,
QgsCoordinateTransformContext)
from qgis.gui import QgsCoordinateOperationWidget
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsCoordinateOperationWidget(unittest.TestCase):
def testGettersSetters(self):
""" test widget getters/setters """
w = QgsCoordinateOperationWidget()
self.assertFalse(w.sourceCrs().isValid())
self.assertFalse(w.destinationCrs().isValid())
self.assertFalse(w.hasSelection())
self.assertFalse(w.availableOperations())
w.setSourceCrs(QgsCoordinateReferenceSystem('EPSG:28355'))
self.assertEqual(w.sourceCrs().authid(), 'EPSG:28355')
self.assertFalse(w.destinationCrs().isValid())
w.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:7855'))
self.assertEqual(w.sourceCrs().authid(), 'EPSG:28355')
self.assertEqual(w.destinationCrs().authid(), 'EPSG:7855')
@unittest.skipIf(QgsProjUtils.projVersionMajor() < 6, 'Not a proj6 build')
def testOperations(self):
w = QgsCoordinateOperationWidget()
self.assertFalse(w.hasSelection())
spy = QSignalSpy(w.operationChanged)
w.setSourceCrs(QgsCoordinateReferenceSystem('EPSG:26745'))
self.assertEqual(len(spy), 0)
w.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:3857'))
self.assertEqual(len(spy), 1)
self.assertTrue(w.hasSelection())
self.assertGreaterEqual(len(w.availableOperations()), 3)
self.assertEqual(w.defaultOperation().proj, '+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=159 +z=175 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertEqual(w.selectedOperation().proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=159 +z=175 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertTrue(w.selectedOperation().isAvailable)
op = QgsCoordinateOperationWidget.OperationDetails()
op.proj = '+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=160 +z=176 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84'
op.allowFallback = True
w.setSelectedOperation(op)
self.assertEqual(w.selectedOperation().proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=160 +z=176 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertTrue(w.selectedOperation().allowFallback)
self.assertEqual(len(spy), 2)
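        # re-selecting the identical operation should not emit operationChanged again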
w.setSelectedOperation(op)
self.assertEqual(w.selectedOperation().proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=160 +z=176 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertTrue(w.selectedOperation().allowFallback)
self.assertEqual(len(spy), 2)
op.proj = '+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=159 +z=175 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84'
op.allowFallback = False
w.setSelectedOperation(op)
self.assertEqual(w.selectedOperation().proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=159 +z=175 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertFalse(w.selectedOperation().allowFallback)
self.assertEqual(len(spy), 3)
op.allowFallback = True
w.setSelectedOperation(op)
self.assertEqual(w.selectedOperation().proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=159 +z=175 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertTrue(w.selectedOperation().allowFallback)
self.assertEqual(len(spy), 4)
context = QgsCoordinateTransformContext()
op.proj = '+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=160 +z=176 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84'
w.setSelectedOperation(op)
w.setSelectedOperationUsingContext(context)
# should go to default, because there's nothing in the context matching these crs
self.assertEqual(w.selectedOperation().proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=159 +z=175 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertEqual(len(spy), 6)
# put something in the context
context.addCoordinateOperation(w.sourceCrs(), w.destinationCrs(), '+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=160 +z=176 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
w.setSelectedOperationUsingContext(context)
self.assertEqual(w.selectedOperation().proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=160 +z=176 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertTrue(w.selectedOperation().allowFallback)
self.assertEqual(len(spy), 7)
context.addCoordinateOperation(w.sourceCrs(), w.destinationCrs(), '+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=160 +z=176 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84', False)
w.setSelectedOperationUsingContext(context)
self.assertEqual(w.selectedOperation().proj,
'+proj=pipeline +step +proj=unitconvert +xy_in=us-ft +xy_out=m +step +inv +proj=lcc +lat_0=33.5 +lon_0=-118 +lat_1=35.4666666666667 +lat_2=34.0333333333333 +x_0=609601.219202438 +y_0=0 +ellps=clrk66 +step +proj=push +v_3 +step +proj=cart +ellps=clrk66 +step +proj=helmert +x=-8 +y=160 +z=176 +step +inv +proj=cart +ellps=WGS84 +step +proj=pop +v_3 +step +proj=webmerc +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84')
self.assertFalse(w.selectedOperation().allowFallback)
self.assertEqual(len(spy), 8)
@unittest.skipIf(os.environ.get('TRAVIS', '') == 'true' or QgsProjUtils.projVersionMajor() >= 6, 'Depends on local environment and grid presence')
def testOperationsCruftyProj(self):
w = QgsCoordinateOperationWidget()
self.assertFalse(w.hasSelection())
spy = QSignalSpy(w.operationChanged)
w.setSourceCrs(QgsCoordinateReferenceSystem('EPSG:4283'))
self.assertEqual(len(spy), 0)
w.setDestinationCrs(QgsCoordinateReferenceSystem('EPSG:7844'))
self.assertEqual(len(spy), 1)
self.assertTrue(w.hasSelection())
self.assertEqual(len(w.availableOperations()), 2)
self.assertEqual(QgsDatumTransform.datumTransformToProj(w.defaultOperation().sourceTransformId), '+nadgrids=GDA94_GDA2020_conformal_and_distortion.gsb')
self.assertEqual(w.defaultOperation().destinationTransformId, -1)
self.assertEqual(QgsDatumTransform.datumTransformToProj(w.selectedOperation().sourceTransformId), '+nadgrids=GDA94_GDA2020_conformal_and_distortion.gsb')
self.assertEqual(w.selectedOperation().destinationTransformId, -1)
self.assertEqual(QgsDatumTransform.datumTransformToProj(w.availableOperations()[1].sourceTransformId),
'+nadgrids=GDA94_GDA2020_conformal.gsb')
self.assertEqual(w.availableOperations()[1].destinationTransformId, -1)
op = QgsCoordinateOperationWidget.OperationDetails()
op.sourceTransformId = w.availableOperations()[1].sourceTransformId
w.setSelectedOperation(op)
self.assertEqual(QgsDatumTransform.datumTransformToProj(w.selectedOperation().sourceTransformId), '+nadgrids=GDA94_GDA2020_conformal.gsb')
self.assertEqual(len(spy), 2)
w.setSelectedOperation(op)
self.assertEqual(QgsDatumTransform.datumTransformToProj(w.selectedOperation().sourceTransformId), '+nadgrids=GDA94_GDA2020_conformal.gsb')
self.assertEqual(len(spy), 2)
op.sourceTransformId = w.availableOperations()[0].sourceTransformId
op.destinationTransformId = -1
w.setSelectedOperation(op)
self.assertEqual(QgsDatumTransform.datumTransformToProj(w.selectedOperation().sourceTransformId),
'+nadgrids=GDA94_GDA2020_conformal_and_distortion.gsb')
self.assertEqual(len(spy), 3)
op.destinationTransformId = w.availableOperations()[1].sourceTransformId
op.sourceTransformId = -1
w.setSelectedOperation(op)
self.assertEqual(QgsDatumTransform.datumTransformToProj(w.selectedOperation().sourceTransformId),
'+nadgrids=GDA94_GDA2020_conformal.gsb')
self.assertEqual(len(spy), 4)
op.destinationTransformId = w.availableOperations()[0].sourceTransformId
op.sourceTransformId = -1
w.setSelectedOperation(op)
self.assertEqual(QgsDatumTransform.datumTransformToProj(w.selectedOperation().sourceTransformId),
'+nadgrids=GDA94_GDA2020_conformal_and_distortion.gsb')
self.assertEqual(len(spy), 5)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -7,518,137,030,179,104,000 | 76.47093 | 495 | 0.683902 | false |
onceuponatimeforever/oh-mainline | vendor/packages/sphinx/sphinx/environment.py | 15 | 70053 | # -*- coding: utf-8 -*-
"""
sphinx.environment
~~~~~~~~~~~~~~~~~~
Global creation environment.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import os
import sys
import time
import types
import codecs
import imghdr
import string
import unicodedata
import cPickle as pickle
from os import path
from glob import glob
from itertools import izip, groupby
from docutils import nodes
from docutils.io import FileInput, NullOutput
from docutils.core import Publisher
from docutils.utils import Reporter, relative_path, get_source_line
from docutils.readers import standalone
from docutils.parsers.rst import roles, directives
from docutils.parsers.rst.languages import en as english
from docutils.parsers.rst.directives.html import MetaBody
from docutils.writers import UnfilteredWriter
from sphinx import addnodes
from sphinx.util import url_re, get_matching_docs, docname_join, split_into, \
FilenameUniqDict
from sphinx.util.nodes import clean_astext, make_refnode, WarningStream
from sphinx.util.osutil import SEP, fs_encoding, find_catalog_files
from sphinx.util.matching import compile_matchers
from sphinx.util.pycompat import class_types
from sphinx.util.websupport import is_commentable
from sphinx.errors import SphinxError, ExtensionError
from sphinx.locale import _
from sphinx.versioning import add_uids, merge_doctrees
from sphinx.transforms import DefaultSubstitutions, MoveModuleTargets, \
HandleCodeBlocks, SortIds, CitationReferences, Locale, \
RemoveTranslatableInline, SphinxContentsFilter
orig_role_function = roles.role
orig_directive_function = directives.directive
class ElementLookupError(Exception): pass
default_settings = {
'embed_stylesheet': False,
'cloak_email_addresses': True,
'pep_base_url': 'http://www.python.org/dev/peps/',
'rfc_base_url': 'http://tools.ietf.org/html/',
'input_encoding': 'utf-8-sig',
'doctitle_xform': False,
'sectsubtitle_xform': False,
'halt_level': 5,
'file_insertion_enabled': True,
}
# This is increased every time an environment attribute is added
# or changed to properly invalidate pickle files.
ENV_VERSION = 42 + (sys.version_info[0] - 2)
dummy_reporter = Reporter('', 4, 4)
versioning_conditions = {
'none': False,
'text': nodes.TextElement,
'commentable': is_commentable,
}
class NoUri(Exception):
"""Raised by get_relative_uri if there is no URI available."""
pass
class SphinxStandaloneReader(standalone.Reader):
"""
Add our own transforms.
"""
transforms = [Locale, CitationReferences, DefaultSubstitutions,
MoveModuleTargets, HandleCodeBlocks, SortIds,
RemoveTranslatableInline]
def get_transforms(self):
return standalone.Reader.get_transforms(self) + self.transforms
class SphinxDummyWriter(UnfilteredWriter):
supported = ('html',) # needed to keep "meta" nodes
def translate(self):
pass
class BuildEnvironment:
"""
The environment in which the ReST files are translated.
Stores an inventory of cross-file targets and provides doctree
transformations to resolve links to them.
"""
# --------- ENVIRONMENT PERSISTENCE ----------------------------------------
@staticmethod
def frompickle(config, filename):
picklefile = open(filename, 'rb')
try:
env = pickle.load(picklefile)
finally:
picklefile.close()
if env.version != ENV_VERSION:
raise IOError('env version not current')
env.config.values = config.values
return env
def topickle(self, filename):
# remove unpicklable attributes
warnfunc = self._warnfunc
self.set_warnfunc(None)
values = self.config.values
del self.config.values
domains = self.domains
del self.domains
picklefile = open(filename, 'wb')
# remove potentially pickling-problematic values from config
for key, val in vars(self.config).items():
if key.startswith('_') or \
isinstance(val, types.ModuleType) or \
isinstance(val, types.FunctionType) or \
isinstance(val, class_types):
del self.config[key]
try:
pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
finally:
picklefile.close()
# reset attributes
self.domains = domains
self.config.values = values
self.set_warnfunc(warnfunc)
# --------- ENVIRONMENT INITIALIZATION -------------------------------------
def __init__(self, srcdir, doctreedir, config):
self.doctreedir = doctreedir
self.srcdir = srcdir
self.config = config
# the method of doctree versioning; see set_versioning_method
self.versioning_condition = None
# the application object; only set while update() runs
self.app = None
# all the registered domains, set by the application
self.domains = {}
# the docutils settings for building
self.settings = default_settings.copy()
self.settings['env'] = self
# the function to write warning messages with
self._warnfunc = None
# this is to invalidate old pickles
self.version = ENV_VERSION
# make this a set for faster testing
self._nitpick_ignore = set(self.config.nitpick_ignore)
# All "docnames" here are /-separated and relative and exclude
# the source suffix.
self.found_docs = set() # contains all existing docnames
self.all_docs = {} # docname -> mtime at the time of build
# contains all built docnames
self.dependencies = {} # docname -> set of dependent file
# names, relative to documentation root
self.reread_always = set() # docnames to re-read unconditionally on
# next build
# File metadata
self.metadata = {} # docname -> dict of metadata items
# TOC inventory
self.titles = {} # docname -> title node
self.longtitles = {} # docname -> title node; only different if
# set differently with title directive
self.tocs = {} # docname -> table of contents nodetree
self.toc_num_entries = {} # docname -> number of real entries
# used to determine when to show the TOC
# in a sidebar (don't show if it's only one item)
self.toc_secnumbers = {} # docname -> dict of sectionid -> number
self.toctree_includes = {} # docname -> list of toctree includefiles
self.files_to_rebuild = {} # docname -> set of files
# (containing its TOCs) to rebuild too
self.glob_toctrees = set() # docnames that have :glob: toctrees
self.numbered_toctrees = set() # docnames that have :numbered: toctrees
# domain-specific inventories, here to be pickled
self.domaindata = {} # domainname -> domain-specific dict
# Other inventories
self.citations = {} # citation name -> docname, labelid
self.indexentries = {} # docname -> list of
# (type, string, target, aliasname)
self.versionchanges = {} # version -> list of (type, docname,
# lineno, module, descname, content)
# these map absolute path -> (docnames, unique filename)
self.images = FilenameUniqDict()
self.dlfiles = FilenameUniqDict()
# temporary data storage while reading a document
self.temp_data = {}
def set_warnfunc(self, func):
self._warnfunc = func
self.settings['warning_stream'] = WarningStream(func)
def set_versioning_method(self, method):
"""This sets the doctree versioning method for this environment.
Versioning methods are a builder property; only builders with the same
versioning method can share the same doctree directory. Therefore, we
raise an exception if the user tries to use an environment with an
incompatible versioning method.
"""
if method not in versioning_conditions:
raise ValueError('invalid versioning method: %r' % method)
condition = versioning_conditions[method]
if self.versioning_condition not in (None, condition):
raise SphinxError('This environment is incompatible with the '
'selected builder, please choose another '
'doctree directory.')
self.versioning_condition = condition
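        # For example (illustrative): most builders use the 'none' method,
        # while a builder that needs stable per-node identifiers (the web
        # support builder, for instance) selects 'commentable'; the two
        # cannot share a single doctree directory.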
def warn(self, docname, msg, lineno=None):
# strange argument order is due to backwards compatibility
self._warnfunc(msg, (docname, lineno))
def warn_node(self, msg, node):
self._warnfunc(msg, '%s:%s' % get_source_line(node))
def clear_doc(self, docname):
"""Remove all traces of a source file in the inventory."""
if docname in self.all_docs:
self.all_docs.pop(docname, None)
self.reread_always.discard(docname)
self.metadata.pop(docname, None)
self.dependencies.pop(docname, None)
self.titles.pop(docname, None)
self.longtitles.pop(docname, None)
self.tocs.pop(docname, None)
self.toc_secnumbers.pop(docname, None)
self.toc_num_entries.pop(docname, None)
self.toctree_includes.pop(docname, None)
self.indexentries.pop(docname, None)
self.glob_toctrees.discard(docname)
self.numbered_toctrees.discard(docname)
self.images.purge_doc(docname)
self.dlfiles.purge_doc(docname)
for subfn, fnset in self.files_to_rebuild.items():
fnset.discard(docname)
if not fnset:
del self.files_to_rebuild[subfn]
for key, (fn, _) in self.citations.items():
if fn == docname:
del self.citations[key]
for version, changes in self.versionchanges.items():
new = [change for change in changes if change[1] != docname]
changes[:] = new
for domain in self.domains.values():
domain.clear_doc(docname)
def doc2path(self, docname, base=True, suffix=None):
"""Return the filename for the document name.
If *base* is True, return absolute path under self.srcdir.
If *base* is None, return relative path to self.srcdir.
If *base* is a path string, return absolute path under that.
If *suffix* is not None, add it instead of config.source_suffix.
"""
docname = docname.replace(SEP, path.sep)
suffix = suffix or self.config.source_suffix
if base is True:
return path.join(self.srcdir, docname) + suffix
elif base is None:
return docname + suffix
else:
return path.join(base, docname) + suffix
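        # e.g. with source_suffix '.rst', doc2path('dir/intro') gives
        # '<srcdir>/dir/intro.rst', while doc2path('dir/intro', base=None)
        # gives 'dir/intro.rst'.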
def relfn2path(self, filename, docname=None):
"""Return paths to a file referenced from a document, relative to
documentation root and absolute.
In the input "filename", absolute filenames are taken as relative to the
source dir, while relative filenames are relative to the dir of the
containing document.
"""
if filename.startswith('/') or filename.startswith(os.sep):
rel_fn = filename[1:]
else:
docdir = path.dirname(self.doc2path(docname or self.docname,
base=None))
rel_fn = path.join(docdir, filename)
try:
# the path.abspath() might seem redundant, but otherwise artifacts
# such as ".." will remain in the path
return rel_fn, path.abspath(path.join(self.srcdir, rel_fn))
except UnicodeDecodeError:
# the source directory is a bytestring with non-ASCII characters;
# let's try to encode the rel_fn in the file system encoding
enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
return rel_fn, path.abspath(path.join(self.srcdir, enc_rel_fn))
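        # e.g. from document 'dir/doc', relfn2path('images/a.png') resolves to
        # ('dir/images/a.png', '<srcdir>/dir/images/a.png'); a leading '/'
        # makes the filename relative to the source dir root instead.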
def find_files(self, config):
"""Find all source files in the source dir and put them in
self.found_docs.
"""
matchers = compile_matchers(
config.exclude_patterns[:] +
config.html_extra_path +
config.exclude_trees +
[d + config.source_suffix for d in config.unused_docs] +
['**/' + d for d in config.exclude_dirnames] +
['**/_sources', '.#*']
)
self.found_docs = set(get_matching_docs(
self.srcdir, config.source_suffix, exclude_matchers=matchers))
# add catalog mo file dependency
for docname in self.found_docs:
catalog_files = find_catalog_files(
docname,
self.srcdir,
self.config.locale_dirs,
self.config.language,
self.config.gettext_compact)
for filename in catalog_files:
self.dependencies.setdefault(docname, set()).add(filename)
def get_outdated_files(self, config_changed):
"""Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
added = set()
changed = set()
if config_changed:
# config values affect e.g. substitutions
added = self.found_docs
else:
for docname in self.found_docs:
if docname not in self.all_docs:
added.add(docname)
continue
# if the doctree file is not there, rebuild
if not path.isfile(self.doc2path(docname, self.doctreedir,
'.doctree')):
changed.add(docname)
continue
# check the "reread always" list
if docname in self.reread_always:
changed.add(docname)
continue
# check the mtime of the document
mtime = self.all_docs[docname]
newmtime = path.getmtime(self.doc2path(docname))
if newmtime > mtime:
changed.add(docname)
continue
# finally, check the mtime of dependencies
for dep in self.dependencies.get(docname, ()):
try:
# this will do the right thing when dep is absolute too
deppath = path.join(self.srcdir, dep)
if not path.isfile(deppath):
changed.add(docname)
break
depmtime = path.getmtime(deppath)
if depmtime > mtime:
changed.add(docname)
break
except EnvironmentError:
# give it another chance
changed.add(docname)
break
return added, changed, removed
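    # Editorial note (illustrative only): a config change forces every found
    # document into "added" (full reread); otherwise a document counts as
    # "changed" when its doctree pickle is missing, it is on the reread_always
    # list, or its source file or any recorded dependency is newer than the
    # stored mtime. Documents no longer present end up in "removed".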
def update(self, config, srcdir, doctreedir, app=None):
"""(Re-)read all files new or changed since last update.
Returns a summary, the total count of documents to reread and an
iterator that yields docnames as it processes them. Store all
environment docnames in the canonical format (ie using SEP as a
separator in place of os.path.sep).
"""
config_changed = False
if self.config is None:
msg = '[new config] '
config_changed = True
else:
# check if a config value was changed that affects how
# doctrees are read
for key, descr in config.values.iteritems():
if descr[1] != 'env':
continue
if self.config[key] != config[key]:
msg = '[config changed] '
config_changed = True
break
else:
msg = ''
# this value is not covered by the above loop because it is handled
# specially by the config class
if self.config.extensions != config.extensions:
msg = '[extensions changed] '
config_changed = True
# the source and doctree directories may have been relocated
self.srcdir = srcdir
self.doctreedir = doctreedir
self.find_files(config)
self.config = config
added, changed, removed = self.get_outdated_files(config_changed)
# allow user intervention as well
for docs in app.emit('env-get-outdated', self, added, changed, removed):
changed.update(set(docs) & self.found_docs)
# if files were added or removed, all documents with globbed toctrees
# must be reread
if added or removed:
# ... but not those that already were removed
changed.update(self.glob_toctrees & self.found_docs)
msg += '%s added, %s changed, %s removed' % (len(added), len(changed),
len(removed))
def update_generator():
self.app = app
# clear all files no longer present
for docname in removed:
if app:
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
# read all new and changed files
for docname in sorted(added | changed):
yield docname
self.read_doc(docname, app=app)
if config.master_doc not in self.all_docs:
self.warn(None, 'master file %s not found' %
self.doc2path(config.master_doc))
self.app = None
if app:
app.emit('env-updated', self)
return msg, len(added | changed), update_generator()
def check_dependents(self, already):
to_rewrite = self.assign_section_numbers()
for docname in to_rewrite:
if docname not in already:
yield docname
# --------- SINGLE FILE READING --------------------------------------------
def warn_and_replace(self, error):
"""Custom decoding error handler that warns and replaces."""
linestart = error.object.rfind('\n', 0, error.start)
lineend = error.object.find('\n', error.start)
if lineend == -1: lineend = len(error.object)
lineno = error.object.count('\n', 0, error.start) + 1
self.warn(self.docname, 'undecodable source characters, '
'replacing with "?": %r' %
(error.object[linestart+1:error.start] + '>>>' +
error.object[error.start:error.end] + '<<<' +
error.object[error.end:lineend]), lineno)
return (u'?', error.end)
def lookup_domain_element(self, type, name):
"""Lookup a markup element (directive or role), given its name which can
be a full name (with domain).
"""
name = name.lower()
# explicit domain given?
if ':' in name:
domain_name, name = name.split(':', 1)
if domain_name in self.domains:
domain = self.domains[domain_name]
element = getattr(domain, type)(name)
if element is not None:
return element, []
# else look in the default domain
else:
def_domain = self.temp_data.get('default_domain')
if def_domain is not None:
element = getattr(def_domain, type)(name)
if element is not None:
return element, []
# always look in the std domain
element = getattr(self.domains['std'], type)(name)
if element is not None:
return element, []
raise ElementLookupError
def patch_lookup_functions(self):
"""Monkey-patch directive and role dispatch, so that domain-specific
markup takes precedence.
"""
def directive(name, lang_module, document):
try:
return self.lookup_domain_element('directive', name)
except ElementLookupError:
return orig_directive_function(name, lang_module, document)
def role(name, lang_module, lineno, reporter):
try:
return self.lookup_domain_element('role', name)
except ElementLookupError:
return orig_role_function(name, lang_module, lineno, reporter)
directives.directive = directive
roles.role = role
def read_doc(self, docname, src_path=None, save_parsed=True, app=None):
"""Parse a file and add/update inventory entries for the doctree.
If srcpath is given, read from a different source file.
"""
# remove all inventory entries for that file
if app:
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
if src_path is None:
src_path = self.doc2path(docname)
self.temp_data['docname'] = docname
# defaults to the global default, but can be re-set in a document
self.temp_data['default_domain'] = \
self.domains.get(self.config.primary_domain)
self.settings['input_encoding'] = self.config.source_encoding
self.settings['trim_footnote_reference_space'] = \
self.config.trim_footnote_reference_space
self.settings['gettext_compact'] = self.config.gettext_compact
self.patch_lookup_functions()
if self.config.default_role:
role_fn, messages = roles.role(self.config.default_role, english,
0, dummy_reporter)
if role_fn:
roles._roles[''] = role_fn
else:
self.warn(docname, 'default role %s not found' %
self.config.default_role)
codecs.register_error('sphinx', self.warn_and_replace)
class SphinxSourceClass(FileInput):
def __init__(self_, *args, **kwds):
# don't call sys.exit() on IOErrors
kwds['handle_io_errors'] = False
FileInput.__init__(self_, *args, **kwds)
def decode(self_, data):
if isinstance(data, unicode):
return data
return data.decode(self_.encoding, 'sphinx')
def read(self_):
data = FileInput.read(self_)
if app:
arg = [data]
app.emit('source-read', docname, arg)
data = arg[0]
if self.config.rst_epilog:
data = data + '\n' + self.config.rst_epilog + '\n'
if self.config.rst_prolog:
data = self.config.rst_prolog + '\n' + data
return data
# publish manually
pub = Publisher(reader=SphinxStandaloneReader(),
writer=SphinxDummyWriter(),
source_class=SphinxSourceClass,
destination_class=NullOutput)
pub.set_components(None, 'restructuredtext', None)
pub.process_programmatic_settings(None, self.settings, None)
pub.set_source(None, src_path.encode(fs_encoding))
pub.set_destination(None, None)
try:
pub.publish()
doctree = pub.document
except UnicodeError, err:
raise SphinxError(str(err))
# post-processing
self.filter_messages(doctree)
self.process_dependencies(docname, doctree)
self.process_images(docname, doctree)
self.process_downloads(docname, doctree)
self.process_metadata(docname, doctree)
self.process_refonly_bullet_lists(docname, doctree)
self.create_title_from(docname, doctree)
self.note_indexentries_from(docname, doctree)
self.note_citations_from(docname, doctree)
self.build_toc_from(docname, doctree)
for domain in self.domains.itervalues():
domain.process_doc(self, docname, doctree)
# allow extension-specific post-processing
if app:
app.emit('doctree-read', doctree)
# store time of build, for outdated files detection
# (Some filesystems have coarse timestamp resolution;
# therefore time.time() can be older than filesystem's timestamp.
# For example, FAT32 has 2sec timestamp resolution.)
self.all_docs[docname] = max(
time.time(), path.getmtime(self.doc2path(docname)))
if self.versioning_condition:
# get old doctree
try:
f = open(self.doc2path(docname,
self.doctreedir, '.doctree'), 'rb')
try:
old_doctree = pickle.load(f)
finally:
f.close()
except EnvironmentError:
old_doctree = None
# add uids for versioning
if old_doctree is None:
list(add_uids(doctree, self.versioning_condition))
else:
list(merge_doctrees(
old_doctree, doctree, self.versioning_condition))
# make it picklable
doctree.reporter = None
doctree.transformer = None
doctree.settings.warning_stream = None
doctree.settings.env = None
doctree.settings.record_dependencies = None
for metanode in doctree.traverse(MetaBody.meta):
# docutils' meta nodes aren't picklable because the class is nested
metanode.__class__ = addnodes.meta
# cleanup
self.temp_data.clear()
if save_parsed:
# save the parsed doctree
doctree_filename = self.doc2path(docname, self.doctreedir,
'.doctree')
dirname = path.dirname(doctree_filename)
if not path.isdir(dirname):
os.makedirs(dirname)
f = open(doctree_filename, 'wb')
try:
pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
finally:
f.close()
else:
return doctree
# utilities to use while reading a document
@property
def docname(self):
"""Backwards compatible alias."""
return self.temp_data['docname']
@property
def currmodule(self):
"""Backwards compatible alias."""
return self.temp_data.get('py:module')
@property
def currclass(self):
"""Backwards compatible alias."""
return self.temp_data.get('py:class')
def new_serialno(self, category=''):
"""Return a serial number, e.g. for index entry targets."""
key = category + 'serialno'
cur = self.temp_data.get(key, 0)
self.temp_data[key] = cur + 1
return cur
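    # Editorial note (illustrative only): serial numbers are kept per category
    # in temp_data, so within one document read e.g.
    #   new_serialno('index')  -> 0
    #   new_serialno('index')  -> 1
    #   new_serialno('target') -> 0
    # and the counters reset when temp_data is cleared at the end of read_doc().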
def note_dependency(self, filename):
self.dependencies.setdefault(self.docname, set()).add(filename)
def note_reread(self):
self.reread_always.add(self.docname)
def note_versionchange(self, type, version, node, lineno):
self.versionchanges.setdefault(version, []).append(
(type, self.temp_data['docname'], lineno,
self.temp_data.get('py:module'),
self.temp_data.get('object'), node.astext()))
# post-processing of read doctrees
def filter_messages(self, doctree):
"""Filter system messages from a doctree."""
filterlevel = self.config.keep_warnings and 2 or 5
for node in doctree.traverse(nodes.system_message):
if node['level'] < filterlevel:
self.app.debug('%s [filtered system message]', node.astext())
node.parent.remove(node)
def process_dependencies(self, docname, doctree):
"""Process docutils-generated dependency info."""
cwd = os.getcwd()
frompath = path.join(path.normpath(self.srcdir), 'dummy')
deps = doctree.settings.record_dependencies
if not deps:
return
for dep in deps.list:
# the dependency path is relative to the working dir, so get
# one relative to the srcdir
relpath = relative_path(frompath,
path.normpath(path.join(cwd, dep)))
self.dependencies.setdefault(docname, set()).add(relpath)
def process_downloads(self, docname, doctree):
"""Process downloadable file paths. """
for node in doctree.traverse(addnodes.download_reference):
targetname = node['reftarget']
rel_filename, filename = self.relfn2path(targetname, docname)
self.dependencies.setdefault(docname, set()).add(rel_filename)
if not os.access(filename, os.R_OK):
self.warn_node('download file not readable: %s' % filename,
node)
continue
uniquename = self.dlfiles.add_file(docname, filename)
node['filename'] = uniquename
def process_images(self, docname, doctree):
"""Process and rewrite image URIs."""
for node in doctree.traverse(nodes.image):
# Map the mimetype to the corresponding image. The writer may
# choose the best image from these candidates. The special key * is
            # set if there is only a single candidate to be used by a writer.
# The special key ? is set for nonlocal URIs.
node['candidates'] = candidates = {}
imguri = node['uri']
if imguri.find('://') != -1:
self.warn_node('nonlocal image URI found: %s' % imguri, node)
candidates['?'] = imguri
continue
rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
# set imgpath as default URI
node['uri'] = rel_imgpath
if rel_imgpath.endswith(os.extsep + '*'):
for filename in glob(full_imgpath):
new_imgpath = relative_path(self.srcdir, filename)
if filename.lower().endswith('.pdf'):
candidates['application/pdf'] = new_imgpath
elif filename.lower().endswith('.svg'):
candidates['image/svg+xml'] = new_imgpath
else:
try:
f = open(filename, 'rb')
try:
imgtype = imghdr.what(f)
finally:
f.close()
                        except (OSError, IOError), err:
                            self.warn_node('image file %s not readable: %s' %
                                           (filename, err), node)
                            continue
if imgtype:
candidates['image/' + imgtype] = new_imgpath
else:
candidates['*'] = rel_imgpath
# map image paths to unique image names (so that they can be put
# into a single directory)
for imgpath in candidates.itervalues():
self.dependencies.setdefault(docname, set()).add(imgpath)
if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
self.warn_node('image file not readable: %s' % imgpath,
node)
continue
self.images.add_file(docname, imgpath)
def process_metadata(self, docname, doctree):
"""Process the docinfo part of the doctree as metadata.
Keep processing minimal -- just return what docutils says.
"""
self.metadata[docname] = md = {}
try:
docinfo = doctree[0]
except IndexError:
# probably an empty document
return
if docinfo.__class__ is not nodes.docinfo:
# nothing to see here
return
for node in docinfo:
# nodes are multiply inherited...
if isinstance(node, nodes.authors):
md['authors'] = [author.astext() for author in node]
elif isinstance(node, nodes.TextElement): # e.g. author
md[node.__class__.__name__] = node.astext()
else:
name, body = node
md[name.astext()] = body.astext()
del doctree[0]
def process_refonly_bullet_lists(self, docname, doctree):
"""Change refonly bullet lists to use compact_paragraphs.
Specifically implemented for 'Indices and Tables' section, which looks
odd when html_compact_lists is false.
"""
if self.config.html_compact_lists:
return
class RefOnlyListChecker(nodes.GenericNodeVisitor):
"""Raise `nodes.NodeFound` if non-simple list item is encountered.
Here 'simple' means a list item containing only a paragraph with a
single reference in it.
"""
def default_visit(self, node):
raise nodes.NodeFound
def visit_bullet_list(self, node):
pass
def visit_list_item(self, node):
children = []
for child in node.children:
if not isinstance(child, nodes.Invisible):
children.append(child)
if len(children) != 1:
raise nodes.NodeFound
if not isinstance(children[0], nodes.paragraph):
raise nodes.NodeFound
para = children[0]
if len(para) != 1:
raise nodes.NodeFound
if not isinstance(para[0], addnodes.pending_xref):
raise nodes.NodeFound
raise nodes.SkipChildren
def invisible_visit(self, node):
"""Invisible nodes should be ignored."""
pass
def check_refonly_list(node):
"""Check for list with only references in it."""
visitor = RefOnlyListChecker(doctree)
try:
node.walk(visitor)
except nodes.NodeFound:
return False
else:
return True
for node in doctree.traverse(nodes.bullet_list):
if check_refonly_list(node):
for item in node.traverse(nodes.list_item):
para = item[0]
ref = para[0]
compact_para = addnodes.compact_paragraph()
compact_para += ref
item.replace(para, compact_para)
def create_title_from(self, docname, document):
"""Add a title node to the document (just copy the first section title),
and store that title in the environment.
"""
titlenode = nodes.title()
longtitlenode = titlenode
# explicit title set with title directive; use this only for
# the <title> tag in HTML output
if document.has_key('title'):
longtitlenode = nodes.title()
longtitlenode += nodes.Text(document['title'])
# look for first section title and use that as the title
for node in document.traverse(nodes.section):
visitor = SphinxContentsFilter(document)
node[0].walkabout(visitor)
titlenode += visitor.get_entry_text()
break
else:
# document has no title
titlenode += nodes.Text('<no title>')
self.titles[docname] = titlenode
self.longtitles[docname] = longtitlenode
def note_indexentries_from(self, docname, document):
entries = self.indexentries[docname] = []
for node in document.traverse(addnodes.index):
entries.extend(node['entries'])
def note_citations_from(self, docname, document):
for node in document.traverse(nodes.citation):
label = node[0].astext()
if label in self.citations:
self.warn_node('duplicate citation %s, ' % label +
'other instance in %s' % self.doc2path(
self.citations[label][0]), node)
self.citations[label] = (docname, node['ids'][0])
def note_toctree(self, docname, toctreenode):
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
if toctreenode['glob']:
self.glob_toctrees.add(docname)
if toctreenode.get('numbered'):
self.numbered_toctrees.add(docname)
includefiles = toctreenode['includefiles']
for includefile in includefiles:
# note that if the included file is rebuilt, this one must be
# too (since the TOC of the included file could have changed)
self.files_to_rebuild.setdefault(includefile, set()).add(docname)
self.toctree_includes.setdefault(docname, []).extend(includefiles)
def build_toc_from(self, docname, document):
"""Build a TOC from the doctree and store it in the inventory."""
numentries = [0] # nonlocal again...
try:
maxdepth = int(self.metadata[docname].get('tocdepth', 0))
except ValueError:
maxdepth = 0
def traverse_in_section(node, cls):
"""Like traverse(), but stay within the same section."""
result = []
if isinstance(node, cls):
result.append(node)
for child in node.children:
if isinstance(child, nodes.section):
continue
result.extend(traverse_in_section(child, cls))
return result
def build_toc(node, depth=1):
entries = []
for sectionnode in node:
# find all toctree nodes in this section and add them
# to the toc (just copying the toctree node which is then
# resolved in self.get_and_resolve_doctree)
if isinstance(sectionnode, addnodes.only):
onlynode = addnodes.only(expr=sectionnode['expr'])
blist = build_toc(sectionnode, depth)
if blist:
onlynode += blist.children
entries.append(onlynode)
if not isinstance(sectionnode, nodes.section):
for toctreenode in traverse_in_section(sectionnode,
addnodes.toctree):
item = toctreenode.copy()
entries.append(item)
# important: do the inventory stuff
self.note_toctree(docname, toctreenode)
continue
title = sectionnode[0]
# copy the contents of the section title, but without references
# and unnecessary stuff
visitor = SphinxContentsFilter(document)
title.walkabout(visitor)
nodetext = visitor.get_entry_text()
if not numentries[0]:
# for the very first toc entry, don't add an anchor
# as it is the file's title anyway
anchorname = ''
else:
anchorname = '#' + sectionnode['ids'][0]
numentries[0] += 1
# make these nodes:
# list_item -> compact_paragraph -> reference
reference = nodes.reference(
'', '', internal=True, refuri=docname,
anchorname=anchorname, *nodetext)
para = addnodes.compact_paragraph('', '', reference)
item = nodes.list_item('', para)
sub_item = build_toc(sectionnode, depth + 1)
if maxdepth == 0 or depth < maxdepth:
item += sub_item
entries.append(item)
if entries:
return nodes.bullet_list('', *entries)
return []
toc = build_toc(document)
if toc:
self.tocs[docname] = toc
else:
self.tocs[docname] = nodes.bullet_list('')
self.toc_num_entries[docname] = numentries[0]
def get_toc_for(self, docname, builder):
"""Return a TOC nodetree -- for use on the same page only!"""
try:
toc = self.tocs[docname].deepcopy()
except KeyError:
# the document does not exist anymore: return a dummy node that
# renders to nothing
return nodes.paragraph()
self.process_only_nodes(toc, builder, docname)
for node in toc.traverse(nodes.reference):
node['refuri'] = node['anchorname'] or '#'
return toc
def get_toctree_for(self, docname, builder, collapse, **kwds):
"""Return the global TOC nodetree."""
doctree = self.get_doctree(self.config.master_doc)
toctrees = []
if 'includehidden' not in kwds:
kwds['includehidden'] = True
if 'maxdepth' not in kwds:
kwds['maxdepth'] = 0
kwds['collapse'] = collapse
for toctreenode in doctree.traverse(addnodes.toctree):
toctree = self.resolve_toctree(docname, builder, toctreenode,
prune=True, **kwds)
if toctree:
toctrees.append(toctree)
if not toctrees:
return None
result = toctrees[0]
for toctree in toctrees[1:]:
result.extend(toctree.children)
return result
def get_domain(self, domainname):
"""Return the domain instance with the specified name.
Raises an ExtensionError if the domain is not registered.
"""
try:
return self.domains[domainname]
except KeyError:
raise ExtensionError('Domain %r is not registered' % domainname)
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
def get_doctree(self, docname):
"""Read the doctree for a file from the pickle and return it."""
doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
f = open(doctree_filename, 'rb')
try:
doctree = pickle.load(f)
finally:
f.close()
doctree.settings.env = self
doctree.reporter = Reporter(self.doc2path(docname), 2, 5,
stream=WarningStream(self._warnfunc))
return doctree
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
"""Read the doctree from the pickle, resolve cross-references and
toctrees and return it.
"""
if doctree is None:
doctree = self.get_doctree(docname)
# resolve all pending cross-references
self.resolve_references(doctree, docname, builder)
# now, resolve all toctree nodes
for toctreenode in doctree.traverse(addnodes.toctree):
result = self.resolve_toctree(docname, builder, toctreenode,
prune=prune_toctrees, includehidden=includehidden)
if result is None:
toctreenode.replace_self([])
else:
toctreenode.replace_self(result)
return doctree
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
to the value of the *maxdepth* option on the *toctree* node.
If *titles_only* is True, only toplevel document titles will be in the
resulting tree.
If *collapse* is True, all branches not containing docname will
be collapsed.
"""
if toctree.get('hidden', False) and not includehidden:
return None
# For reading the following two helper function, it is useful to keep
# in mind the node structure of a toctree (using HTML-like node names
# for brevity):
#
# <ul>
# <li>
# <p><a></p>
# <p><a></p>
# ...
# <ul>
# ...
# </ul>
# </li>
# </ul>
#
# The transformation is made in two passes in order to avoid
# interactions between marking and pruning the tree (see bug #1046).
def _toctree_prune(node, depth, maxdepth):
"""Utility: Cut a TOC at a specified depth."""
for subnode in node.children[:]:
if isinstance(subnode, (addnodes.compact_paragraph,
nodes.list_item)):
# for <p> and <li>, just recurse
_toctree_prune(subnode, depth, maxdepth)
elif isinstance(subnode, nodes.bullet_list):
# for <ul>, determine if the depth is too large or if the
# entry is to be collapsed
if maxdepth > 0 and depth > maxdepth:
subnode.parent.replace(subnode, [])
else:
# cull sub-entries whose parents aren't 'current'
if (collapse and depth > 1 and
'iscurrent' not in subnode.parent):
subnode.parent.remove(subnode)
else:
# recurse on visible children
_toctree_prune(subnode, depth+1, maxdepth)
def _toctree_add_classes(node, depth):
"""Add 'toctree-l%d' and 'current' classes to the toctree."""
for subnode in node.children:
if isinstance(subnode, (addnodes.compact_paragraph,
nodes.list_item)):
# for <p> and <li>, indicate the depth level and recurse
subnode['classes'].append('toctree-l%d' % (depth-1))
_toctree_add_classes(subnode, depth)
elif isinstance(subnode, nodes.bullet_list):
# for <ul>, just recurse
_toctree_add_classes(subnode, depth+1)
elif isinstance(subnode, nodes.reference):
# for <a>, identify which entries point to the current
# document and therefore may not be collapsed
if subnode['refuri'] == docname:
if not subnode['anchorname']:
# give the whole branch a 'current' class
# (useful for styling it differently)
branchnode = subnode
while branchnode:
branchnode['classes'].append('current')
branchnode = branchnode.parent
# mark the list_item as "on current page"
if subnode.parent.parent.get('iscurrent'):
# but only if it's not already done
return
while subnode:
subnode['iscurrent'] = True
subnode = subnode.parent
def _entries_from_toctree(toctreenode, parents,
separate=False, subtree=False):
"""Return TOC entries for a toctree node."""
refs = [(e[0], e[1]) for e in toctreenode['entries']]
entries = []
for (title, ref) in refs:
try:
refdoc = None
if url_re.match(ref):
reference = nodes.reference('', '', internal=False,
refuri=ref, anchorname='',
*[nodes.Text(title)])
para = addnodes.compact_paragraph('', '', reference)
item = nodes.list_item('', para)
toc = nodes.bullet_list('', item)
elif ref == 'self':
# 'self' refers to the document from which this
# toctree originates
ref = toctreenode['parent']
if not title:
title = clean_astext(self.titles[ref])
reference = nodes.reference('', '', internal=True,
refuri=ref,
anchorname='',
*[nodes.Text(title)])
para = addnodes.compact_paragraph('', '', reference)
item = nodes.list_item('', para)
# don't show subitems
toc = nodes.bullet_list('', item)
else:
if ref in parents:
self.warn(ref, 'circular toctree references '
'detected, ignoring: %s <- %s' %
(ref, ' <- '.join(parents)))
continue
refdoc = ref
toc = self.tocs[ref].deepcopy()
self.process_only_nodes(toc, builder, ref)
if title and toc.children and len(toc.children) == 1:
child = toc.children[0]
for refnode in child.traverse(nodes.reference):
if refnode['refuri'] == ref and \
not refnode['anchorname']:
refnode.children = [nodes.Text(title)]
if not toc.children:
# empty toc means: no titles will show up in the toctree
self.warn_node(
'toctree contains reference to document %r that '
'doesn\'t have a title: no link will be generated'
% ref, toctreenode)
except KeyError:
# this is raised if the included file does not exist
self.warn_node(
'toctree contains reference to nonexisting document %r'
% ref, toctreenode)
else:
# if titles_only is given, only keep the main title and
# sub-toctrees
if titles_only:
# delete everything but the toplevel title(s)
# and toctrees
for toplevel in toc:
# nodes with length 1 don't have any children anyway
if len(toplevel) > 1:
subtrees = toplevel.traverse(addnodes.toctree)
toplevel[1][:] = subtrees
# resolve all sub-toctrees
for toctreenode in toc.traverse(addnodes.toctree):
if not (toctreenode.get('hidden', False)
and not includehidden):
i = toctreenode.parent.index(toctreenode) + 1
for item in _entries_from_toctree(
toctreenode, [refdoc] + parents,
subtree=True):
toctreenode.parent.insert(i, item)
i += 1
toctreenode.parent.remove(toctreenode)
if separate:
entries.append(toc)
else:
entries.extend(toc.children)
if not subtree and not separate:
ret = nodes.bullet_list()
ret += entries
return [ret]
return entries
maxdepth = maxdepth or toctree.get('maxdepth', -1)
if not titles_only and toctree.get('titlesonly', False):
titles_only = True
if not includehidden and toctree.get('includehidden', False):
includehidden = True
# NOTE: previously, this was separate=True, but that leads to artificial
# separation when two or more toctree entries form a logical unit, so
# separating mode is no longer used -- it's kept here for history's sake
tocentries = _entries_from_toctree(toctree, [], separate=False)
if not tocentries:
return None
newnode = addnodes.compact_paragraph('', '', *tocentries)
newnode['toctree'] = True
# prune the tree to maxdepth, also set toc depth and current classes
_toctree_add_classes(newnode, 1)
_toctree_prune(newnode, 1, prune and maxdepth or 0)
# set the target paths in the toctrees (they are not known at TOC
# generation time)
for refnode in newnode.traverse(nodes.reference):
if not url_re.match(refnode['refuri']):
refnode['refuri'] = builder.get_relative_uri(
docname, refnode['refuri']) + refnode['anchorname']
return newnode
def resolve_references(self, doctree, fromdocname, builder):
for node in doctree.traverse(addnodes.pending_xref):
contnode = node[0].deepcopy()
newnode = None
typ = node['reftype']
target = node['reftarget']
refdoc = node.get('refdoc', fromdocname)
domain = None
try:
if 'refdomain' in node and node['refdomain']:
# let the domain try to resolve the reference
try:
domain = self.domains[node['refdomain']]
except KeyError:
raise NoUri
newnode = domain.resolve_xref(self, fromdocname, builder,
typ, target, node, contnode)
# really hardwired reference types
elif typ == 'doc':
# directly reference to document by source name;
# can be absolute or relative
docname = docname_join(refdoc, target)
if docname in self.all_docs:
if node['refexplicit']:
# reference with explicit title
caption = node.astext()
else:
caption = clean_astext(self.titles[docname])
innernode = nodes.emphasis(caption, caption)
newnode = nodes.reference('', '', internal=True)
newnode['refuri'] = builder.get_relative_uri(
fromdocname, docname)
newnode.append(innernode)
elif typ == 'citation':
docname, labelid = self.citations.get(target, ('', ''))
if docname:
try:
newnode = make_refnode(builder, fromdocname,
docname, labelid, contnode)
except NoUri:
# remove the ids we added in the CitationReferences
                            # transform since they can't be transferred to
# the contnode (if it's a Text node)
if not isinstance(contnode, nodes.Element):
del node['ids'][:]
raise
elif 'ids' in node:
# remove ids attribute that annotated at
# transforms.CitationReference.apply.
del node['ids'][:]
# no new node found? try the missing-reference event
if newnode is None:
newnode = builder.app.emit_firstresult(
'missing-reference', self, node, contnode)
# still not found? warn if in nit-picky mode
if newnode is None:
self._warn_missing_reference(
fromdocname, typ, target, node, domain)
except NoUri:
newnode = contnode
node.replace_self(newnode or contnode)
# remove only-nodes that do not belong to our builder
self.process_only_nodes(doctree, builder, fromdocname)
# allow custom references to be resolved
builder.app.emit('doctree-resolved', doctree, fromdocname)
def _warn_missing_reference(self, fromdoc, typ, target, node, domain):
warn = node.get('refwarn')
if self.config.nitpicky:
warn = True
if self._nitpick_ignore:
dtype = domain and '%s:%s' % (domain.name, typ) or typ
if (dtype, target) in self._nitpick_ignore:
warn = False
if not warn:
return
if domain and typ in domain.dangling_warnings:
msg = domain.dangling_warnings[typ]
elif typ == 'doc':
msg = 'unknown document: %(target)s'
elif typ == 'citation':
msg = 'citation not found: %(target)s'
elif node.get('refdomain', 'std') != 'std':
msg = '%s:%s reference target not found: %%(target)s' % \
(node['refdomain'], typ)
else:
msg = '%s reference target not found: %%(target)s' % typ
self.warn_node(msg % {'target': target}, node)
def process_only_nodes(self, doctree, builder, fromdocname=None):
# A comment on the comment() nodes being inserted: replacing by [] would
# result in a "Losing ids" exception if there is a target node before
# the only node, so we make sure docutils can transfer the id to
# something, even if it's just a comment and will lose the id anyway...
for node in doctree.traverse(addnodes.only):
try:
ret = builder.tags.eval_condition(node['expr'])
except Exception, err:
self.warn_node('exception while evaluating only '
'directive expression: %s' % err, node)
node.replace_self(node.children or nodes.comment())
else:
if ret:
node.replace_self(node.children or nodes.comment())
else:
node.replace_self(nodes.comment())
def assign_section_numbers(self):
"""Assign a section number to each heading under a numbered toctree."""
# a list of all docnames whose section numbers changed
rewrite_needed = []
old_secnumbers = self.toc_secnumbers
self.toc_secnumbers = {}
def _walk_toc(node, secnums, depth, titlenode=None):
# titlenode is the title of the document, it will get assigned a
# secnumber too, so that it shows up in next/prev/parent rellinks
for subnode in node.children:
if isinstance(subnode, nodes.bullet_list):
numstack.append(0)
_walk_toc(subnode, secnums, depth-1, titlenode)
numstack.pop()
titlenode = None
elif isinstance(subnode, nodes.list_item):
_walk_toc(subnode, secnums, depth, titlenode)
titlenode = None
elif isinstance(subnode, addnodes.only):
# at this stage we don't know yet which sections are going
# to be included; just include all of them, even if it leads
# to gaps in the numbering
_walk_toc(subnode, secnums, depth, titlenode)
titlenode = None
elif isinstance(subnode, addnodes.compact_paragraph):
numstack[-1] += 1
if depth > 0:
number = tuple(numstack)
else:
number = None
secnums[subnode[0]['anchorname']] = \
subnode[0]['secnumber'] = number
if titlenode:
titlenode['secnumber'] = number
titlenode = None
elif isinstance(subnode, addnodes.toctree):
_walk_toctree(subnode, depth)
def _walk_toctree(toctreenode, depth):
if depth == 0:
return
for (title, ref) in toctreenode['entries']:
if url_re.match(ref) or ref == 'self':
# don't mess with those
continue
if ref in self.tocs:
secnums = self.toc_secnumbers[ref] = {}
_walk_toc(self.tocs[ref], secnums, depth,
self.titles.get(ref))
if secnums != old_secnumbers.get(ref):
rewrite_needed.append(ref)
for docname in self.numbered_toctrees:
doctree = self.get_doctree(docname)
for toctreenode in doctree.traverse(addnodes.toctree):
depth = toctreenode.get('numbered', 0)
if depth:
# every numbered toctree gets new numbering
numstack = [0]
_walk_toctree(toctreenode, depth)
return rewrite_needed
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
"""Create the real index from the collected index entries."""
new = {}
def add_entry(word, subword, link=True, dic=new):
entry = dic.get(word)
if not entry:
dic[word] = entry = [[], {}]
if subword:
add_entry(subword, '', link=link, dic=entry[1])
elif link:
try:
uri = builder.get_relative_uri('genindex', fn) + '#' + tid
except NoUri:
pass
else:
entry[0].append((main, uri))
for fn, entries in self.indexentries.iteritems():
# new entry types must be listed in directives/other.py!
for type, value, tid, main in entries:
try:
if type == 'single':
try:
entry, subentry = split_into(2, 'single', value)
except ValueError:
entry, = split_into(1, 'single', value)
subentry = ''
add_entry(entry, subentry)
elif type == 'pair':
first, second = split_into(2, 'pair', value)
add_entry(first, second)
add_entry(second, first)
elif type == 'triple':
first, second, third = split_into(3, 'triple', value)
add_entry(first, second+' '+third)
add_entry(second, third+', '+first)
add_entry(third, first+' '+second)
elif type == 'see':
first, second = split_into(2, 'see', value)
add_entry(first, _('see %s') % second, link=False)
elif type == 'seealso':
first, second = split_into(2, 'see', value)
add_entry(first, _('see also %s') % second, link=False)
else:
self.warn(fn, 'unknown index entry type %r' % type)
except ValueError, err:
self.warn(fn, str(err))
# sort the index entries; put all symbols at the front, even those
        # following the letters in ASCII; this is where the chr(127) comes from
def keyfunc(entry, lcletters=string.ascii_lowercase + '_'):
lckey = unicodedata.normalize('NFD', entry[0].lower())
if lckey[0:1] in lcletters:
return chr(127) + lckey
return lckey
newlist = new.items()
newlist.sort(key=keyfunc)
if group_entries:
# fixup entries: transform
# func() (in module foo)
# func() (in module bar)
# into
# func()
# (in module foo)
# (in module bar)
oldkey = ''
oldsubitems = None
i = 0
while i < len(newlist):
key, (targets, subitems) = newlist[i]
# cannot move if it has subitems; structure gets too complex
if not subitems:
m = _fixre.match(key)
if m:
if oldkey == m.group(1):
# prefixes match: add entry as subitem of the
# previous entry
oldsubitems.setdefault(m.group(2), [[], {}])[0].\
extend(targets)
del newlist[i]
continue
oldkey = m.group(1)
else:
oldkey = key
oldsubitems = subitems
i += 1
# group the entries by letter
def keyfunc2(item, letters=string.ascii_uppercase + '_'):
# hack: mutating the subitems dicts to a list in the keyfunc
k, v = item
v[1] = sorted((si, se) for (si, (se, void)) in v[1].iteritems())
# now calculate the key
letter = unicodedata.normalize('NFD', k[0])[0].upper()
if letter in letters:
return letter
else:
# get all other symbols under one heading
return _('Symbols')
return [(key, list(group))
for (key, group) in groupby(newlist, keyfunc2)]
def collect_relations(self):
relations = {}
getinc = self.toctree_includes.get
def collect(parents, parents_set, docname, previous, next):
# circular relationship?
if docname in parents_set:
# we will warn about this in resolve_toctree()
return
includes = getinc(docname)
# previous
if not previous:
# if no previous sibling, go to parent
previous = parents[0][0]
else:
# else, go to previous sibling, or if it has children, to
# the last of its children, or if that has children, to the
# last of those, and so forth
while 1:
previncs = getinc(previous)
if previncs:
previous = previncs[-1]
else:
break
# next
if includes:
# if it has children, go to first of them
next = includes[0]
elif next:
# else, if next sibling, go to it
pass
else:
# else, go to the next sibling of the parent, if present,
# else the grandparent's sibling, if present, and so forth
for parname, parindex in parents:
parincs = getinc(parname)
if parincs and parindex + 1 < len(parincs):
next = parincs[parindex+1]
break
# else it will stay None
# same for children
if includes:
for subindex, args in enumerate(izip(includes,
[None] + includes,
includes[1:] + [None])):
collect([(docname, subindex)] + parents,
parents_set.union([docname]), *args)
relations[docname] = [parents[0][0], previous, next]
collect([(None, 0)], set(), self.config.master_doc, None, None)
return relations
def check_consistency(self):
"""Do consistency checks."""
for docname in sorted(self.all_docs):
if docname not in self.files_to_rebuild:
if docname == self.config.master_doc:
# the master file is not included anywhere ;)
continue
if 'orphan' in self.metadata[docname]:
continue
self.warn(docname, 'document isn\'t included in any toctree')
| agpl-3.0 | 3,677,243,080,164,542,000 | 41.456364 | 80 | 0.527401 | false |
eammx/proyectosWeb | proyectoPython/env/lib/python3.6/site-packages/pip/_vendor/ipaddress.py | 30 | 79875 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.23'
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b'!B', b)[0] for b in byt]
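# Editorial note (illustrative only): both variants expose packed address bytes
# as integer byte values, e.g. b'\xc0\xa8\x00\x01' becomes [192, 168, 0, 1] on
# Python 2, while on Python 3 the bytes object itself already yields ints when
# indexed and is returned unchanged.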
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
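# Editorial example (illustrative only): 192.168.0.1 corresponds to the integer
# 3232235521, so
#   _compat_to_bytes(3232235521, 4, 'big') == b'\xc0\xa8\x00\x01'
# Only the 4-byte (IPv4) and 16-byte (IPv6) widths are supported.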
if hasattr(int, 'bit_length'):
# Not int.bit_length , since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
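# Editorial example (illustrative only): both branches agree with
# int.bit_length(), e.g. _compat_bit_length(255) == 8 and
# _compat_bit_length(0) == 0.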
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 network. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
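# Editorial example (illustrative only): 192.0.2.1 is the integer 3221225985, so
#   v4_int_to_packed(3221225985) == b'\xc0\x00\x02\x01'
# while a negative integer or one >= 2**32 raises ValueError.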
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
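# Editorial example (illustrative only):
#   _split_optional_netmask('192.0.2.0/24') -> ['192.0.2.0', '24']
#   _split_optional_netmask('192.0.2.1')    -> ['192.0.2.1']
# and more than one '/' raises AddressValueError.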
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
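# Editorial example (illustrative only): for the sorted, deduplicated addresses
# 192.0.2.1, 192.0.2.2, 192.0.2.3, 192.0.2.5 this yields the two ranges
# (192.0.2.1, 192.0.2.3) and (192.0.2.5, 192.0.2.5).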
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, _compat_bit_length(~number & (number - 1)))
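# Editorial example (illustrative only): 40 is 0b101000, so
#   _count_righthand_zero_bits(40, 32) == 3
# and _count_righthand_zero_bits(0, bits) == bits by definition.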
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
'%r (len %d != %d) is not permitted as an IPv%d address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?')
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
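    # Editorial example (illustrative only): for IPv4 (_max_prefixlen == 32,
    # _ALL_ONES == 2**32 - 1) a prefix length of 24 gives
    #   0xffffffff ^ (0xffffffff >> 24) == 0xffffff00   # i.e. 255.255.255.0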
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
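    # Editorial example (illustrative only): the inverse of the conversion above,
    #   _prefix_from_ip_int(0xffffff00) == 24
    # whereas a pattern with interleaved ones and zeroes such as 0xff00ff00
    # raises ValueError ('mixes zeroes & ones').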
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = '%r is not a valid netmask' % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
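    # Editorial example (illustrative only, IPv4): a netmask and the equivalent
    # hostmask map to the same prefix length, e.g. '255.255.255.0' -> 24 and
    # '0.0.0.255' -> 24, while a mixed pattern like '255.0.255.0' is rejected
    # with NetmaskValueError.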
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip and
self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
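# For instance, a /30 network has two usable hosts, with the network and
# broadcast addresses excluded:
#     >>> list(IPv4Network('192.0.2.0/30').hosts())
#     [IPv4Address('192.0.2.1'), IPv4Address('192.0.2.2')]
# (the concrete network classes below rebind hosts to __iter__ for /31 and
# /127 networks, where there is no separate broadcast address).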
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError('address out of range')
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError('address out of range')
return self._address_class(broadcast + n)
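# Indexing works from both ends of the address range, for example:
#     >>> net = IPv4Network('192.0.2.0/28')
#     >>> net[0], net[-1]
#     (IPv4Address('192.0.2.0'), IPv4Address('192.0.2.15'))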
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
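# Example: networks overlap when either one contains an endpoint of the other,
#     >>> IPv4Network('192.0.2.0/28').overlaps(IPv4Network('192.0.2.8/29'))
#     True
#     >>> IPv4Network('192.0.2.0/28').overlaps(IPv4Network('192.0.3.0/24'))
#     False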
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
list(addr1.address_exclude(addr2)) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
list(addr1.address_exclude(addr2)) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
start = int(self.network_address)
end = int(self.broadcast_address) + 1
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
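# Example: splitting a /24 two bits deeper yields four /26 networks,
#     >>> [str(s) for s in IPv4Network('192.0.2.0/24').subnets(prefixlen_diff=2)]
#     ['192.0.2.0/26', '192.0.2.64/26', '192.0.2.128/26', '192.0.2.192/26']
# and the same result can be requested with subnets(new_prefix=26).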
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return self.__class__((
int(self.network_address) & (int(self.netmask) << prefixlen_diff),
new_prefixlen))
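# Example: widening a /26 by two bits returns the enclosing /24,
#     >>> IPv4Network('192.0.2.64/26').supernet(prefixlen_diff=2)
#     IPv4Network('192.0.2.0/24')
# which is equivalent to asking for supernet(new_prefix=24).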
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
@staticmethod
def _is_subnet_of(a, b):
try:
# Always false if one is v4 and the other is v6.
if a._version != b._version:
raise TypeError(
"%s and %s are not of the same version" % (a, b))
return (b.network_address <= a.network_address and
b.broadcast_address >= a.broadcast_address)
except AttributeError:
raise TypeError("Unable to test subnet containment "
"between %s and %s" % (a, b))
def subnet_of(self, other):
"""Return True if this network is a subnet of other."""
return self._is_subnet_of(self, other)
def supernet_of(self, other):
"""Return True if this network is a supernet of other."""
return self._is_subnet_of(other, self)
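# Example of the containment helpers:
#     >>> a = IPv4Network('192.0.2.0/24')
#     >>> b = IPv4Network('192.0.2.128/25')
#     >>> b.subnet_of(a), a.supernet_of(b)
#     (True, True)
# Mixing IPv4 and IPv6 arguments raises TypeError rather than returning False.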
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
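# All three accepted argument forms resolve to the same cached pair, e.g.
#     >>> IPv4Network._make_netmask('255.255.255.0')
#     (IPv4Address('255.255.255.0'), 24)
# and _make_netmask(24) or _make_netmask('24') return the identical tuple.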
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
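# Examples of the octet rules: '8' parses to 8, '08' is rejected as an
# ambiguous octal/decimal value, and '256' is rejected for exceeding 255.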
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
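# For example, _is_hostmask('0.0.0.255') is True (octets ascend), while
# _is_hostmask('255.255.255.0') and _is_hostmask('0.0.0.0') are both False;
# an all-zero mask is instead treated as a /0 netmask by the parsing code.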
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split('.')[::-1]
return '.'.join(reverse_octets) + '.in-addr.arpa'
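# Example: for IPv4Address('192.0.2.1') this helper returns
# '1.2.0.192.in-addr.arpa'.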
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
return (
self not in self._constants._public_network and
not self.is_private)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (self.network < other.network or
self.network == other.network and address_less)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
.broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv4Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (not (self.network_address in IPv4Network('100.64.0.0/10') and
self.broadcast_address in IPv4Network('100.64.0.0/10')) and
not self.is_private)
class _IPv4Constants(object):
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_public_network = IPv4Network('100.64.0.0/10')
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.0.0.0/29'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
IPv4Network('198.18.0.0/15'),
IPv4Network('198.51.100.0/24'),
IPv4Network('203.0.113.0/24'),
IPv4Network('240.0.0.0/4'),
IPv4Network('255.255.255.255/32'),
]
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
IPv4Address._constants = _IPv4Constants
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
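# Example: ['2001', 'db8', '0', '0', '0', '0', '0', '1'] compresses to
# ['2001', 'db8', '', '1'], which ':'.join(...) renders as '2001:db8::1'.
# A single zero hextet is left uncompressed because the run length must
# exceed one.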
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
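# Example: for IPv6Address('2001:db8::1') this produces 32 reversed nibble
# labels ending in '8.b.d.0.1.0.0.2', followed by '.ip6.arpa', per RFC 3596.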
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
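# Example: IPv6Address('::ffff:192.0.2.1').ipv4_mapped returns
# IPv4Address('192.0.2.1'), while IPv6Address('2001:db8::1').ipv4_mapped is
# None because its upper bits are not the ::ffff:0:0/96 mapping prefix.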
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return (self.network < other.network or
self.network == other.network and address_less)
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6Network('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
strict: A boolean. If true, ensure that we have been passed
A true network address, eg, 2001:db8::1000/124 and not an
IP address on a network, eg, 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv6Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
class _IPv6Constants(object):
_linklocal_network = IPv6Network('fe80::/10')
_multicast_network = IPv6Network('ff00::/8')
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
IPv6Network('2001:10::/28'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9'),
]
_sitelocal_network = IPv6Network('fec0::/10')
IPv6Address._constants = _IPv6Constants
| mit | -6,162,644,266,291,629,000 | 32.006198 | 86 | 0.56815 | false |
LeSam/avoplot | Plugins/FTIR/src/avoplot_ftir_plugin/ftir_spectrum.py | 3 | 14519 | import wx
import csv
import os
import os.path
import math
from scipy.special import erf
import scipy
import scipy.optimize
import numpy
from avoplot import plugins, series, controls, subplots
from avoplot.persist import PersistentStorage
from avoplot.plugins import AvoPlotPluginSimple
from avoplot.subplots import AvoPlotXYSubplot
from avoplot.series import XYDataSeries
from avoplot.gui import widgets
plugin_is_GPL_compatible = True
class FTIRSpectrumSubplot(AvoPlotXYSubplot):
#This is the "subplot" where the spectrum will appear
def my_init(self):
ax = self.get_mpl_axes()
ax.set_xlabel('Wavenumber (cm$^{-1}$)')
ax.set_ylabel('Absorbance')
self.__inverted_x = False
def add_data_series(self, data):
AvoPlotXYSubplot.add_data_series(self, data)
if not self.__inverted_x:
self.get_mpl_axes().invert_xaxis()
self.__inverted_x = True
#define new data series type for FTIR data
class FTIRSpectrumData(series.XYDataSeries):
def __init__(self, *args, **kwargs):
super(FTIRSpectrumData, self).__init__(*args, **kwargs)
#add a control panel for this data series so that the user can fit an H2O
#background to the spectrum and read off the peak height
self.add_control_panel(BackgroundCalcCtrl(self))
@staticmethod
def get_supported_subplot_type():
return FTIRSpectrumSubplot
class FTIRPlugin(plugins.AvoPlotPluginSimple):
def __init__(self):
super(FTIRPlugin, self).__init__("FTIR Plugin", FTIRSpectrumData)
self.set_menu_entry(['FTIR', 'New Spectrum'], "Plot an FTIR spectrum")
def plot_into_subplot(self, subplot):
wavenumber, absorbance, spectrum_file = self.load_ftir_file()
if wavenumber is None:
return False
data_series = FTIRSpectrumData(os.path.basename(spectrum_file),
xdata=wavenumber,
ydata=absorbance)
subplot.add_data_series(data_series)
#TODO - setting the name here means that the figure gets renamed
#every time that a series gets added
subplot.get_parent_element().set_name(os.path.basename(spectrum_file))
return True
def load_ftir_file(self):
persist = PersistentStorage()
try:
last_path_used = persist.get_value("ftir_spectra_dir")
except KeyError:
last_path_used = ""
#get filename to open
spectrum_file = wx.FileSelector("Choose spectrum file to open",
default_path=last_path_used)
if spectrum_file == "":
return None, None, None
persist.set_value("ftir_spectra_dir", os.path.dirname(spectrum_file))
reader = csv.reader(open(spectrum_file, "rb"), dialect="excel")
wavenumber = []
absorbance = []
for line in reader:
wavenumber.append(float(line[0]))
absorbance.append(float(line[1]))
try:
return wavenumber, absorbance, spectrum_file
#TODO see return above
except Exception,e:
print e.args
wx.MessageBox("Unable to load spectrum file \'%s\'. "
"Unrecognised file format."%spectrum_file,
"AvoPlot", wx.ICON_ERROR)
return None, None, None
#Start Extra Control Panel Functions -- created after adv_sine_wave example in AvoPlot documentation
class BackgroundCalcCtrl(controls.AvoPlotControlPanelBase):
"""
Control panel where the buttons to draw backgrounds will appear
"""
def __init__(self, series):
#call the parent class's __init__ method, passing it the name that we
#want to appear on the control panels tab.
super(BackgroundCalcCtrl, self).__init__("Background Fit")
#store the data series object that this control panel is associated with,
#so that we can access it later
self.series = series
self.wavenumber, self.absorbance = self.series.get_data()
def define_data(self):
wavenumber = self.wavenumber
absorbance = self.absorbance
return wavenumber, absorbance
def setup(self, parent):
super(BackgroundCalcCtrl, self).setup(parent)
#AvoPlotXYSubplot is a class, not an object/instance so you can't do this!
#also get_mpl_axes is a method - so you would need () to make this do what you intended
#self.axes = AvoPlotXYSubplot.get_mpl_axes
self.axes = self.series.get_parent_element().get_mpl_axes()
self.plot_obj = parent
spec_type = classify_spectrum(self.wavenumber, self.absorbance)
h2o_button = wx.Button(self, wx.ID_ANY, "Fit H2O")
self.peak_height_text = wx.StaticText(self, -1, "Peak Height:\n")
self.Add(self.peak_height_text)
self.Add(h2o_button, 0, wx.ALIGN_TOP|wx.ALL,border=10)
# sizer = wx.StaticText(self, -1, "Spec Type:\n%s"%spec_type, 0, wx.ALIGN_TOP|wx.ALL)
# sizer_peak_height = wx.sizer(self.peak_height_text,0,wx.ALIGN_TOP|wx.ALL)
# self.Add(sizer)
# self.Add(sizer_peak_height)
wx.EVT_BUTTON(self, h2o_button.GetId(), self.fit_h2o)
# self.SetSizer(sizer)
# self.sizer.Fit(self)
self.SetAutoLayout(True)
def set_peak_height(self, height):
self.peak_height_text.SetLabel("Peak Height:\n%f"%height)
def fit_h2o(self, evnt):
try:
wx.BeginBusyCursor()
bkgd = fit_h2o_peak(self.wavenumber, self.absorbance, self.axes, plot_fit=True)
#bkgd = fit_h2o_peak(self.wavenumber, self.absorbance, ax, plot_fit=True)
peak_height = calc_h2o_peak_height(self.wavenumber, self.absorbance, bkgd)
self.set_peak_height(peak_height)
self.series.update()
except ValueError, e:
wx.EndBusyCursor()
wx.MessageBox( 'Failed to find H2O background.\nReason:%s'%e.args[0], 'AvoPlot FTIR',wx.ICON_ERROR)
finally:
wx.EndBusyCursor()
def create_plot(self):
self.axes.plot(self.wavenumber, self.absorbance)
self.axes.set_xlim((self.axes.get_xlim()[1],self.axes.get_xlim()[0]))
self.axes.set_xlabel("Wavenumber")
self.axes.set_ylabel("Absorbance")
def get_h2o_fitting_points(xdata, ydata, bkgd_func=None,
target_wavenumber_range=200, tolerance=30):
"""
This function selects fitting points covering approximately
(target_wavenumber_range +- tolerance) cm-1 on either side of the H2O peak.
The points are taken from beyond the minima on either side of the peak.
The H2O peak is searched for between 3000-4000 cm-1 wavenumber.
The bkgd_func arg can be used to subtract a global background from the
data before the points are searched for - this can be useful for very skewed
spectra. If the function can't find the fitting points without a
background function, then it calls itself again passing get_global_bkgd as
the bkgd_func argument.
"""
#initialise the cropping limits (these are in wavenumber)
l_crop = 2200
peak_l_crop = 3000
r_crop = 4000
#make a copy of the original data, so that we have an uncropped version
master_xdata = numpy.array(xdata)
master_ydata = numpy.array(ydata)
#crop the data to the correct size
data_mask = numpy.where(numpy.logical_and(master_xdata > l_crop, master_xdata <=r_crop))
xdata = master_xdata[data_mask]
ydata = master_ydata[data_mask]
#find where the H2O peak is
mask = numpy.where(xdata > peak_l_crop)
peak_idx = numpy.argmax(ydata[mask]) + mask[0][0]
print "H2O peak found at ",xdata[peak_idx]
#find the minima either side of the peak
l_min = numpy.argmin(ydata[:peak_idx])
r_min = numpy.argmin(ydata[peak_idx:])+peak_idx
print "l_min = ",xdata[l_min], "r_min = ", xdata[r_min]
#now iteratively adjust the crop limits so that approximately
#target_wavenumber_range of data is retained beyond each minimum
while (abs(xdata[l_min]-l_crop - target_wavenumber_range) > tolerance or
abs(r_crop - xdata[r_min] - target_wavenumber_range) > tolerance):
l_crop -= target_wavenumber_range - (xdata[l_min]-l_crop)
r_crop -= (r_crop - xdata[r_min]) - target_wavenumber_range
data_mask = numpy.where(numpy.logical_and(master_xdata > l_crop,
master_xdata <=r_crop))
xdata = master_xdata[data_mask]
ydata = master_ydata[data_mask]
mask = numpy.where(xdata > peak_l_crop)
peak_idx = numpy.argmax(ydata[mask]) + mask[0][0]
if bkgd_func is None:
if len(ydata[:peak_idx])>0:
l_min = numpy.argmin(ydata[:peak_idx])
else:
l_min = 0
break
else:
l_min = numpy.argmin(ydata[:peak_idx]-bkgd_func(xdata[:peak_idx]))
r_min = numpy.argmin(ydata[peak_idx:])+peak_idx
print "l_min = ",xdata[l_min], "r_min = ", xdata[r_min]
print "l_crop = ",l_crop, "r_crop = ", r_crop, "\n"
if xdata[l_min] < 2000:
if bkgd_func is not None:
raise ValueError("Could not find low wavenumber minimum.")
print "calling again with bkgd_func"
return get_h2o_fitting_points(master_xdata, master_ydata, bkgd_func=get_global_bkgd(master_xdata, master_ydata))
if xdata[r_min] > 5000:
raise ValueError("Could not find high wavenumber minimum.")
fit_xdata = numpy.concatenate((xdata[:l_min],xdata[r_min:]))
fit_ydata = numpy.concatenate((ydata[:l_min],ydata[r_min:]))
return fit_xdata, fit_ydata
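# Rough usage sketch (illustrative only; "wavenumber" and "absorbance" are
# assumed to be arrays loaded from a spectrum file elsewhere):
#
# fit_x, fit_y = get_h2o_fitting_points(wavenumber, absorbance)
# coeffs = numpy.polyfit(fit_x, fit_y, 3)
# bkgd = numpy.poly1d(coeffs)
#
# which is essentially what fit_h2o_peak() below does, plus plotting of the
# selected points and the fitted cubic background.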
def classify_spectrum(xdata, ydata):
#first find the water peak
l_crop = 2200
r_crop = 4000
master_xdata = numpy.array(xdata)
master_ydata = numpy.array(ydata)
data_mask = numpy.where(numpy.logical_and(master_xdata > l_crop, master_xdata <=r_crop))
xdata = master_xdata[data_mask]
ydata = master_ydata[data_mask]
peak_idx = numpy.argmax(ydata)
global_peak_idx = peak_idx + numpy.argmin(numpy.abs(master_xdata - l_crop))
#now find the global minimum
min_y_idx = numpy.argmin(master_ydata[:global_peak_idx])
min_y_xvalue = master_xdata[min_y_idx]
r_min_idx = numpy.argmin(ydata[peak_idx:])+peak_idx
rh_fit_line_param = numpy.polyfit(xdata[r_min_idx:], ydata[r_min_idx:],1)
rh_fit_line=numpy.poly1d(rh_fit_line_param)
if rh_fit_line(min_y_xvalue) < master_ydata[min_y_idx]:
return "Well behaved"
else:
return "Low H2O"
def get_global_bkgd(xdata, ydata):
master_xdata = numpy.array(xdata)
master_ydata = numpy.array(ydata)
line_grad = numpy.gradient(numpy.array(ydata[numpy.argmax(ydata):]))
mask = numpy.where(line_grad > 0)
first_min = ydata[mask[0][0]+numpy.argmax(ydata)]
print "first min at ",xdata[mask[0][0]]
data_mask = numpy.where(numpy.logical_and(master_xdata > 2200, master_xdata <=4000))
#xdata = master_xdata[data_mask]
#ydata = master_ydata[data_mask]
last_val = master_ydata[-1]
polyfit_params = numpy.polyfit(numpy.array([xdata[mask[0][0]],xdata[-1]]), numpy.array([first_min, last_val]), 1)
print "polyfit params = ",polyfit_params
if polyfit_params[0] < 0:
print "returning zeros as backgrounds"
return lambda x: numpy.zeros_like(x)
return numpy.poly1d(polyfit_params)
def fit_h2o_peak(xdata, ydata, axes, plot_fit=True):
#master_xdata = numpy.array(xdata)
#master_ydata = numpy.array(ydata)
#plot(master_xdata, master_ydata)
if len(xdata) != len(ydata):
raise ValueError, "Lengths of xdata and ydata must match"
#crop the x and y data to the fitting range
fit_xdata, fit_ydata = get_h2o_fitting_points(xdata, ydata)
polyfit_params = numpy.polyfit(fit_xdata, fit_ydata, 3)
bkgd_function = numpy.poly1d(polyfit_params)
axes.plot(fit_xdata,fit_ydata,'+')
bkgd_xdata = numpy.arange(fit_xdata[0], fit_xdata[-1])
axes.plot(bkgd_xdata, bkgd_function(bkgd_xdata))
return bkgd_function
def calc_h2o_peak_height(xdata, ydata, bkgd_func):
l_crop = 2200
r_crop = 4000
master_xdata = numpy.array(xdata)
master_ydata = numpy.array(ydata)
data_mask = numpy.where(numpy.logical_and(master_xdata > l_crop, master_xdata <=r_crop))
xdata = master_xdata[data_mask]
ydata = master_ydata[data_mask]
peak_idx = numpy.argmax(ydata)
global_peak_idx = peak_idx + numpy.argmin(numpy.abs(master_xdata-l_crop))
print "peak index = %d, global index = %d"%(peak_idx, global_peak_idx)
return master_ydata[global_peak_idx] - bkgd_func(master_xdata[global_peak_idx])
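# Hedged end-to-end sketch of the background fitting pipeline driven by the
# control panel ("wavenumber", "absorbance" and the matplotlib axes "ax" are
# assumed to already exist):
#
# bkgd = fit_h2o_peak(wavenumber, absorbance, ax, plot_fit=True)
# height = calc_h2o_peak_height(wavenumber, absorbance, bkgd)
# print "H2O peak height above background: %f" % height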
#class FTIRSpecPlot(PlotPanelBase):
#
# def __init__(self, parent, filename):
# self.wavenumber, self.absorbance = load_ftir_file(filename)
# print classify_spectrum(self.wavenumber, self.absorbance)
# PlotPanelBase.__init__(self,parent, os.path.basename(filename))
# self.control_panel = FTIRFittingPanel(self, classify_spectrum(self.wavenumber, self.absorbance))
# self.h_sizer.Insert(0,self.control_panel, flag=wx.ALIGN_LEFT)
#
# self.create_plot()
#
#
# def fit_h2o(self, evnt):
# try:
# wx.BeginBusyCursor()
# bkgd = fit_h2o_peak(self.wavenumber, self.absorbance, self.axes, plot_fit=True)
# peak_height = calc_h2o_peak_height(self.wavenumber, self.absorbance, bkgd)
# self.control_panel.set_peak_height(peak_height)
#
# self.canvas.draw()
# self.canvas.gui_repaint()
# finally:
# wx.EndBusyCursor()
#
# def create_plot(self):
# self.axes.plot(self.wavenumber, self.absorbance)
# self.axes.set_xlim((self.axes.get_xlim()[1],self.axes.get_xlim()[0]))
# self.axes.set_xlabel("Wavenumber")
# self.axes.set_ylabel("Absorbance")
plugins.register(FTIRPlugin())
| gpl-3.0 | -7,008,206,764,611,855,000 | 35.206983 | 120 | 0.611199 | false |
GStreamer/cerbero | test/test_cerbero_packages_packagesstore.py | 4 | 5233 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import tempfile
from cerbero.config import Platform
from cerbero.errors import PackageNotFoundError
from cerbero.packages.package import Package, MetaPackage, SDKPackage,\
InstallerPackage, App
from cerbero.packages.packagesstore import PackagesStore
from test import test_packages_common as common
PACKAGE = '''
class Package(package.Package):
name = 'test-package'
def test_imports(self):
Platform.WINDOWS
Distro.WINDOWS
DistroVersion.WINDOWS_7
Architecture.X86
'''
SDKPACKAGE = '''
class SDKPackage(package.SDKPackage):
name = 'test-package'
'''
INSTALLERPACKAGE = '''
class InstallerPackage(package.InstallerPackage):
name = 'test-package'
'''
class PackageTest(unittest.TestCase):
def setUp(self):
self.config = common.DummyConfig()
self.config.packages_dir = '/test'
self.config.target_platform = Platform.LINUX
self.store = PackagesStore(self.config, False)
def testAddPackage(self):
package = common.Package1(self.config, None, None)
self.assertEqual(len(self.store._packages), 0)
self.store.add_package(package)
self.assertEqual(len(self.store._packages), 1)
self.assertEqual(package, self.store._packages[package.name])
def testGetPackage(self):
package = common.Package1(self.config, None, None)
self.store.add_package(package)
self.assertEqual(package, self.store.get_package(package.name))
def testPackageNotFound(self):
self.assertRaises(PackageNotFoundError, self.store.get_package,
'unknown')
def testPackagesList(self):
package = common.Package1(self.config, None, None)
metapackage = common.MetaPackage(self.config, None)
self.store.add_package(package)
self.store.add_package(metapackage)
l = sorted([package, metapackage], key=lambda x: x.name)
self.assertEqual(l, self.store.get_packages_list())
def testPackageDeps(self):
package = common.Package1(self.config, None, None)
package2 = common.Package2(self.config, None, None)
self.store.add_package(package)
self.store.add_package(package2)
self.assertEqual(package.deps,
[x.name for x in self.store.get_package_deps(package.name)])
def testMetaPackageDeps(self):
metapackage = common.MetaPackage(self.config, None)
self.store.add_package(metapackage)
# the metapackage depends on package that are not yet in the store
self.assertRaises(PackageNotFoundError,
self.store.get_package_deps, metapackage.name)
for klass in [common.Package1, common.Package2, common.Package3,
common.Package4]:
p = klass(self.config, None, None)
self.store.add_package(p)
for klass in [common.MetaPackage]:
p = klass(self.config, None)
self.store.add_package(p)
deps = ['gstreamer-test-bindings', 'gstreamer-test1',
'gstreamer-test2', 'gstreamer-test3']
res = [x.name for x in self.store.get_package_deps(metapackage.name)]
self.assertEqual(sorted(deps), sorted(res))
def testLoadPackageFromFile(self):
package_file = tempfile.NamedTemporaryFile()
package_file.write(PACKAGE)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
self.assertIsInstance(p, Package)
self.assertEqual('test-package', p.name)
def testLoadMetaPackageFromFile(self):
for x, t in [(SDKPACKAGE, SDKPackage),
(INSTALLERPACKAGE, InstallerPackage)]:
package_file = tempfile.NamedTemporaryFile()
package_file.write(x)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
print(p, type(p))
self.assertIsInstance(p, t)
self.assertEqual('test-package', p.name)
def testImports(self):
package_file = tempfile.NamedTemporaryFile()
package_file.write(PACKAGE)
package_file.flush()
p = self.store._load_package_from_file(package_file.name)
self.assertIsInstance(p, Package)
try:
p.test_imports()
except ImportError as e:
self.fail("Import error raised, %s", e)
| lgpl-2.1 | -1,453,116,195,399,778,300 | 36.378571 | 77 | 0.674374 | false |
manipopopo/tensorflow | tensorflow/contrib/distribute/python/warm_starting_util_test.py | 5 | 3758 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for warm_starting_util with Distribution Strategy.
These tests are located here instead of as part of `WarmStartingUtilTest`
because they need access to distribution strategies which are only present in
contrib right now.
TODO(priyag): Move the tests to core `WarmStartingUtilTest` when distribution
strategy moves out of contrib.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import warm_starting_util as ws_util
class WarmStartingUtilWithDistributionStrategyTest(
test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
distribution=[combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus],
save_with_distribution=[True, False],
restore_with_distribution=[True, False],
mode=["graph"]))
def testWarmStart(self, distribution, save_with_distribution,
restore_with_distribution):
var_name = "v"
original_value = [[1., 2.], [3., 4.]]
# Create variable and save checkpoint from which to warm-start.
def create_var(g):
with self.test_session(graph=g) as sess:
var = variable_scope.get_variable(var_name, initializer=original_value)
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver()
ckpt_prefix = os.path.join(self.get_temp_dir(), "model")
saver.save(sess, ckpt_prefix, global_step=0)
return var, sess.run(var)
if save_with_distribution:
with ops.Graph().as_default() as g, distribution.scope():
_, prev_init_val = create_var(g)
else:
with ops.Graph().as_default() as g:
_, prev_init_val = create_var(g)
# Verify we initialized the values correctly.
self.assertAllEqual(original_value, prev_init_val)
def warm_start(g):
with self.test_session(graph=g) as sess:
# Initialize with zeros.
var = variable_scope.get_variable(
var_name, initializer=[[0., 0.], [0., 0.]])
ws_util.warm_start(self.get_temp_dir())
sess.run(variables.global_variables_initializer())
# Verify weights were correctly warm-started to previous values.
self.assertAllEqual(original_value, self.evaluate(var))
# Warm start in a new graph.
if restore_with_distribution:
with ops.Graph().as_default() as g, distribution.scope():
warm_start(g)
else:
with ops.Graph().as_default() as g:
warm_start(g)
if __name__ == "__main__":
test.main()
| apache-2.0 | 3,203,496,606,041,416,700 | 37.742268 | 80 | 0.686003 | false |
denys-duchier/django | tests/model_fields/test_filefield.py | 45 | 3039 | import os
import sys
import unittest
from django.core.files import temp
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.db.utils import IntegrityError
from django.test import TestCase, override_settings
from .models import Document
class FileFieldTests(TestCase):
def test_clearable(self):
"""
FileField.save_form_data() will clear its instance attribute value if
passed False.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, False)
self.assertEqual(d.myfile, '')
def test_unchanged(self):
"""
FileField.save_form_data() considers None to mean "no change" rather
than "clear".
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, None)
self.assertEqual(d.myfile, 'something.txt')
def test_changed(self):
"""
FileField.save_form_data(), if passed a truthy value, updates its
instance attribute.
"""
d = Document(myfile='something.txt')
self.assertEqual(d.myfile, 'something.txt')
field = d._meta.get_field('myfile')
field.save_form_data(d, 'else.txt')
self.assertEqual(d.myfile, 'else.txt')
def test_delete_when_file_unset(self):
"""
Calling delete on an unset FileField should not call the file deletion
process, but fail silently (#20660).
"""
d = Document()
d.myfile.delete()
def test_refresh_from_db(self):
d = Document.objects.create(myfile='something.txt')
d.refresh_from_db()
self.assertIs(d.myfile.instance, d)
def test_defer(self):
Document.objects.create(myfile='something.txt')
self.assertEqual(Document.objects.defer('myfile')[0].myfile, 'something.txt')
def test_unique_when_same_filename(self):
"""
A FileField with unique=True shouldn't allow two instances with the
same name to be saved.
"""
Document.objects.create(myfile='something.txt')
with self.assertRaises(IntegrityError):
Document.objects.create(myfile='something.txt')
@unittest.skipIf(sys.platform.startswith('win'), "Windows doesn't support moving open files.")
# The file's source and destination must be on the same filesystem.
@override_settings(MEDIA_ROOT=temp.gettempdir())
def test_move_temporary_file(self):
"""
The temporary uploaded file is moved rather than copied to the
destination.
"""
with TemporaryUploadedFile('something.txt', 'text/plain', 0, 'UTF-8') as tmp_file:
tmp_file_path = tmp_file.temporary_file_path()
Document.objects.create(myfile=tmp_file)
self.assertFalse(os.path.exists(tmp_file_path), 'Temporary file still exists')
| bsd-3-clause | -6,198,713,363,209,987,000 | 34.752941 | 98 | 0.638697 | false |
isabelvillaruiz/ptavi-pfinal | uaserver.py | 1 | 6607 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""USER AGENT SERVER."""
import socket
import socketserver
import sys
import os
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
import time
"""READING AND EXTRACTION OF XML DATA."""
if len(sys.argv) != 2:
sys.exit("Usage: python uaserver.py config")
#FIRST PARAMETER : XML FILE
XML_DATA = sys.argv[1]
class SmallSMILHandler(ContentHandler):
"""CLASE DE LECTURA DE XML."""
def __init__(self):
"""Diccionario xml."""
self.list = []
self.dicc = {"account": ["username", "passwd"],
"uaserver": ["ip", "puerto"],
"rtpaudio": ["puerto"],
"regproxy": ["ip", "puerto"],
"log": ["path"],
"audio": ["path"]}
def startElement(self, name, attrib):
"""Start Element."""
if name in self.dicc:
dicc = {}
for item in self.dicc[name]:
dicc[item] = attrib.get(item, "")
diccname = {name: dicc}
self.list.append(diccname)
def get_tags(self):
"""Devuelve la lista xml."""
return self.list
parser = make_parser()
cHandler = SmallSMILHandler()
parser.setContentHandler(cHandler)
parser.parse(open(XML_DATA))
data = cHandler.get_tags()
print(data)
'DATA'
#Extract a few values from the dictionary built from the XML data
ACCOUNT = data[0]['account']
#print("This is account: ", ACCOUNT)
USERNAME = ACCOUNT['username']
#print("This is username:", USERNAME)
UASERVER_PORT = data[1]['uaserver']['puerto']
#print("This is the UA server listening port:", UASERVER_PORT)
UAS_IP = data[1]['uaserver']['ip']
#print("This is the UA server IP address: ", UAS_IP)
RTP_PORT = data[2]['rtpaudio']['puerto']
SONG = data[5]['audio']['path']
LOG_FILE = data[4]['log']['path']
PROXY_PORT = data[3]['regproxy']['puerto']
PROXY_IP = data[3]['regproxy']['ip']
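#For reference (added note; the exact values are assumptions): the configuration
#XML is expected to look roughly like this, one element per key handled by
#SmallSMILHandler, with attribute names matching self.dicc:
#  <config>
#    <account username="user" passwd="pass"/>
#    <uaserver ip="127.0.0.1" puerto="6001"/>
#    <rtpaudio puerto="23032"/>
#    <regproxy ip="127.0.0.1" puerto="5555"/>
#    <log path="uaserver.log"/>
#    <audio path="song.mp3"/>
#  </config>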
'''LOG'''
fichero = LOG_FILE
fich = open(fichero, 'a')
str_now = time.strftime("%Y%m%d%H%M%S", time.gmtime(time.time()))
class EchoHandler(socketserver.DatagramRequestHandler):
"""Echo."""
PORT_RTP = []
def handle(self):
u"""Escribe dirección y puerto cliente (tupla client_address)."""
while 1:
# Leyendo línea a línea lo que nos envía el cliente
text = self.rfile.read()
line = self.rfile.read()
print("Proxy manda cliente: ")
print(text.decode('utf-8'))
LINE = text.decode('utf-8')
REQUESTS = ['INVITE', 'ACK', 'BYE']
Words_LINES = LINE.split()
print("Esta es la linea que me envia el proxy", Words_LINES)
REQUEST = Words_LINES[0]
#PORT_RTP = []
if REQUEST == 'INVITE':
RTP_PORT_RECEIVE = Words_LINES[11]
self.PORT_RTP.append(RTP_PORT_RECEIVE)
                #We have added the port to the list
                print("NEWLY CREATED PORT LIST", self.PORT_RTP)
                print("RTP port the client sends us in the INVITE: ")
                print(RTP_PORT_RECEIVE)
            if REQUEST not in REQUESTS:
                LINE_405 = 'SIP/2.0 405 Method Not Allowed\r\n\r\n'
                self.wfile.write(bytes(LINE_405, 'utf-8'))
if REQUEST == 'INVITE':
'''LOG'''
datos_log1 = str_now + " Received from "
datos_log1 += self.client_address[0] + ":"
datos_log1 += str(self.client_address[1])
datos_log1 += " " + LINE.replace("\r\n", " ") + "\r\n"
fich.write(datos_log1)
answer = "SIP/2.0 100 Trying\r\n\r\n"
answer += "SIP/2.0 180 Ring\r\n\r\n"
answer += "SIP/2.0 200 OK\r\n\r\n"
answer += "Content-Type: application/sdp\r\n\r\n"
answer += "v=0\r\n" + "o=" + USERNAME + " "
answer += UAS_IP + " \r\n" + "s=SesionGhibli\r\n"
answer += "t=0\r\n" + "m=audio " + RTP_PORT
answer += " RTP\r\n\r\n"
self.wfile.write(bytes(answer, 'utf-8'))
'''LOG'''
datos_log2 = str_now + " Sent to " + PROXY_IP + ":"
datos_log2 += PROXY_PORT + " "
datos_log2 += answer.replace("\r\n", " ") + "\r\n"
fich.write(datos_log2)
elif REQUEST == 'ACK':
'''LOG'''
datos_log1 = str_now + " Received from "
datos_log1 += self.client_address[0] + ":"
datos_log1 += str(self.client_address[1])
datos_log1 += " " + LINE.replace("\r\n", " ") + "\r\n"
fich.write(datos_log1)
#print("imprimiendo la lista inventada", self.PORT_RTP)
PUERTO = self.PORT_RTP[0]
print("Reproduciendo")
aEjecutar = './mp32rtp -i 127.0.0.1 -p ' + PUERTO + ' < '
aEjecutar += SONG
#aEjecutar = "./mp32rtp -i " + DIR_DEST + " -p " + PUERTO
#aEjecutar += " < " + SONG
os.system(aEjecutar)
print('End')
#print("ENVIANDO AUDIO RTP IMAGINARIO AL PUERTO: ", PUERTO)
elif REQUEST == 'BYE':
'''LOG'''
datos_log1 = str_now + " Received from "
datos_log1 += PROXY_IP + ":"
datos_log1 += str(self.client_address[1])
datos_log1 += " " + LINE.replace("\r\n", " ") + "\r\n"
fich.write(datos_log1)
self.wfile.write(b"SIP/2.0 200 OK\r\n\r\n")
'''LOG'''
datos_log2 = str_now + " Sent to "
datos_log2 += PROXY_IP + ":" + PROXY_PORT
datos_log2 += " " + "SIP/2.0 200 OK" + "\r\n"
fich.write(datos_log2)
            # If there are no more lines, exit the infinite loop
if not line:
break
if __name__ == "__main__":
    # Create the echo server and listen
serv = socketserver.UDPServer((UAS_IP, int(UASERVER_PORT)), EchoHandler)
print("Listening...")
try:
serv.serve_forever()
except KeyboardInterrupt:
print("Finalizado servidor")
| gpl-2.0 | -3,415,550,420,387,668,000 | 36.931034 | 79 | 0.485606 | false |
qnib/QNIBCollect | src/diamond/collectors/gearman_stats/gearman_stats.py | 6 | 3463 | # coding=utf-8
"""
Port of the ganglia gearman collector
Collects stats from gearman job server
#### Dependencies
* gearman
"""
from diamond.collector import str_to_bool
import diamond.collector
import os
import subprocess
import time
try:
import gearman
except ImportError:
gearman = None
class GearmanCollector(diamond.collector.Collector):
    def get_default_config_help(self):
config_help = super(GearmanCollector, self).get_default_config_help()
config_help.update({
'gearman_pid_path': 'Gearman PID file path',
'url': 'Gearman endpoint to talk to',
'bin': 'Path to ls command',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(GearmanCollector, self).get_default_config()
config.update({
'path': 'gearman_stats',
'gearman_pid_path': '/var/run/gearman/gearman-job-server.pid',
'url': 'localhost',
'bin': '/bin/ls',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def collect(self):
"""
Collector gearman stats
"""
def get_fds(gearman_pid_path):
with open(gearman_pid_path) as fp:
gearman_pid = fp.read().strip()
proc_path = os.path.join('/proc', gearman_pid, 'fd')
command = [self.config['bin'], proc_path]
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
output, errors = process.communicate()
if errors:
raise Exception(errors)
return len(output.splitlines())
def publish_server_stats(gm_admin_client):
# Publish idle/running worker counts
# and no. of tasks queued per task
for entry in gm_admin_client.get_status():
total = entry.get('workers', 0)
running = entry.get('running', 0)
idle = total-running
self.dimensions = {'task': entry['task']} # Internally, this dict is cleared on self.publish
self.publish('gearman.queued', entry['queued'])
self.dimensions = {'type': 'running'}
self.publish('gearman.workers', running)
self.dimensions = {'type': 'idle'}
self.publish('gearman.workers', idle)
try:
if gearman is None:
self.log.error("Unable to import python gearman client")
return
# Collect and Publish Metrics
self.log.debug("Using pid file: %s & gearman endpoint : %s",
self.config['gearman_pid_path'], self.config['url'])
gm_admin_client = gearman.GearmanAdminClient([self.config['url']])
self.publish('gearman.ping', gm_admin_client.ping_server())
self.publish('gearman.fds', get_fds(self.config['gearman_pid_path']))
publish_server_stats(gm_admin_client)
except Exception, e:
self.log.error("GearmanCollector Error: %s", e)
| apache-2.0 | 1,728,892,256,037,856,300 | 31.669811 | 108 | 0.549524 | false |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_memory_reuse_exclude_feed_var.py | 4 | 2329 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import numpy as np
import unittest
class TestMemoryReuseExcludeFeedVar(unittest.TestCase):
def setUp(self):
self.image_shape = [28, 28]
self.iteration = 10
def main_impl(self, place):
image = fluid.layers.data(
name='image', shape=self.image_shape, dtype='float32')
relu_image = fluid.layers.relu(image)
loss = fluid.layers.reduce_mean(relu_image)
build_strategy = fluid.BuildStrategy()
build_strategy.enable_inplace = True
build_strategy.memory_optimize = True
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
compiled_prog = fluid.CompiledProgram(fluid.default_main_program(
)).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
image_tensor = fluid.LoDTensor()
np_image = np.random.uniform(
low=-10, high=10, size=self.image_shape).astype('float32')
image_tensor.set(np_image, place)
feed_dict = [{image.name: image_tensor}]
for _ in range(self.iteration):
exe.run(compiled_prog, feed=feed_dict, fetch_list=[loss.name])
self.assertTrue(np.array_equal(np.array(image_tensor), np_image))
def test_main(self):
places = [fluid.CPUPlace()]
if fluid.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
with fluid.program_guard(fluid.Program(), fluid.Program()):
with fluid.unique_name.guard():
with fluid.scope_guard(fluid.Scope()):
self.main_impl(p)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,878,942,396,434,879,000 | 34.287879 | 77 | 0.648347 | false |
myusuf3/vincent | examples/line_chart_examples.py | 2 | 1979 | # -*- coding: utf-8 -*-
"""
Vincent Line Examples
"""
#Build a Line Chart from scratch
from vincent import *
import pandas.io.data as web
import pandas as pd
all_data = {}
for ticker in ['AAPL', 'GOOG', 'IBM', 'YHOO', 'MSFT']:
all_data[ticker] = web.get_data_yahoo(ticker, '1/1/2010', '1/1/2013')
price = pd.DataFrame({tic: data['Adj Close']
for tic, data in all_data.items()})
#Note that we're using timeseries, so x-scale type is "time". For non
#timeseries data, use "linear"
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='time', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', type='linear', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
transform = MarkRef(data='table',
transform=[Transform(type='facet', keys=['data.col'])])
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
stroke=ValueRef(scale="color", field='data.col'),
stroke_width=ValueRef(value=2))
mark = Mark(type='group', from_=transform,
marks=[Mark(type='line',
properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
data = Data.from_pandas(price)
#Using a Vincent Keyed List here
vis.data['table'] = data
vis.axis_titles(x='Date', y='Price')
vis.legend(title='Tech Stocks')
vis.to_json('vega.json')
#Convenience method
vis = Line(price)
vis.axis_titles(x='Date', y='Price')
vis.legend(title='Tech Stocks')
vis.colors(brew='Set1')
vis.to_json('vega.json')
| mit | 2,007,897,327,809,592,800 | 33.12069 | 75 | 0.593734 | false |
shanot/imp | modules/container/test/test_close_pairs_score_state.py | 2 | 5501 | from __future__ import print_function
import IMP
import IMP.test
import IMP.core
import IMP.algebra
import IMP.container
import random
class Tests(IMP.test.TestCase):
def _are_close(self, m, a, b, d):
da = IMP.core.XYZR(m, a)
db = IMP.core.XYZR(m, b)
cd = IMP.core.get_distance(da, db)
return (cd <= d)
def _compare_lists(self, m, pc, d, out):
print("comparing")
print(m.get_number_of_score_states())
m.update()
all = out.get_indexes()
indexes = pc.get_indexes()
for i in range(0, len(indexes)):
for j in range(0, i):
a = indexes[i]
b = indexes[j]
pp = (a, b)
if self._are_close(m, a, b, d):
print("expecting " + str(pp))
self.assertTrue((a, b) in all or (b, a) in all)
def test_it(self):
"""Test ClosePairContainer"""
m = IMP.Model()
IMP.set_log_level(IMP.SILENT)
ps = IMP.get_indexes(self.create_particles_in_box(m, 20))
# test rebuilding under move, set input and change radius
pc = IMP.container.ListSingletonContainer(m, ps)
print("creat cpss " + str(pc))
# IMP.set_log_level(IMP.VERBOSE)
print(1)
threshold = 1
cpss = IMP.container.ClosePairContainer(pc, threshold,
IMP.core.QuadraticClosePairsFinder(
),
1)
cpss.set_was_used(True)
for p in ps:
d = IMP.core.XYZR.setup_particle(m, p)
d.set_radius(random.uniform(0, 2))
self._compare_lists(m, pc, threshold, cpss)
# move things a little
for p in ps:
d = IMP.core.XYZ(m, p)
d.set_coordinates(d.get_coordinates()
+ IMP.algebra.get_random_vector_in(IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0, 0, 0), .55)))
print("first time")
self._compare_lists(m, pc, threshold, cpss)
# move things a lot
for i in range(0, 10):
print("moving " + str(i))
for p in ps:
d = IMP.core.XYZ(m, p)
d.set_coordinates(d.get_coordinates()
+ IMP.algebra.get_random_vector_in(IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0, 0, 0), .7 * (i + 1))))
self._compare_lists(m, pc, threshold, cpss)
def test_restraint_0(self):
"""Test ClosePairContainer over time"""
m = IMP.Model()
IMP.set_log_level(IMP.SILENT)
ps = IMP.get_indexes(self.create_particles_in_box(m, 10))
for p in ps:
IMP.core.XYZR.setup_particle(m, p, 0)
# test rebuilding under move, set input and change radius
pc = IMP.container.ListSingletonContainer(m, ps)
print("creat cpss " + str(pc))
# IMP.set_log_level(IMP.VERBOSE)
print(1)
threshold = 1
cpss = IMP.container.ClosePairContainer(pc, threshold,
IMP.core.QuadraticClosePairsFinder(
),
1)
for i in range(0, 100):
for p in ps:
r = IMP.algebra.get_random_vector_in(
IMP.algebra.get_unit_sphere_3d())
d = IMP.core.XYZ(m, p)
d.set_coordinates(d.get_coordinates() + r)
# make sure internal checks in continer pass
m.update()
def test_restraint(self):
"""Test ClosePairContainer with a restraint"""
m = IMP.Model()
IMP.set_log_level(IMP.VERBOSE)
ps = IMP.get_indexes(self.create_particles_in_box(m, 10))
# test rebuilding under move, set input and change radius
pc = IMP.container.ListSingletonContainer(m, ps)
print("creat cpss " + str(pc))
# IMP.set_log_level(IMP.VERBOSE)
print(1)
threshold = 1
cpss = IMP.container.ClosePairContainer(pc, threshold,
IMP.core.QuadraticClosePairsFinder(
),
1)
r = IMP.container.PairsRestraint(
IMP.core.DistancePairScore(IMP.core.Harmonic(3, 1)), cpss)
for p in ps:
d = IMP.core.XYZR.setup_particle(m, p)
d.set_radius(random.uniform(0, 2))
self._compare_lists(m, pc, threshold, cpss)
# move things a little
for p in ps:
d = IMP.core.XYZ(m, p)
d.set_coordinates(d.get_coordinates()
+ IMP.algebra.get_random_vector_in(IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0, 0, 0), .55)))
print("first time")
self._compare_lists(m, pc, threshold, cpss)
# move things a lot
for i in range(0, 10):
print("moving")
j = 0
for p in ps:
j = j + 1
if ((i + j) % 2) == 0:
d = IMP.core.XYZ(m, p)
d.set_coordinates(d.get_coordinates()
+ IMP.algebra.get_random_vector_in(IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0, 0, 0), .7 * (i + 1))))
self._compare_lists(m, pc, threshold, cpss)
if __name__ == '__main__':
IMP.test.main()
| gpl-3.0 | -5,174,504,393,116,856,000 | 38.292857 | 140 | 0.497364 | false |
0be1/ansible | v2/ansible/plugins/cache/memcached.py | 26 | 5898 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import collections
import os
import sys
import time
import threading
from itertools import chain
from ansible import constants as C
from ansible.plugins.cache.base import BaseCacheModule
try:
import memcache
except ImportError:
print 'python-memcached is required for the memcached fact cache'
sys.exit(1)
class ProxyClientPool(object):
"""
Memcached connection pooling for thread/fork safety. Inspired by py-redis
connection pool.
Available connections are maintained in a deque and released in a FIFO manner.
"""
def __init__(self, *args, **kwargs):
self.max_connections = kwargs.pop('max_connections', 1024)
self.connection_args = args
self.connection_kwargs = kwargs
self.reset()
def reset(self):
self.pid = os.getpid()
self._num_connections = 0
self._available_connections = collections.deque(maxlen=self.max_connections)
self._locked_connections = set()
self._lock = threading.Lock()
def _check_safe(self):
if self.pid != os.getpid():
with self._lock:
if self.pid == os.getpid():
# bail out - another thread already acquired the lock
return
self.disconnect_all()
self.reset()
def get_connection(self):
self._check_safe()
try:
connection = self._available_connections.popleft()
except IndexError:
connection = self.create_connection()
self._locked_connections.add(connection)
return connection
def create_connection(self):
if self._num_connections >= self.max_connections:
raise RuntimeError("Too many memcached connections")
self._num_connections += 1
return memcache.Client(*self.connection_args, **self.connection_kwargs)
def release_connection(self, connection):
self._check_safe()
self._locked_connections.remove(connection)
self._available_connections.append(connection)
def disconnect_all(self):
for conn in chain(self._available_connections, self._locked_connections):
conn.disconnect_all()
def __getattr__(self, name):
def wrapped(*args, **kwargs):
return self._proxy_client(name, *args, **kwargs)
return wrapped
def _proxy_client(self, name, *args, **kwargs):
conn = self.get_connection()
try:
return getattr(conn, name)(*args, **kwargs)
finally:
self.release_connection(conn)
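# Hedged usage sketch (illustration only, not part of the plugin): any
# memcache.Client method can be called directly on the pool; __getattr__
# checks a connection out of the deque, runs the call and then releases it.
#
# pool = ProxyClientPool(['127.0.0.1:11211'], debug=0)
# pool.set('some_key', 'some_value')  # proxied via a pooled connection
# value = pool.get('some_key')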
class CacheModuleKeys(collections.MutableSet):
"""
A set subclass that keeps track of insertion time and persists
the set in memcached.
"""
PREFIX = 'ansible_cache_keys'
def __init__(self, cache, *args, **kwargs):
self._cache = cache
self._keyset = dict(*args, **kwargs)
def __contains__(self, key):
return key in self._keyset
def __iter__(self):
return iter(self._keyset)
def __len__(self):
return len(self._keyset)
def add(self, key):
self._keyset[key] = time.time()
self._cache.set(self.PREFIX, self._keyset)
def discard(self, key):
del self._keyset[key]
self._cache.set(self.PREFIX, self._keyset)
def remove_by_timerange(self, s_min, s_max):
for k in self._keyset.keys():
t = self._keyset[k]
if s_min < t < s_max:
del self._keyset[k]
self._cache.set(self.PREFIX, self._keyset)
class CacheModule(BaseCacheModule):
def __init__(self, *args, **kwargs):
if C.CACHE_PLUGIN_CONNECTION:
connection = C.CACHE_PLUGIN_CONNECTION.split(',')
else:
connection = ['127.0.0.1:11211']
self._timeout = C.CACHE_PLUGIN_TIMEOUT
self._prefix = C.CACHE_PLUGIN_PREFIX
self._cache = ProxyClientPool(connection, debug=0)
self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or [])
def _make_key(self, key):
return "{0}{1}".format(self._prefix, key)
def _expire_keys(self):
if self._timeout > 0:
expiry_age = time.time() - self._timeout
self._keys.remove_by_timerange(0, expiry_age)
def get(self, key):
value = self._cache.get(self._make_key(key))
# guard against the key not being removed from the keyset;
# this could happen in cases where the timeout value is changed
# between invocations
if value is None:
self.delete(key)
raise KeyError
return value
def set(self, key, value):
self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
self._keys.add(key)
def keys(self):
self._expire_keys()
return list(iter(self._keys))
def contains(self, key):
self._expire_keys()
return key in self._keys
def delete(self, key):
self._cache.delete(self._make_key(key))
self._keys.discard(key)
def flush(self):
for key in self.keys():
self.delete(key)
def copy(self):
return self._keys.copy()
| gpl-3.0 | -2,421,996,697,769,090,000 | 29.879581 | 96 | 0.620888 | false |
ashang/calibre | src/calibre/ebooks/oeb/polish/main.py | 11 | 10814 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, sys, os, time
from collections import namedtuple
from functools import partial
from calibre.ebooks.oeb.polish.container import get_container
from calibre.ebooks.oeb.polish.stats import StatsCollector
from calibre.ebooks.oeb.polish.subset import subset_all_fonts
from calibre.ebooks.oeb.polish.embed import embed_all_fonts
from calibre.ebooks.oeb.polish.cover import set_cover
from calibre.ebooks.oeb.polish.replace import smarten_punctuation
from calibre.ebooks.oeb.polish.jacket import (
replace_jacket, add_or_replace_jacket, find_existing_jacket, remove_jacket)
from calibre.ebooks.oeb.polish.css import remove_unused_css
from calibre.utils.logging import Log
ALL_OPTS = {
'embed': False,
'subset': False,
'opf': None,
'cover': None,
'jacket': False,
'remove_jacket':False,
'smarten_punctuation':False,
'remove_unused_css':False,
}
CUSTOMIZATION = {
'remove_unused_classes': False,
}
SUPPORTED = {'EPUB', 'AZW3'}
# Help {{{
HELP = {'about': _(
'''\
<p><i>Polishing books</i> is all about putting the shine of perfection onto
your carefully crafted ebooks.</p>
<p>Polishing tries to minimize the changes to the internal code of your ebook.
Unlike conversion, it <i>does not</i> flatten CSS, rename files, change font
sizes, adjust margins, etc. Every action performs only the minimum set of
changes needed for the desired effect.</p>
<p>You should use this tool as the last step in your ebook creation process.</p>
{0}
<p>Note that polishing only works on files in the %s formats.</p>\
''')%_(' or ').join(sorted('<b>%s</b>'%x for x in SUPPORTED)),
'embed': _('''\
<p>Embed all fonts that are referenced in the document and are not already embedded.
This will scan your computer for the fonts, and if they are found, they will be
embedded into the document.</p>
<p>Please ensure that you have the proper license for embedding the fonts used in this document.</p>
'''),
'subset': _('''\
<p>Subsetting fonts means reducing an embedded font to contain
only the characters used from that font in the book. This
greatly reduces the size of the font files (halving the font
file sizes is common).</p>
<p>For example, if the book uses a specific font for headers,
then subsetting will reduce that font to contain only the
characters present in the actual headers in the book. Or if the
book embeds the bold and italic versions of a font, but bold
and italic text is relatively rare, or absent altogether, then
the bold and italic fonts can either be reduced to only a few
characters or completely removed.</p>
<p>The only downside to subsetting fonts is that if, at a later
date you decide to add more text to your books, the newly added
text might not be covered by the subset font.</p>
'''),
'jacket': _('''\
<p>Insert a "book jacket" page at the start of the book that contains
all the book metadata such as title, tags, authors, series, comments,
etc. Any previous book jacket will be replaced.</p>'''),
'remove_jacket': _('''\
<p>Remove a previous inserted book jacket page.</p>
'''),
'smarten_punctuation': _('''\
<p>Convert plain text dashes, ellipsis, quotes, multiple hyphens, etc. into their
typographically correct equivalents.</p>
<p>Note that the algorithm can sometimes generate incorrect results, especially
when single quotes at the start of contractions are involved.</p>
'''),
'remove_unused_css': _('''\
<p>Remove all unused CSS rules from stylesheets and <style> tags. Some books
created from production templates can have a large number of extra CSS rules
that don't match any actual content. These extra rules can slow down readers
that need to parse them all.</p>
'''),
}
def hfix(name, raw):
if name == 'about':
return raw.format('')
raw = raw.replace('\n\n', '__XX__')
raw = raw.replace('\n', ' ')
raw = raw.replace('__XX__', '\n')
raw = raw.replace('<', '<').replace('>', '>')
return raw
CLI_HELP = {x:hfix(x, re.sub('<.*?>', '', y)) for x, y in HELP.iteritems()}
# }}}
def update_metadata(ebook, new_opf):
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ebooks.metadata.epub import update_metadata
opfpath = ebook.name_to_abspath(ebook.opf_name)
with ebook.open(ebook.opf_name, 'r+b') as stream, open(new_opf, 'rb') as ns:
opf = OPF(stream, basedir=os.path.dirname(opfpath), populate_spine=False,
unquote_urls=False)
mi = OPF(ns, unquote_urls=False,
populate_spine=False).to_book_metadata()
mi.cover, mi.cover_data = None, (None, None)
update_metadata(opf, mi, apply_null=True, update_timestamp=True)
stream.seek(0)
stream.truncate()
stream.write(opf.render())
def polish_one(ebook, opts, report, customization=None):
rt = lambda x: report('\n### ' + x)
jacket = None
changed = False
customization = customization or CUSTOMIZATION.copy()
if opts.subset or opts.embed:
stats = StatsCollector(ebook, do_embed=opts.embed)
if opts.opf:
changed = True
rt(_('Updating metadata'))
update_metadata(ebook, opts.opf)
jacket = find_existing_jacket(ebook)
if jacket is not None:
replace_jacket(ebook, jacket)
report(_('Updated metadata jacket'))
report(_('Metadata updated\n'))
if opts.cover:
changed = True
rt(_('Setting cover'))
set_cover(ebook, opts.cover, report)
report('')
if opts.jacket:
changed = True
rt(_('Inserting metadata jacket'))
if jacket is None:
if add_or_replace_jacket(ebook):
report(_('Existing metadata jacket replaced'))
else:
report(_('Metadata jacket inserted'))
else:
report(_('Existing metadata jacket replaced'))
report('')
if opts.remove_jacket:
rt(_('Removing metadata jacket'))
if remove_jacket(ebook):
report(_('Metadata jacket removed'))
changed = True
else:
report(_('No metadata jacket found'))
report('')
if opts.smarten_punctuation:
rt(_('Smartening punctuation'))
if smarten_punctuation(ebook, report):
changed = True
report('')
if opts.embed:
rt(_('Embedding referenced fonts'))
if embed_all_fonts(ebook, stats, report):
changed = True
report('')
if opts.subset:
rt(_('Subsetting embedded fonts'))
if subset_all_fonts(ebook, stats.font_stats, report):
changed = True
report('')
if opts.remove_unused_css:
rt(_('Removing unused CSS rules'))
if remove_unused_css(ebook, report, remove_unused_classes=customization['remove_unused_classes']):
changed = True
report('')
return changed
def polish(file_map, opts, log, report):
st = time.time()
for inbook, outbook in file_map.iteritems():
report(_('## Polishing: %s')%(inbook.rpartition('.')[-1].upper()))
ebook = get_container(inbook, log)
polish_one(ebook, opts, report)
ebook.commit(outbook)
report('-'*70)
report(_('Polishing took: %.1f seconds')%(time.time()-st))
REPORT = '{0} REPORT {0}'.format('-'*30)
def gui_polish(data):
files = data.pop('files')
if not data.pop('metadata'):
data.pop('opf')
if not data.pop('do_cover'):
data.pop('cover', None)
file_map = {x:x for x in files}
opts = ALL_OPTS.copy()
opts.update(data)
O = namedtuple('Options', ' '.join(ALL_OPTS.iterkeys()))
opts = O(**opts)
log = Log(level=Log.DEBUG)
report = []
polish(file_map, opts, log, report.append)
log('')
log(REPORT)
for msg in report:
log(msg)
return '\n\n'.join(report)
def tweak_polish(container, actions, customization=None):
opts = ALL_OPTS.copy()
opts.update(actions)
O = namedtuple('Options', ' '.join(ALL_OPTS.iterkeys()))
opts = O(**opts)
report = []
changed = polish_one(container, opts, report.append, customization=customization)
return report, changed
def option_parser():
from calibre.utils.config import OptionParser
USAGE = '%prog [options] input_file [output_file]\n\n' + re.sub(
r'<.*?>', '', CLI_HELP['about'])
parser = OptionParser(usage=USAGE)
a = parser.add_option
o = partial(a, default=False, action='store_true')
o('--embed-fonts', '-e', dest='embed', help=CLI_HELP['embed'])
o('--subset-fonts', '-f', dest='subset', help=CLI_HELP['subset'])
a('--cover', '-c', help=_(
'Path to a cover image. Changes the cover specified in the ebook. '
'If no cover is present, or the cover is not properly identified, inserts a new cover.'))
a('--opf', '-o', help=_(
'Path to an OPF file. The metadata in the book is updated from the OPF file.'))
o('--jacket', '-j', help=CLI_HELP['jacket'])
o('--remove-jacket', help=CLI_HELP['remove_jacket'])
o('--smarten-punctuation', '-p', help=CLI_HELP['smarten_punctuation'])
o('--remove-unused-css', '-u', help=CLI_HELP['remove_unused_css'])
o('--verbose', help=_('Produce more verbose output, useful for debugging.'))
return parser
def main(args=None):
parser = option_parser()
opts, args = parser.parse_args(args or sys.argv[1:])
log = Log(level=Log.DEBUG if opts.verbose else Log.INFO)
if not args:
parser.print_help()
log.error(_('You must provide the input file to polish'))
raise SystemExit(1)
if len(args) > 2:
parser.print_help()
log.error(_('Unknown extra arguments'))
raise SystemExit(1)
if len(args) == 1:
inbook = args[0]
base, ext = inbook.rpartition('.')[0::2]
outbook = base + '_polished.' + ext
else:
inbook, outbook = args
popts = ALL_OPTS.copy()
for k, v in popts.iteritems():
popts[k] = getattr(opts, k, None)
O = namedtuple('Options', ' '.join(popts.iterkeys()))
popts = O(**popts)
report = []
if not tuple(filter(None, (getattr(popts, name) for name in ALL_OPTS))):
parser.print_help()
log.error(_('You must specify at least one action to perform'))
raise SystemExit(1)
polish({inbook:outbook}, popts, log, report.append)
log('')
log(REPORT)
for msg in report:
log(msg)
log('Output written to:', outbook)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,445,007,646,269,031,000 | 33.43949 | 106 | 0.638524 | false |
peuyeum/peuyeum | apps/models/prov.py | 2 | 1470 | import sys
import pymongo
import time
sys.path.append('../../')
from lib import config
conn = pymongo.MongoClient(config.mongohost, config.mongoport)
db = conn.klinik
def getProfile(UID):
db.profile
return db.profile.find_one({"uid":UID})
def isProfileExist(UID):
if getProfile(UID)== None:
return False
else:
return True
def getAllRec(NPM):
    db.rec
    return db.rec.find({"NPM": NPM}, {"waktu": 1, "Nilai": 1, "Topik": 1, "_id": 0})
def getLastRec(NPM):
    db.rec
    return db.rec.find_one({"NPM": NPM})
def getToday(NPM):
    db.rec
    return db.rec.find({"NPM": NPM, "waktu": time.strftime("%d/%m/%Y")})
def isIndexExist(cursor):
try:
cursor[0]
return True
except IndexError:
return False
def insertTodayOnly(NPM,Nilai,Pembimbing,Topik):
cur = getToday(NPM)
if isIndexExist(cur):
return "exist"
else:
insertRec(NPM,Nilai,Pembimbing,Topik)
return "done"
def insertRec(NPM,Nilai,Pembimbing,Topik):
db.rec
doc = {"NPM":NPM,"Nilai":int(Nilai),"waktu":time.strftime("%d/%m/%Y"),"Pembimbing":Pembimbing,"Topik":Topik}
idProcess = db.rec.insert_one(doc).inserted_id
return str(doc)
def insertProfile(uid,nama,tanggal_lahir,alamat,pekerjaan,telepon,gender,agama):
db.profile
doc = {"uid":uid,"nama":nama,"waktu_pendaftaran":time.strftime("%d/%m/%Y"),"tanggal_lahir":tanggal_lahir,"alamat":alamat,"pekerjaan":pekerjaan,"telepon":telepon,"gender":gender,"agama":agama}
idProcess = db.profile.insert_one(doc).inserted_id
return str(doc)
| agpl-3.0 | -3,901,274,143,539,796,500 | 23.516667 | 192 | 0.707483 | false |
solvery/lang-features | python/network/dhcp_1/dhcp_base.py | 1 | 1558 | #
# DHCP Base Structure
#
# @Author : iver Liu
#
import socket
import struct
from uuid import getnode as get_mac
from random import randint
def getMacInBytes():
mac = str(hex(get_mac()))
mac = mac[2:]
while len(mac) < 12:
mac = '0' + mac
macb = b''
for i in range(0,12,2):
tmp = int(mac[i:i+2], 16)
macb += struct.pack('!B', tmp)
# pack every two byte
return macb
def randomMacInBytes():
randList = []
for i in range(0,12):
        randList.append(hex(randint(0, 15)).split('x')[1])  # 0-15 keeps each entry to a single hex digit
fakeMac = ''.join( str(e) for e in randList )
print(fakeMac)
fake_macb = b''
for i in range(0,12,2):
tmp = int(fakeMac[i:i+2], 16)
fake_macb += struct.pack('!B', tmp)
return fake_macb
def packIPInBytes(ip):
tmp = ip.split('.')
ipb = b''
for i in tmp:
ipb += struct.pack('!B', int(i))
return ipb
def packetUnpack( packet ):
data = {} # empty dictionary
data['op'] = packet[0]
data['htype'] = packet[1]
data['hlen'] = packet[2]
data['hops'] = packet[3]
data['xid'] = packet[4:8]
data['secs'] = packet[8:10]
data['flags'] = packet[10:12]
data['ciaddr'] = packet[12:16]
data['yiaddr'] = packet[16:20]
data['siaddr'] = packet[20:24]
data['giaddr'] = packet[24:28]
data['chaddr'] = packet[28:43] #Client HW addr. with some useless(? padding
data['sname'] = packet[43:107]
data['file'] = packet[107:235]
data['options'] = packet[235:547]
return data
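# Hedged usage sketch (assumes `reply` holds a raw BOOTP/DHCP response already
# read from a socket; the fixed offsets above follow the BOOTP field layout):
# fields = packetUnpack(reply)
# offered_ip = socket.inet_ntoa(fields['yiaddr'])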
| gpl-2.0 | -413,482,002,627,418,400 | 19.5 | 79 | 0.552632 | false |
HyukjinKwon/spark | examples/src/main/python/ml/rformula_example.py | 27 | 1481 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $example on$
from pyspark.ml.feature import RFormula
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("RFormulaExample")\
.getOrCreate()
# $example on$
dataset = spark.createDataFrame(
[(7, "US", 18, 1.0),
(8, "CA", 12, 0.0),
(9, "NZ", 15, 0.0)],
["id", "country", "hour", "clicked"])
formula = RFormula(
formula="clicked ~ country + hour",
featuresCol="features",
labelCol="label")
output = formula.fit(dataset).transform(dataset)
output.select("features", "label").show()
# $example off$
spark.stop()
| apache-2.0 | -7,980,254,608,035,620,000 | 31.911111 | 74 | 0.671843 | false |
khosrow/metpx | pxStats/lib/BackwardReader.py | 1 | 7727 | #! /usr/bin/env python
"""
##############################################################################
##
##
## @name : backwardReader.py
##
## @license : MetPX Copyright (C) 2004-2006 Environment Canada
## MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
## named COPYING in the root of the source directory tree.
##
##
## @author : Nicholas Lemay
##
## @since: : 06-07-2006
##
##
## @summary : Small utility that can be used to read text files backward.
##
## Has a readlineBackwards method that is similar to readline
## and a tail method that is similar to the tail used in linux
##
##############################################################################
"""
import os,sys
class BackwardReader:
def tail( nbLines = 1, file = "", printIt = False ):
"""
@summary : Similar to the usual tail method we use in linux, only now it is in pure python.
@warning : File must exist or else program will be terminated.
@param nbLines : Number of lines we want to get from the end of the file.
@param file : Absolute path to the file we want to use.
@param printIt : Whether or not user want to print the results of action performed here.
@return : Lines from the bottom of the file.
"""
if os.path.isfile( file ):
offset = -1
lines = []
fileSize = os.stat( file )[6]
if fileSize >=2 :
fileHandle = open( file ,"r" )
fileHandle.seek( offset,2 )
for lineNumber in range( nbLines + 1 ) :
while abs( offset ) <= fileSize and fileHandle.read(1) == "\n" :
offset = offset - 1
if abs( offset) <= fileSize :
fileHandle.seek( offset,2 )
else:
fileHandle.seek( (fileSize - ( 2*fileSize ) ), 2 )
while abs( offset) <= fileSize and fileHandle.read(1) != "\n" :
offset = offset - 1
if abs( offset) <= fileSize :
fileHandle.seek( offset, 2 )
else:
fileHandle.seek( (fileSize - ( 2 * fileSize) ), 2 )
line = fileHandle.readline()
if line != "" : #might not be usefull
lines.append( line )
if abs( offset) > fileSize : # cant tail any more lines than the file pocess..
break
fileHandle.close()
lines.reverse()
if printIt == True :
for line in lines:
print line
else:
print "Error. %s does not exist." %file
print "Program terminated."
sys.exit()
return lines
tail = staticmethod(tail)
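    # Hedged usage sketch (the log path is an assumption): returns the last
    # lines of the file in their original order, e.g.
    # lastTen = BackwardReader.tail( nbLines = 10, file = "/var/log/px.log" )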
def readLineBackwards( fileHandle, offset = -1 , fileSize =0 ) :
"""
@summary : This method is to be used in place of readlines
to read a file line by line backwards.
@note : It will prove to be much faster and much less
demanding on memory when using large files than
                reading an entire file from the top with either
readline or readlines.
@param fileHandle : fileHandle of the file to read,
@param offset : offset from which to start reading.
@param fileSize : Size of the file
        @return : tuple containing the read line and the offset
before the read line.
"""
line = ""
if abs( offset ) <= fileSize :
fileHandle.seek( offset,2 )
while abs( offset ) <= fileSize and fileHandle.read(1) == "\n" :
offset = offset- 1
if abs( offset ) <= fileSize :
fileHandle.seek( offset,2 )
else:
fileHandle.seek( ( fileSize - ( 2*fileSize ) ), 2 )
if abs( offset ) <= fileSize :
fileHandle.seek( offset, 2 )
else:
fileHandle.seek( ( fileSize - ( 2 * fileSize ) ), 2 )
while abs( offset ) <= fileSize and fileHandle.read(1) != "\n" :
offset = offset- 1
if abs( offset ) <= fileSize :
fileHandle.seek( offset,2 )
else:
fileHandle.seek( (fileSize - ( 2*fileSize ) ), 2 )
line = fileHandle.readline()
return line, offset
readLineBackwards = staticmethod( readLineBackwards )
if __name__ == "__main__":
"""
Small test case. Tests if everything works plus gives an idea on proper usage.
"""
#------------------------------------------------------ print "tail tests :"
# tail( nbLines =10, file = PXPaths.STATS + "testFiles/empty", printIt = True )
# tail( nbLines =10, file = PXPaths.STATS + "testFiles/tx_amis.log", printIt = True )
# tail( nbLines =10, file = PXPaths.STATS + "testFiles/onelinefile", printIt = True)
#------------------------------------------------------------------------------
#---------------------------------------- print "read lines backward test :"
#-------------------------------- fileName = PXPaths.STATS + "testFiles/bob"
#------------------------------------------ fileHandle = open(fileName, "r")
#------------------------------------------------------------------------------
#------------------------------------------- fileSize = os.stat(fileName)[6]
# line,offset = readLineBackwards( fileHandle, offset = -1, fileSize = fileSize )
#------------------------------------------------------------------------------
#---------------------------------------------------------------- print line
#------------------------------------------------------------------------------
#--------------------------------------------------------- while line != "":
# line,offset = readLineBackwards( fileHandle = fileHandle, offset = offset , fileSize = fileSize )
#-------------------------------------------------------- if line != "":
#-------------------------------------------------------- print line
#------------------------------------------------------------------------------
# tail( nbLines =10, file = PXPaths.STATS + "testFiles/nonexisting", printIt = True )
#------------------------------------------------------------------------------
#-------------------------------------------------------- fileHandle.close()
| gpl-2.0 | 3,711,916,463,053,660,700 | 39.036269 | 111 | 0.376213 | false |
ucsd-lo-group/social-network-analysis | dataprocessing/notecard2st_weights.py | 1 | 2944 | # notecard2st.py
# turns notecard format csv to source,target format csv
import sys
import re
from itertools import groupby
# distribute lines into source,target format
# takes lines from stdin (format n1,n2,n3,...nm)
# returns formatted lines to stdout
# (format n1,n2 <line break> n1,n3 <line break> ... n1,nm <line break>)
def distribute_st(lines):
ret = []
for line in lines:
tokens = line.strip().split(',')
for num in range(1,len(tokens)):
ret.append(tokens[0]+','+tokens[num]+'\n')
return ret
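# Added illustration (not in the original script): one notecard row becomes one
# edge per remaining id, with the first id as the source, e.g.
#   distribute_st(["1,2,3\n"]) -> ["1,2\n", "1,3\n"]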
# replace consecutive commas with a single comma
def rm_multi_comma(lines):
mcomma_patt = re.compile(r',+')
ret = []
for line in lines:
ret.append(mcomma_patt.sub(',', line))
return ret
# remove characters "," " " and "\t" from the end of each line
def rm_ending_chars(lines):
char_patt = re.compile(r'[, \t]+$')
ret = []
for line in lines:
ret.append(char_patt.sub('', line))
return ret
def main(fin, fout_st, fout_weight):
    try:
        file_in = open(fin, 'r')
    except OSError:
        print('Could not open file '+fin)
        return
    try:
        file_out_st = open(fout_st, 'w')
    except OSError:
        print('Could not open file '+fout_st)
        return
    try:
        file_out_weight = open(fout_weight, 'w')
    except OSError:
        print('Could not open file '+fout_weight)
        return
# FORMATTING
# remove header
file_in.readline()
# get the lines of the file coming in
lines = file_in.readlines()
file_in.close()
# remove multiple commas, then remove ending chars ", \t"
# then distribute to source,target format
formatted_lines = distribute_st(rm_ending_chars(
rm_multi_comma(lines)))
# Analyze lines
# sort first, need ints
int_lines = []
for line in formatted_lines:
left,right = line.strip('\n').split(',')
int_lines.append([int(left), int(right)])
int_lines.sort()
print_lines = []
for int_line in int_lines:
print_lines.append(str(int_line)[1:-1].replace(' ', '')+'\n')
# get weight, the number of times an edge appears
weights = []
idx = 0
while (idx < len(print_lines)):
curr = print_lines[idx]
count = 0
while ((idx < len(print_lines)) and (curr == print_lines[idx])):
count += 1
idx += 1
weights.append(count)
# keep only unique values
unique_edges = []
for e in print_lines:
if e not in unique_edges:
unique_edges.append(e)
# write to csv (source,target and weights)
unique_edges.insert(0, 'Source,Target\n')
for line in unique_edges:
file_out_st.write(line)
file_out_st.close()
weights.insert(0, 'weight')
for line in weights:
file_out_weight.write(str(line)+'\n')
file_out_weight.close()
if __name__ == '__main__':
fin = input('Input filename (notecard csv): ')
fout_st = input('Output filename (source,target csv; duplicates will be overwritten): ')
fout_weight = input('Output filename (weight csv; duplicates will be overwritten): ')
main(fin, fout_st, fout_weight) | mit | 137,775,241,673,863,230 | 25.061947 | 90 | 0.644022 | false |
ellisonbg/nbgrader | nbgrader/preprocessors/execute.py | 1 | 1094 | from nbconvert.preprocessors import ExecutePreprocessor
from traitlets import Bool, List
from textwrap import dedent
from . import NbGraderPreprocessor
class UnresponsiveKernelError(Exception):
pass
class Execute(NbGraderPreprocessor, ExecutePreprocessor):
interrupt_on_timeout = Bool(True)
allow_errors = Bool(True)
raise_on_iopub_timeout = Bool(True)
extra_arguments = List([], config=True, help=dedent(
"""
A list of extra arguments to pass to the kernel. For python kernels,
this defaults to ``--HistoryManager.hist_file=:memory:``. For other
kernels this is just an empty list.
"""))
def preprocess(self, nb, resources):
kernel_name = nb.metadata.get('kernelspec', {}).get('name', 'python')
if self.extra_arguments == [] and kernel_name == "python":
self.extra_arguments = ["--HistoryManager.hist_file=:memory:"]
try:
output = super(Execute, self).preprocess(nb, resources)
except RuntimeError:
raise UnresponsiveKernelError()
return output
| bsd-3-clause | -1,926,327,517,012,041,000 | 32.151515 | 77 | 0.666362 | false |
JingheZ/shogun | examples/undocumented/python_modular/kernel_simple_locality_improved_string_modular.py | 26 | 1027 | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindat,testdat,5,5,1],[traindat,testdat,5,3,2]]
def kernel_simple_locality_improved_string_modular (fm_train_dna=traindat,fm_test_dna=testdat,
length=5,inner_degree=5,outer_degree=1 ):
from modshogun import StringCharFeatures, DNA
from modshogun import SimpleLocalityImprovedStringKernel, MSG_DEBUG
feats_train=StringCharFeatures(fm_train_dna, DNA)
#feats_train.io.set_loglevel(MSG_DEBUG)
feats_test=StringCharFeatures(fm_test_dna, DNA)
kernel=SimpleLocalityImprovedStringKernel(
feats_train, feats_train, length, inner_degree, outer_degree)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('SimpleLocalityImprovedString')
kernel_simple_locality_improved_string_modular(*parameter_list[0])
| gpl-3.0 | -7,032,150,948,050,990,000 | 33.233333 | 94 | 0.76631 | false |
pozetroninc/micropython | tests/basics/string_count.py | 18 | 1288 | try:
str.count
except AttributeError:
print("SKIP")
raise SystemExit
print("".count(""))
print("".count("a"))
print("a".count(""))
print("a".count("a"))
print("a".count("b"))
print("b".count("a"))
print("aaa".count(""))
print("aaa".count("a"))
print("aaa".count("aa"))
print("aaa".count("aaa"))
print("aaa".count("aaaa"))
print("aaaa".count(""))
print("aaaa".count("a"))
print("aaaa".count("aa"))
print("aaaa".count("aaa"))
print("aaaa".count("aaaa"))
print("aaaa".count("aaaaa"))
print("aaa".count("", 1))
print("aaa".count("", 2))
print("aaa".count("", 3))
print("aaa".count("", 1, 2))
print("asdfasdfaaa".count("asdf", -100))
print("asdfasdfaaa".count("asdf", -8))
print("asdf".count('s', True))
print("asdf".count('a', True))
print("asdf".count('a', False))
print("asdf".count('a', 1 == 2))
print("hello world".count('l'))
print("hello world".count('l', 5))
print("hello world".count('l', 3))
print("hello world".count('z', 3, 6))
print("aaaa".count('a'))
print("aaaa".count('a', 0, 3))
print("aaaa".count('a', 0, 4))
print("aaaa".count('a', 0, 5))
print("aaaa".count('a', 1, 5))
print("aaaa".count('a', -1, 5))
print("abbabba".count("abba"))
def t():
return True
print("0000".count('0', t()))
try:
'abc'.count(1)
except TypeError:
print('TypeError')
| mit | 2,608,261,781,602,174,500 | 20.830508 | 40 | 0.592391 | false |
ycool/apollo | modules/tools/replay/replay_file.py | 3 | 3847 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This program can replay a message pb file
"""
import os.path
import sys
import argparse
import glob
import time
from google.protobuf import text_format
from cyber_py import cyber
import common.proto_utils as proto_utils
from common.message_manager import PbMessageManager
g_message_manager = PbMessageManager()
def topic_publisher(topic, filename, period):
"""publisher"""
cyber.init()
node = cyber.Node("replay_file")
meta_msg = None
msg = None
if not topic:
print "Topic not specified, start to guess"
meta_msg, msg = g_message_manager.parse_file(filename)
topic = meta_msg.topic()
else:
meta_msg = g_message_manager.get_msg_meta_by_topic(topic)
if not meta_msg:
print("Failed to find meta info for topic: %s" % (topic))
return False
msg = meta_msg.parse_file(filename)
if not msg:
print("Failed to parse file[%s] with topic[%s]" % (filename,
topic))
return False
if not msg or not meta_msg:
print("Unknown topic: %s" % topic)
return False
writer = node.create_writer(topic, meta_msg.msg_type)
if period == 0:
while not cyber.is_shutdown():
raw_input("Press any key to publish one message...")
writer.write(msg)
print("Topic[%s] message published" % topic)
else:
print("started to publish topic[%s] message with rate period %s" %
(topic, period))
while not cyber.is_shutdown():
writer.write(msg)
time.sleep(period)
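# Usage sketch (hypothetical file path; assumes a running cyber environment):
#
#   python replay_file.py /apollo/data/planning_result.pb.txt \
#       --topic /apollo/planning --period 0.1
#
# With --period 0 the script waits for a key press before each publish, i.e.
# the step-by-step mode handled in the __main__ block below.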
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="replay a planning result pb file")
parser.add_argument(
"filename", action="store", type=str, help="planning result files")
parser.add_argument(
"--topic", action="store", type=str, help="set the planning topic")
parser.add_argument(
"--period",
action="store",
type=float,
default=0.1,
help="set the topic publish time duration")
args = parser.parse_args()
period = 0.0 # use step by step mode
if args.period: # play with a given period, (1.0 / frequency)
period = args.period
to_replay = args.filename
files = []
if os.path.isdir(args.filename):
files = glob.glob(args.filename + "/*")
i = 0
for f in files:
print "%d %s" % (i, f)
i += 1
        str_input = raw_input("Select message by number: ")
        try:
            selected_file = int(str_input)
            if selected_file < 0 or selected_file >= len(files):
                print "%d is an invalid number" % selected_file
                sys.exit(1)
        except ValueError:
            print "%s is not a number" % str_input
            sys.exit(1)
print "Will publish file[%d]: %s" % (selected_file,
files[selected_file])
to_replay = files[selected_file]
topic_publisher(args.topic, to_replay, period)
| apache-2.0 | -2,618,580,213,110,320,000 | 33.348214 | 79 | 0.580452 | false |
ynfin/Skynet | scripts/getCurrentNames.py | 1 | 2100 | import os
import sys
from lxml import etree
import time
skynetFiles_lower = []
skynetFiles = []
for path, subdirs, files in os.walk('/home/pi/var/www/disk/skynet'):
for name in files:
if name.endswith('.mp4'):
skynetFiles_lower.append(name.lower())
skynetFiles.append(os.path.join(path,name))
print name
print os.path.join(path,name)
print ' '
skynetFiles.sort(key=lambda x: os.path.getmtime(x), reverse=True)
with open('/home/pi/.flexget/config.yml') as f:
content = f.readlines()
sidepanel = []
for line in content:
if line.startswith(" - "):
cleanline = line.replace(" - ","").lower()
splitline = cleanline.split()
for filename in skynetFiles_lower:
if all(x in filename for x in splitline):
print filename + ' matches ',
print splitline
sidepanel.append(' '.join([str(x) for x in splitline]).title())
finalList = list(set(sidepanel))
finalList.sort()
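# Illustrative example (hypothetical entries): a config line such as
# "  - Some Show 720p" is lowercased and split into ["some", "show", "720p"];
# any file whose lowercased name contains all of those tokens, e.g.
# "Some.Show.720p.x264.mp4", counts as a match, and the title-cased token
# string ("Some Show 720p") ends up in finalList for the side panel.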
# Write XML file for AJAX sidepanel
response = etree.Element("response")
panelfiles = etree.SubElement(response, "panelfiles")
serverfiles = etree.SubElement(response, "serverfiles")
for item in finalList:
panelfile = etree.SubElement(panelfiles, "panelfile")
filename = etree.SubElement(panelfile, "panelfilename")
filename.text = str(item)
for item in skynetFiles:
serverfile = etree.SubElement(serverfiles, "serverfile")
filename = etree.SubElement(serverfile, "serverfilename")
filepath = etree.SubElement(serverfile, "serverfilepath")
filedate = etree.SubElement(serverfile, "serverfiledate")
filename.text = str(os.path.basename(item))
filepath.text = str(item).replace("/home/pi/var/www/","")
filedate.text = str(time.ctime(os.path.getmtime(item)))
print(etree.tostring(response, pretty_print=True))
# Write the XML to the output file
with open('/home/pi/var/www/skynetcontent.xml', 'w') as output_file:
output_file.write('<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>\n')
output_file.write(etree.tostring(response, pretty_print = True))
print "xml written..."
| apache-2.0 | -2,144,172,082,374,635,800 | 31.8125 | 83 | 0.688095 | false |
bitpay/bitpay-python | examples/test_merchant_facade.py | 1 | 3843 | from bitpay.exceptions import *
import bitpay.key_utils as bku
from bitpay.client import *
import pprint
import requests
import json
import os.path
import sys
#API_HOST = "https://bitpay.com" #for production, live bitcoin
API_HOST = "https://test.bitpay.com" #for testing, testnet bitcoin
KEY_FILE = "tmp/key.priv"
TOKEN_FILE = "tmp/token.priv"
# check if there is a preexisting key file
if os.path.isfile(KEY_FILE):
f = open(KEY_FILE, 'r')
key = f.read()
f.close()
print("Creating a bitpay client using existing private key from disk.")
else:
key = bku.generate_pem()
f = open(KEY_FILE, 'w')
f.write(key)
f.close()
client = Client(API_HOST, False, key)
def fetch_token(facade):
if os.path.isfile(TOKEN_FILE + facade):
f = open(TOKEN_FILE + facade, 'r')
token = f.read()
f.close()
print("Reading " + facade + " token from disk.")
#global client
#client = Client(API_HOST, False, key, {facade: token})
client.tokens[facade] = token
else:
pairingCode = client.create_token(facade)
print("Creating " + facade + " token.")
print("Please go to: %s/dashboard/merchant/api-tokens then enter \"%s\" then click the \"Find\" button, then click \"Approve\"" % (API_HOST, pairingCode))
if int(sys.version[0]) == 3:
input("When you've complete the above, hit enter to continue...")
else:
raw_input("When you've complete the above, hit enter to continue...")
print("token is: %s" % client.tokens[facade])
f = open(TOKEN_FILE + facade, 'w')
f.write(client.tokens[facade])
f.close()
def get_from_bitpay_api(client, uri, token):
payload = "?token=%s" % token
xidentity = bku.get_compressed_public_key_from_pem(client.pem)
xsignature = bku.sign(uri + payload, client.pem)
headers = {"content-type": "application/json",
"X-Identity": xidentity,
"X-Signature": xsignature, "X-accept-version": "2.0.0"}
try:
pp.pprint(headers)
print(uri + payload)
response = requests.get(uri + payload, headers=headers, verify=client.verify)
except Exception as pro:
raise BitPayConnectionError(pro.args)
if response.ok:
return response.json()['data']
client.response_error(response)
"""
POST to any resource
Make sure to include the proper token in the params
"""
def post_to_bitpay_api(client, uri, resource, params):
payload = json.dumps(params)
uri = uri + "/" + resource
    xidentity = bku.get_compressed_public_key_from_pem(client.pem)
    xsignature = bku.sign(uri + payload, client.pem)
headers = {"content-type": "application/json",
"X-Identity": xidentity,"X-Signature": xsignature,
"X-accept-version": "2.0.0"}
try:
response = requests.post(uri, data=payload, headers=headers,
verify=client.verify)
except Exception as pro:
raise BitPayConnectionError(pro.args)
if response.ok:
return response.json()['data']
client.response_error(response)
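# Usage sketch (assumes a paired merchant token is already on disk; the
# invoice id below is a placeholder):
#
#   fetch_token("merchant")
#   invoice = get_from_bitpay_api(client, client.uri + "/invoices/<invoiceId>",
#                                 client.tokens["merchant"])
#
# Both helpers sign the exact request string (URI plus payload) with the local
# private key, so the X-Signature header only matches that one request.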
fetch_token("merchant")
#Now we assume that the pairing code that we generated along with the crypto keys is paired with your merchant account
#
print("We will create an invoice using the merchant facade")
invoice = client.create_invoice({"price": 50.00, "currency": "USD", "token": client.tokens['merchant']})
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(invoice)
print("hopefully the above looks OK?")
print("continuing if we can...")
invoiceId = invoice['id']
print("**")
print("Now fetching an invoice with invoiceId " + invoiceId)
print("**")
token = client.tokens['merchant']
invoice = get_from_bitpay_api(client, client.uri + "/invoices/" + invoiceId, token)
pp.pprint(invoice)
| mit | 1,352,606,894,807,645,400 | 33.3125 | 164 | 0.644288 | false |
yb-kim/osSim | script/micro.nSet.py | 1 | 4967 | import sys
import datetime
import os
from subprocess import call
import re
nCores = [i for i in range(10, 110, 10)] + [i for i in range(150, 1050, 50)]
nSets = [1, 2, 4, 8, 16, 32, 64, 128]
ipcCosts = [(3, 17, 17)]
nServices = 4 #except NS
configFilePath = '/home/ybkim/workspace/osSim/config/aim7_shared'
outFilePath = './out'
data = []
if not os.path.exists("./out"):
call(["mkdir", "out"])
if not os.path.exists("./out/sim_logs"):
call(["mkdir", "./out/sim_logs"])
if os.path.exists("./config"):
call(["rm", "-r", "./config"])
call(["cp", "-r", configFilePath, "./config"])
with open(outFilePath+"/result.out", "w") as resultFile:
for n in nCores:
with open(configFilePath+'/system.json') as cfgFile, \
open("./config/system.json", "w") as tempCfgFile:
#open("./config/mono/apps.json", "w+") as tempAppsFile:
cfg = cfgFile.read()
pat = r'("nCores"): (\d+)'
replacement = r'\1: %d' % n
cfg = re.sub(pat, replacement, cfg)
pat = r'("nApps"): (\d+)'
replacement = r'\1: %d' % (n*2)
cfg = re.sub(pat, replacement, cfg)
pat = r'("osType"): (.+)'
replacement = r'\1: "micro",'
cfg = re.sub(pat, replacement, cfg)
pat = r'("unitTick"): (.+)'
replacement = r'\1: 10,'
cfg = re.sub(pat, replacement, cfg)
pat = r'("maxTick"): (.+)'
replacement = r'\1: 1000000,'
cfg = re.sub(pat, replacement, cfg)
tempCfgFile.write(cfg)
for i in nSets:
if i*(nServices+1) >= n:
data.append((n, i, 0, 0, 0, 0))
continue;
for ipcCost in ipcCosts:
with open(outFilePath+"/sim_logs/result_%d_cores_nSet_%d_ipcCost_%d.txt" % (n, i, ipcCost[0]), "w+") as outFile, \
open(configFilePath+"/micro/system.json", "r") as appsBaseFile, \
open("./config/micro/system.json", "w") as appsFile:
appsConfig = appsBaseFile.read()
pattern = r'("nSet"): (\d*)'
appsConfig = re.sub(pattern, '"nSet": '+str(i), appsConfig)
pattern = r'("ipcCost_die"): (\d*)'
appsConfig = re.sub(pattern, '"ipcCost_die": '+str(ipcCost[0]), appsConfig)
pattern = r'("ipcCost_hop"): (\d*)'
appsConfig = re.sub(pattern, '"ipcCost_hop": '+str(ipcCost[1]), appsConfig)
pattern = r'("ipcCost_2hops"): (\d*)'
appsConfig = re.sub(pattern, '"ipcCost_2hops": '+str(ipcCost[2]), appsConfig)
appsFile.write(appsConfig)
appsFile.close()
resultFile.write("simulating with nCores = %d, nSet = %f\n" % (n, i))
print "start running #cores: %d, nSet: %d, ipcCost: (%d, %d)" % (n, i, ipcCost[0], ipcCost[1])
call(["../osSim", "./config/"], stdout = outFile)
outFile.seek(-100, 2)
tail = outFile.read()
appsProcessed = int(re.search(r'total apps processed: (\d+)\n', tail).groups()[0])
resultFile.write("total processed apps = %d\n" % appsProcessed)
resultFile.write("processed apps / core = %f\n\n" % (float(appsProcessed)/float(n)))
data.append((n, i, appsProcessed))
print (n, i, appsProcessed)
call(["rm", outFilePath+"/sim_logs/result_%d_cores_nSet_%d_ipcCost_%d.txt" % (n, i, ipcCost[0])])
resultFile.write('\n')
"""
with open("data_plot.dat", "w") as plotData:
plotData.write("%s %s %s %s %s %s\n" % ("#nCores", "nSets", "ipcCost_inDie", "ipcCost_hop", "ipcCost_2hops", "proccessed"))
for line in data:
plotData.write("%d %d %d %d %d %d\n" % (line[0], line[1], line[2], line[3], line[4], line[5]))
"""
with open("data_plot.dat", "w") as plotData:
plotData.write("%s %s\n" % ("nSet", "proccessed"))
for line in data:
plotData.write("%d %d %d\n" % (line[0], line[1], line[2]))
#draw plot
with open("gnuplot_in", "w") as commands:
commands.write("""
set xlabel "nCores"
set ylabel "terminated jobs"
set term png
set output "plot.png"
lastx = NaN; lasty = NaN
plot """)
for i in nSets:
commands.write('"data_plot.dat" u ($2==%d?($1, lastx=$1):lastx):($2==%d?($3, lasty=$3):lasty) title "nSet=%d" with linespoints' % (i, i, i))
if i != nSets[-1]:
commands.write(',lastx=NaN, lasty=NaN\\')
commands.write('\n')
call(["gnuplot", "gnuplot_in"])
call(["mv", "gnuplot_in", "data_plot.dat", outFilePath])
now = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
call(["cp", "plot.png", "/home/ybkim/dropbox/Inbox/osSim/plot_%s.png" % (now)])
call(["cp", outFilePath + "/data_plot.dat", "/home/ybkim/dropbox/Inbox/osSim/data_%s.dat" % (now)])
| mit | -5,662,840,301,564,728,000 | 37.207692 | 148 | 0.521643 | false |
ScreamingUdder/mantid | scripts/SANS/sans/state/state_functions.py | 3 | 4652 | """Set of general purpose functions which are related to the SANSState approach."""
from __future__ import (absolute_import, division, print_function)
from sans.common.enums import (DetectorType)
from sans.common.xml_parsing import (get_monitor_names_from_idf_file, get_named_elements_from_ipf_file)
# ----------------------------------------------------------------------------------------------------------------------
# General functions
# ----------------------------------------------------------------------------------------------------------------------
def is_pure_none_or_not_none(elements_to_check):
"""
    Checks whether the entries of a list are either all None or all non-None
:param elements_to_check: a list with entries to check
:return: True if the list contains either only None or only non-None elements, else False
"""
are_all_none_or_all_not_none = True
if len(elements_to_check) == 0:
return are_all_none_or_all_not_none
return all(element is not None for element in elements_to_check) or \
all(element is None for element in elements_to_check) # noqa
def is_not_none_and_first_larger_than_second(elements_to_check):
"""
    This function checks that both entries are not None and, if so, whether the first element is larger than or equal
    to the second element.
:param elements_to_check: a list with two entries. The first is the lower bound and the second entry is the upper
bound
:return: False if at least one input is None or if both are not None and the first element is smaller than the
second else True
"""
is_invalid = True
if len(elements_to_check) != 2:
return is_invalid
if any(element is None for element in elements_to_check):
is_invalid = False
return is_invalid
if elements_to_check[0] < elements_to_check[1]:
is_invalid = False
return is_invalid
def one_is_none(elements_to_check):
return any(element is None for element in elements_to_check)
def validation_message(error_message, instruction, variables):
"""
Generates a validation message for the SANSState.
:param error_message: A message describing the error.
:param instruction: A message describing what to do to fix the error
:param variables: A dictionary which contains the variable names and values which are involved in the error.
:return: a formatted validation message string.
"""
message = ""
for key, value in sorted(variables.items()):
message += "{0}: {1}\n".format(key, value)
message += instruction
return {error_message: message}
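# Illustrative call (values invented for the example):
#
#   validation_message("Incorrect wavelength bounds",
#                      "Please provide a lower bound smaller than the upper bound.",
#                      {"wavelength_low": 10, "wavelength_high": 2})
#
# returns a single-entry dict whose value lists the variables in sorted order
# followed by the instruction:
#
#   {"Incorrect wavelength bounds":
#    "wavelength_high: 2\nwavelength_low: 10\nPlease provide a lower bound smaller than the upper bound."}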
def set_detector_names(state, ipf_path, invalid_detector_types=None):
"""
Sets the detectors names on a State object which has a `detector` map entry, e.g. StateMask
:param state: the state object
:param ipf_path: the path to the Instrument Parameter File
:param invalid_detector_types: a list of invalid detector types which don't exist for the instrument
"""
if invalid_detector_types is None:
invalid_detector_types = []
lab_keyword = DetectorType.to_string(DetectorType.LAB)
hab_keyword = DetectorType.to_string(DetectorType.HAB)
detector_names = {lab_keyword: "low-angle-detector-name",
hab_keyword: "high-angle-detector-name"}
detector_names_short = {lab_keyword: "low-angle-detector-short-name",
hab_keyword: "high-angle-detector-short-name"}
names_to_search = []
names_to_search.extend(list(detector_names.values()))
names_to_search.extend(list(detector_names_short.values()))
found_detector_names = get_named_elements_from_ipf_file(ipf_path, names_to_search, str)
for detector_type in state.detectors:
try:
if DetectorType.from_string(detector_type) in invalid_detector_types:
continue
detector_name_tag = detector_names[detector_type]
detector_name_short_tag = detector_names_short[detector_type]
detector_name = found_detector_names[detector_name_tag]
detector_name_short = found_detector_names[detector_name_short_tag]
except KeyError:
continue
state.detectors[detector_type].detector_name = detector_name
state.detectors[detector_type].detector_name_short = detector_name_short
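# Hedged usage sketch (the state object and IPF path are placeholders):
#
#   set_detector_names(mask_state, "/path/to/instrument_Parameters.xml")
#
# after which each entry of state.detectors carries the long and short
# detector names read from the instrument parameter file.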
def set_monitor_names(state, idf_path, invalid_monitor_names=None):
if invalid_monitor_names is None:
invalid_monitor_names = []
monitor_names = get_monitor_names_from_idf_file(idf_path, invalid_monitor_names)
state.monitor_names = monitor_names
| gpl-3.0 | -6,352,856,074,351,242,000 | 41.678899 | 120 | 0.650688 | false |
zeratul2099/plist-qt | dialogs.py | 1 | 30532 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# dialogs for plist-qt
#
from datetime import datetime, timedelta, date
import os
import smtplib
from email.mime.text import MIMEText
from email.header import Header
if os.environ.get('QT_API') == 'pyside':
from PySide.QtCore import *
from PySide.QtGui import *
else:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
from puente.plist.models import Customer, PriceList, PlistSettings, Transaction
from puente.plist.views import renderPlot
from puente.pmenu.models import Category, MenuItem
from puente.pmenu.views import renderPdf
from primitives import *
class SettingsDialog(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowIcon(QIcon('img/32x32/configure.png'))
self.setWindowTitle('Settings')
self.settings = None
self.c_prices = list()
self.p_prices = list()
layout = QVBoxLayout()
form_widget = QWidget()
form_layout = QFormLayout()
self.limit_edit = QLineEdit()
self.team_limit_edit = QLineEdit()
self.last_paid_limit_edit = QLineEdit()
self.mail_sender = QLineEdit()
self.mail_server = QLineEdit()
self.mail_password = QLineEdit()
self.mail_password.setEchoMode(QLineEdit.Password)
form_layout.addRow(QLabel('Limit:'), self.limit_edit)
form_layout.addRow(QLabel('Teamlimit:'), self.team_limit_edit)
form_layout.addRow(QLabel('Last Paid Limit (days):'), self.last_paid_limit_edit)
form_layout.addRow(QLabel('Email-Sender:'), self.mail_sender)
form_layout.addRow(QLabel('Email-Server:'), self.mail_server)
form_layout.addRow(QLabel('Email-Password:'), self.mail_password)
form_widget.setLayout(form_layout)
prices_widget = QWidget()
prices_layout = QHBoxLayout()
self.c_price_widget = PriceBox('Customer Prices')
self.p_price_widget = PriceBox('Team Prices')
prices_layout.addWidget(self.c_price_widget)
prices_layout.addWidget(self.p_price_widget)
prices_widget.setLayout(prices_layout)
button_box = QDialogButtonBox()
save_button = button_box.addButton(button_box.Save)
reset_button = button_box.addButton(button_box.Reset)
close_button = button_box.addButton(button_box.Close)
self.connect(save_button, SIGNAL('clicked()'), self.save_clicked)
self.connect(reset_button, SIGNAL('clicked()'), self.reset_clicked)
self.connect(close_button, SIGNAL('clicked()'), self.close_clicked)
layout.addWidget(form_widget)
layout.addWidget(prices_widget)
layout.addWidget(button_box)
self.setLayout(layout)
def update(self, settings, c_prices, p_prices):
self.settings = settings
self.c_prices = c_prices
self.p_prices = p_prices
self.c_price_widget.prices = c_prices
self.p_price_widget.prices = p_prices
self.limit_edit.setText(str(settings.custLimit))
self.team_limit_edit.setText(str(settings.teamLimit))
self.last_paid_limit_edit.setText(str(settings.markLastPaid))
if settings.mailSender:
self.mail_sender.setText(settings.mailSender)
if settings.mailServer:
self.mail_server.setText(settings.mailServer)
if settings.mailPassword:
self.mail_password.setText(settings.mailPassword)
self.c_price_widget.list.clear()
self.p_price_widget.list.clear()
for cp in c_prices:
self.c_price_widget.list.addItem('%.2f'%(cp.price/100.0)+u' \u20AC')
for pp in p_prices:
self.p_price_widget.list.addItem('%.2f'%(pp.price/100.0)+u' \u20AC')
def save_clicked(self):
self.settings.custLimit = int(self.limit_edit.text())
self.settings.teamLimit = int(self.team_limit_edit.text())
self.settings.markLastPaid = int(self.last_paid_limit_edit.text())
self.settings.mailSender = str(self.mail_sender.text())
self.settings.mailServer = str(self.mail_server.text())
self.settings.mailPassword = str(self.mail_password.text())
self.settings.save()
self.emit(SIGNAL('settingsChanged()'))
def close_clicked(self):
self.hide()
def reset_clicked(self):
settings = PlistSettings.objects.all()[0]
prices = PriceList.objects.filter(isPuente=False, settings=settings)
p_prices = PriceList.objects.filter(isPuente=True, settings=settings)
self.update(settings, prices, p_prices)
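# A minimal usage sketch (assumption: a QApplication and the Django models
# used above are already set up elsewhere in plist-qt):
#
#   dialog = SettingsDialog()
#   settings = PlistSettings.objects.all()[0]
#   dialog.update(settings,
#                 PriceList.objects.filter(isPuente=False, settings=settings),
#                 PriceList.objects.filter(isPuente=True, settings=settings))
#   dialog.show()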
class PriceBox(QWidget):
def __init__(self, label):
QWidget.__init__(self)
self.prices = None
layout = QVBoxLayout()
layout.addWidget(QLabel(label))
button_widget = QWidget()
button_layout = QHBoxLayout()
self.add_button = QPushButton(QIcon.fromTheme('list-add'), 'Add')
self.del_button = QPushButton(QIcon.fromTheme('list-remove'), 'Del')
self.connect(self.del_button, SIGNAL('clicked()'), self.del_price)
button_layout.addWidget(self.add_button)
button_layout.addWidget(self.del_button)
self.new_price_field = QLineEdit()
self.new_price_field.setPlaceholderText(u'Price in \u20AC')
self.new_price_field.setValidator(QDoubleValidator(0.0,100.0,2, self.new_price_field))
layout.addWidget(self.new_price_field)
button_widget.setLayout(button_layout)
layout.addWidget(button_widget)
self.list = QListWidget()
layout.addWidget(self.list)
self.setLayout(layout)
def del_price(self):
idx = self.list.currentRow()
self.prices[idx].delete()
self.list.takeItem(idx)
self.emit(SIGNAL('settingsChanged()'))
class NewCustomerDialog(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowIcon(QIcon('img/32x32/user-group-new.png'))
self.setWindowTitle('New Customer')
self.resize(280,160)
layout = QFormLayout()
self.setWindowTitle('New Customer')
self.name_field = QLineEdit()
self.email_field = EMailEdit()
self.room_field = QLineEdit()
self.team_box = QCheckBox()
self.locked_box = QCheckBox()
layout.addRow(QLabel('Name:'), self.name_field)
layout.addRow(QLabel('EMail:'), self.email_field)
layout.addRow(QLabel('Room-No:'), self.room_field)
layout.addRow(QLabel('Team:'), self.team_box)
layout.addRow(QLabel('Locked:'), self.locked_box)
button_box = QDialogButtonBox()
ok_button = button_box.addButton(button_box.Ok)
cancel_button = button_box.addButton(button_box.Cancel)
self.connect(ok_button, SIGNAL('clicked()'), self.ok_clicked)
self.connect(cancel_button, SIGNAL('clicked()'), self.cancel_clicked)
layout.addWidget(button_box)
self.setLayout(layout)
def ok_clicked(self):
weekday = date.today().weekday()
last_sunday = date.today() - timedelta(weekday+1)
if self.name_field.text() and self.room_field.text() and self.email_field.text():
new_customer = Customer(name=self.name_field.text(),
room=self.room_field.text(),
email=self.email_field.text(),
depts=0,
weeklySales=0,
salesSince=last_sunday,
lastPaid=datetime.now(),
dept_status=0,
isPuente=self.team_box.isChecked(),
locked=self.locked_box.isChecked())
new_customer.save()
self.emit(SIGNAL('newCustomer()'))
self.hide()
self.empty_fields()
def cancel_clicked(self):
self.hide()
self.empty_fields()
def empty_fields(self):
self.name_field.setText('')
self.email_field.setText('')
self.room_field.setText('')
self.team_box.setCheckState(False)
self.locked_box.setCheckState(False)
class CustomerDetailsDialog(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setWindowIcon(QIcon('img/32x32/user-properties.png'))
self.customer = None
meta_widget = QWidget()
meta_layout = QVBoxLayout()
meta_widget.setLayout(meta_layout)
self.msg_box = QMessageBox()
self.msg_box.setWindowTitle('Message')
self.msg_box.setWindowIcon(QIcon.fromTheme('dialog-information'))
form_layout = QFormLayout()
form_layout.setFieldGrowthPolicy(QFormLayout.FieldsStayAtSizeHint)
self.stacks = list()
self.name_stack = QStackedWidget()
self.email_stack = QStackedWidget()
self.room_stack = QStackedWidget()
self.team_stack = QStackedWidget()
self.locked_stack = QStackedWidget()
self.comment_stack = QStackedWidget()
self.button_stack = QStackedWidget()
button_container = QWidget()
button_container_layout = QHBoxLayout()
button_container_layout.addWidget(self.button_stack)
button_container_layout.addStretch()
button_container.setLayout(button_container_layout)
self.stacks.append(self.name_stack)
self.stacks.append(self.email_stack)
self.stacks.append(self.room_stack)
self.stacks.append(self.team_stack)
self.stacks.append(self.locked_stack)
self.stacks.append(self.comment_stack)
self.stacks.append(self.button_stack)
self.name_field = QLabel()
self.name_edit_field = QLineEdit()
self.name_stack.addWidget(self.name_field)
self.name_stack.addWidget(self.name_edit_field)
self.depts_field = DeptLabel()
self.weekly_sales_field = QLabel()
self.sales_since_field = QLabel()
self.email_field = QLabel()
self.email_edit_field = EMailEdit()
self.email_stack.addWidget(self.email_field)
self.email_stack.addWidget(self.email_edit_field)
self.room_field = QLabel()
self.room_edit_field = QLineEdit()
self.room_stack.addWidget(self.room_field)
self.room_stack.addWidget(self.room_edit_field)
self.team_label = QLabel()
self.team_box = QCheckBox()
self.team_stack.addWidget(self.team_label)
self.team_stack.addWidget(self.team_box)
self.locked_label = QLabel()
self.locked_box = QCheckBox()
self.locked_stack.addWidget(self.locked_label)
self.locked_stack.addWidget(self.locked_box)
self.comment_field = QLabel()
self.comment_edit_field = QLineEdit()
self.comment_stack.addWidget(self.comment_field)
self.comment_stack.addWidget(self.comment_edit_field)
form_layout.addRow(QLabel('Name:'), self.name_stack)
form_layout.addRow(QLabel('Depts:'), self.depts_field)
form_layout.addRow(QLabel('Weekly Sales:'), self.weekly_sales_field)
form_layout.addRow(QLabel('...since:'), self.sales_since_field)
form_layout.addRow(QLabel('EMail:'), self.email_stack)
form_layout.addRow(QLabel('Room-No:'), self.room_stack)
form_layout.addRow(QLabel('Team:'), self.team_stack)
form_layout.addRow(QLabel('Locked:'), self.locked_stack)
form_layout.addRow(QLabel('Comment:'), self.comment_stack)
edit_button = QPushButton(QIcon('img/16x16/configure.png'), 'Edit')
save_button = QPushButton(QIcon.fromTheme('document-save'), 'Save')
self.button_stack.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
#edit_button.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
#save_button.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
self.button_stack.addWidget(edit_button)
self.button_stack.addWidget(save_button)
mail_button = QCommandLinkButton('Notification Mail', 'Send Email')
mail_button.setIcon(QIcon.fromTheme('mail-send'))
mail_button.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
form_layout.addRow(button_container)
self.stats_image = StatsDialog(False)
button_box = QDialogButtonBox()
ok_button = button_box.addButton(button_box.Ok)
self.connect(edit_button, SIGNAL('clicked()'), self.show_edit_fields)
self.connect(save_button, SIGNAL('clicked()'), self.save_edit)
self.connect(mail_button, SIGNAL('clicked()'), self.send_email)
self.connect(ok_button, SIGNAL('clicked()'), self.ok_clicked)
form_widget = QWidget()
form_widget.setLayout(form_layout)
form_widget.setSizePolicy(QSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum))
details_widget = QWidget()
details_layout = QHBoxLayout()
details_widget.setLayout(details_layout)
mail_widget = QWidget()
mail_layout = QVBoxLayout()
mail_widget.setLayout(mail_layout)
mail_layout.addWidget(mail_button)
mail_layout.addStretch()
details_layout.addWidget(form_widget)
details_layout.addWidget(mail_widget)
self.stats_image.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding))
self.stats_image.tabs.insertTab(0, details_widget, QIcon.fromTheme('document-open'), 'Details')
self.stats_image.tabs.setCurrentIndex(0)
#meta_layout.addWidget(form_widget)
meta_layout.addWidget(self.stats_image)
meta_layout.addWidget(button_box)
meta_layout.setStretchFactor(self.stats_image,5)
#
self.setLayout(meta_layout)
def show_edit_fields(self):
for stack in self.stacks:
stack.setCurrentIndex(1)
def save_edit(self):
self.customer.name = unicode(self.name_edit_field.text())
self.customer.email = self.email_edit_field.text()
self.customer.room = self.room_edit_field.text()
self.customer.isPuente = self.team_box.isChecked()
self.customer.locked = self.locked_box.isChecked()
self.customer.comment = self.comment_edit_field.text()
self.customer.save()
self.emit(SIGNAL('customerEdited()'))
self.update(self.customer)
def update(self, customer):
self.setWindowTitle(customer.name + ' Details')
self.empty_fields()
self.customer = customer
transactions = Transaction.objects.filter(customer=customer).order_by("time").reverse()
self.stats_image.update(transactions)
if self.stats_image.canvas:
self.resize(self.stats_image.canvas.width(), self.stats_image.canvas.height()+75)
self.setWindowTitle(customer.name + ' details')
self.name_field.setText(customer.name)
self.name_edit_field.setText(customer.name)
self.depts_field.update(customer)
self.weekly_sales_field.setText(str(customer.weeklySales) + u' \u20AC')
self.sales_since_field.setText(customer.salesSince.strftime('%d.%m.%Y'))
self.email_field.setText(customer.email)
self.email_edit_field.setText(customer.email)
self.room_field.setText(customer.room)
self.room_edit_field.setText(customer.room)
self.team_label.setText('Yes' if customer.isPuente else 'No')
self.team_box.setChecked(True if customer.isPuente else False)
self.locked_label.setText('Yes' if customer.locked else 'No')
self.locked_box.setChecked(True if customer.locked else False)
self.comment_field.setText(customer.comment)
self.comment_edit_field.setText(customer.comment)
def customer_updated(self):
if self.customer:
customer = Customer.objects.get(name=self.customer.name)
self.update(customer)
def ok_clicked(self):
self.hide()
self.empty_fields()
def empty_fields(self):
for stack in self.stacks:
stack.setCurrentIndex(0)
self.name_field.setText('')
self.email_field.setText('')
self.room_field.setText('')
self.name_edit_field.setText('')
self.email_edit_field.setText('')
self.room_edit_field.setText('')
self.team_label.setText('')
self.locked_label.setText('')
self.team_box.setChecked(False)
self.locked_box.setChecked(False)
self.comment_field.setText('')
self.comment_edit_field.setText('')
def send_email(self):
self.confirm_dialog = ConfirmationDialog('Really send Email to ' + self.customer.name + '?')
self.confirm_dialog.setModal(True)
self.connect(self.confirm_dialog.button_box, SIGNAL('rejected()'), self.confirm_dialog.hide)
self.connect(self.confirm_dialog.button_box, SIGNAL('accepted()'), self.send_email_confirmed)
self.confirm_dialog.show()
def send_email_confirmed(self):
# construct mail ...
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
try:
settings = PlistSettings.objects.all()[0]
fr = settings.mailSender
to = self.customer.email
text = "Hallo "
text += "%s" %(self.customer.name)
text += ",\n\n"
text += u"du hast in der Pünte %.2f Euro Schulden.\n" %(self.customer.depts)
text += u"Bitte bezahle diese bei deinem nächsten Besuch.\n"
text += u"Viele Grüße, dein Püntenteam"
# comment these two lines out to remove signature from mail
#command = u"echo '%s' | gpg2 --clearsign --passphrase %s --batch -u 'Pünte OSS' --yes -o -"%(text, config.PASSPHRASE)
#text = os.popen(command.encode('utf-8')).read()
#msg = Message()
msg = MIMEText(text, 'plain', _charset='UTF-8')
#msg.set_payload(text)
msg["Subject"] = Header("[Pünte]Zahlungserinnerung", 'utf8')
fromhdr = Header(u"Pünte", 'utf8')
fromhdr.append(u"<%s>"%fr, 'ascii')
msg["From"] = fromhdr
tohdr = Header("%s"%self.customer.name, 'utf8')
tohdr.append("<%s>" %( self.customer.email), 'ascii')
msg["To"] = tohdr
date = datetime.now()
msg["Date"] = date.strftime("%a, %d %b %Y %H:%M:%S")
# ... and try to send it
#
print 'connecting...'
server = str(settings.mailServer.partition(':')[0])
port = int(settings.mailServer.partition(':')[2])
print server, port
try:
s = smtplib.SMTP(server, port)
s.ehlo()
s.starttls()
print 'logging in...'
s.login(fr, str(settings.mailPassword))
print 'sending...'
s.sendmail(fr, self.customer.email, msg.as_string())
self.msg_box.setText("Erinnerungsmail an %s verschickt" %(self.customer.name))
s.quit()
print 'connection terminated'
except Exception, e:
print e
s.quit()
self.msg_box.setText("Fehler beim Versenden")
finally:
self.confirm_dialog.hide()
QApplication.restoreOverrideCursor()
self.msg_box.show()
class StatsDialog(QDialog):
def __init__(self, standalone=True):
QDialog.__init__(self)
self.setWindowIcon(QIcon('img/32x32/view-statistics.png'))
self.setWindowTitle('Statistics')
layout = QVBoxLayout()
self.canvas_width = 0
self.canvas_height = 0
self.canvas = None
self.standalone = standalone
self.page = 0
self.len_page = 100
self.fig = None
self.tabs = QTabWidget()
self.stats_image = QWidget()
self.tabs.addTab(self.stats_image, QIcon('img/32x32/view-investment.png'), 'Statistics')
self.list_container = QWidget()
list_layout = QVBoxLayout()
self.list_pager = QWidget()
pager_layout = QHBoxLayout()
self.page_num_label = QLabel()
first_button = QPushButton(QIcon.fromTheme('arrow-left-double', QIcon.fromTheme('go-first')), '')
prev_button = QPushButton(QIcon.fromTheme('arrow-left', QIcon.fromTheme('go-previous')), '')
next_button = QPushButton(QIcon.fromTheme('arrow-right', QIcon.fromTheme('go-next')), '')
last_button = QPushButton(QIcon.fromTheme('arrow-right-double', QIcon.fromTheme('go-last')), '')
self.connect(prev_button, SIGNAL('clicked()'), self.prev_page)
self.connect(next_button, SIGNAL('clicked()'), self.next_page)
self.connect(first_button, SIGNAL('clicked()'), self.first_page)
self.connect(last_button, SIGNAL('clicked()'), self.last_page)
pager_layout.addStretch()
pager_layout.addWidget(first_button)
pager_layout.addWidget(prev_button)
pager_layout.addWidget(self.page_num_label)
pager_layout.addWidget(next_button)
pager_layout.addWidget(last_button)
pager_layout.addStretch()
self.list_widget = QTableWidget()
self.list_widget.insertColumn(0)
self.list_widget.insertColumn(0)
self.list_widget.insertColumn(0)
self.list_widget.setColumnWidth(0, 150)
self.list_widget.setColumnWidth(2, 150)
self.list_widget.setHorizontalHeaderItem(0, QTableWidgetItem('Name'))
self.list_widget.setHorizontalHeaderItem(1, QTableWidgetItem('Price'))
self.list_widget.setHorizontalHeaderItem(2, QTableWidgetItem('Time/Date'))
self.list_pager.setLayout(pager_layout)
list_layout.addWidget(self.list_pager)
list_layout.addWidget(self.list_widget)
self.list_container.setLayout(list_layout)
self.tabs.addTab(self.list_container, QIcon('img/32x32/view-income-categories.png'), 'List')
layout.addWidget(self.tabs)
if self.standalone:
button_box = QDialogButtonBox()
ok_button = button_box.addButton(button_box.Ok)
self.connect(ok_button, SIGNAL('clicked()'), self.ok_clicked)
layout.addWidget(button_box)
self.setLayout(layout)
def update(self, transactions):
self.transactions = transactions
self.page = 0
self.update_list(self.transactions)
fig = renderPlot(self.transactions)
if self.canvas:
self.canvas.setParent(None)
self.canvas.destroy()
try:
self.canvas = FigureCanvasQTAgg(fig)
self.hide()
self.canvas.setParent(self.stats_image)
self.show()
self.resize(self.canvas.width(),self.canvas.height()+100)
except AttributeError:
# pass if there are still no transactions
pass
def update_list(self, transactions):
self.transactions = transactions
self.list_widget.clear()
self.list_widget.setRowCount(0)
reverse_transactions = transactions.reverse()
self.page_num_label.setText(str(self.page+1) + '/' + str(len(transactions)/self.len_page+1))
for idx, transaction in enumerate(transactions[self.page*self.len_page:(self.page+1)*self.len_page]):
self.list_widget.insertRow(idx)
self.list_widget.setCellWidget(idx, 0, QLabel(transaction.customer.name))
self.list_widget.setCellWidget(idx, 1, QLabel(str(transaction.price) + u' \u20AC'))
self.list_widget.setCellWidget(idx, 2, QLabel(transaction.time.strftime('%H:%M:%S, %d.%m.%Y')))
def first_page(self):
if self.page != 0:
self.page = 0
self.update_list(self.transactions)
def prev_page(self):
if self.page > 0:
self.page -= 1
self.update_list(self.transactions)
def next_page(self):
if self.page < len(self.transactions)/self.len_page:
self.page += 1
self.update_list(self.transactions)
def last_page(self):
if self.page != len(self.transactions)/self.len_page:
self.page = len(self.transactions)/self.len_page
self.update_list(self.transactions)
def ok_clicked(self):
self.hide()
class MenuEditDialog(QWidget):
def __init__(self):
QWidget.__init__(self)
self.setWindowTitle('Menu Edit')
self.setWindowIcon(QIcon('img/32x32/wine.png'))
self.resize(600,600)
layout = QVBoxLayout()
self.add_cat_field = QLineEdit()
add_cat_button = QPushButton(QIcon.fromTheme('list-add'), 'Add Category')
get_pdf_button = QPushButton(QIcon.fromTheme('application-pdf', QIcon.fromTheme('x-office-document')), 'Get Pdf')
control_widget = QWidget()
control_layout = QHBoxLayout()
control_layout.addWidget(self.add_cat_field)
control_layout.addWidget(add_cat_button)
control_layout.addWidget(get_pdf_button)
control_widget.setLayout(control_layout)
layout.addWidget(control_widget)
self.table = MenuTableWidget()
layout.addWidget(self.table)
button_box = QDialogButtonBox()
ok_button = button_box.addButton(button_box.Ok)
self.connect(ok_button, SIGNAL('clicked()'), self.ok_clicked)
self.connect(add_cat_button, SIGNAL('clicked()'), self.add_cat)
self.connect(get_pdf_button, SIGNAL('clicked()'), self.get_pdf)
layout.addWidget(button_box)
self.setLayout(layout)
def get_pdf(self):
filename = QFileDialog.getSaveFileName(self, 'Save Menu', '~/menu.pdf', 'Pdf-File (*.pdf)')
if filename:
with open(filename, 'w') as file:
renderPdf(file)
def ok_clicked(self):
self.hide()
def add_cat(self):
name = str(self.add_cat_field.text())
if name:
new_cat = Category(name=name)
new_cat.save()
self.table.update()
self.add_cat_field.setText('')
class MenuTableWidget(QTableWidget):
def __init__(self):
QTableWidget.__init__(self)
self.horizontalHeader().hide()
self.verticalHeader().hide()
self.setColumnCount(4)
self.setHorizontalHeaderItem(0, QTableWidgetItem('Name'))
self.setHorizontalHeaderItem(1, QTableWidgetItem('Price'))
self.setHorizontalHeaderItem(2, QTableWidgetItem('Team Price'))
self.setHorizontalHeaderItem(3, QTableWidgetItem('Add / Delete'))
self.setSelectionMode(QAbstractItemView.NoSelection)
self.update()
def update(self):
cats = Category.objects.all().order_by("name")
for i in range(self.rowCount()):
self.removeRow(0)
row_counter = 0
for cat in cats:
self.insertRow(row_counter)
cat_label = QLabel(cat.name)
cat_label.setStyleSheet('QLabel { font-weight: bold; }')
del_cat_button = DelCategoryButton(cat)
self.setCellWidget(row_counter, 0, cat_label)
self.setCellWidget(row_counter, 3, del_cat_button)
self.connect(del_cat_button, SIGNAL('clicked()'), self.del_category)
row_counter += 1
self.insertRow(row_counter)
add_item_button = AddMenuItemButton(cat)
self.setCellWidget(row_counter, 0, add_item_button.name_field)
self.setCellWidget(row_counter, 1, add_item_button.price_field)
self.setCellWidget(row_counter, 2, add_item_button.p_price_field)
self.setCellWidget(row_counter, 3, add_item_button)
self.connect(add_item_button, SIGNAL('clicked()'), self.add_item)
row_counter += 1
for item in MenuItem.objects.filter(category=cat, available=True).order_by('name'):
self.insertRow(row_counter)
self.setCellWidget(row_counter, 0, QLabel(item.name))
self.setCellWidget(row_counter, 1, QLabel(u'%.2f \u20AC'%(item.price/100.0)))
self.setCellWidget(row_counter, 2, QLabel(u'%.2f \u20AC'%(item.pPrice/100.0)))
del_menu_item_button = DelMenuItemButton(item)
self.setCellWidget(row_counter, 3, del_menu_item_button)
self.connect(del_menu_item_button, SIGNAL('clicked()'), self.del_item)
row_counter += 1
def add_item(self):
sender = self.sender()
if sender.name_field.text() and sender.price_field.text() and sender.p_price_field.text():
new_item = MenuItem(name=str(sender.name_field.text()), category=sender.cat, price=float(sender.price_field.text().replace(',','.'))*100, pPrice=float(sender.p_price_field.text().replace(',','.'))*100)
new_item.save()
self.update()
self.emit(SIGNAL('settingsChanged()'))
def del_item(self):
item = self.sender().menu_item
item.delete()
self.update()
self.emit(SIGNAL('settingsChanged()'))
def del_category(self):
cat = self.sender().cat
cat.delete()
self.update()
self.emit(SIGNAL('settingsChanged()'))
| gpl-3.0 | -4,817,067,510,547,112,000 | 42.730659 | 213 | 0.617645 | false |
danielbair/aeneas | aeneas/tools/__init__.py | 5 | 1174 | #!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**aeneas.tools** is a collection of command line tools
that can be run by the end user to perform tasks
such as computing or converting sync maps.
"""
| agpl-3.0 | 8,642,295,671,707,423,000 | 40.928571 | 77 | 0.75724 | false |
sderrick57/adaTRHsensor | examples/encoder/callback_encoder.py | 3 | 1638 | #!/usr/bin/env python
__author__ = 'Copyright (c) 2015 Alan Yorinks All rights reserved.'
"""
Copyright (c) 2015 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
"""
This demo illustrates retrieving data from an encoder using a callback
It will only work on an Arduino Uno and requires the PyMataPlus sketch to be installed on the Arduino
"""
import time
import signal
import sys
from PyMata.pymata import PyMata
# encoder pins
ENCODER_A = 14
ENCODER_B = 15
# Indices into callback return data list
DEVICE = 0
PIN = 1
DATA = 2
def encoder_callback(data):
print("Data = %d" % data[DATA])
# create a PyMata instance
board = PyMata("/dev/ttyACM0")
def signal_handler(sig, frame):
print('You pressed Ctrl+C!!!!')
if board is not None:
board.reset()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# configure the pins for the encoder
board.encoder_config(ENCODER_B, ENCODER_A, encoder_callback)
while 1:
time.sleep(.2)
| gpl-3.0 | 3,604,275,254,451,445,000 | 23.818182 | 101 | 0.742979 | false |
ojengwa/oh-mainline | vendor/packages/docutils/test/test_transforms/test_peps.py | 19 | 1698 | #! /usr/bin/env python
# $Id: test_peps.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Tests for docutils.transforms.peps.
"""
from __init__ import DocutilsTestSupport
from docutils.transforms.peps import TargetNotes
from docutils.parsers.rst import Parser
def suite():
parser = Parser()
s = DocutilsTestSupport.TransformTestSuite(parser)
s.generateTests(totest)
return s
totest = {}
totest['target_notes'] = ((TargetNotes,), [
["""\
No references or targets exist, therefore
no "References" section should be generated.
""",
"""\
<document source="test data">
<paragraph>
No references or targets exist, therefore
no "References" section should be generated.
"""],
["""\
A target exists, here's the reference_.
A "References" section should be generated.
.. _reference: http://www.example.org
""",
"""\
<document source="test data">
<paragraph>
A target exists, here's the \n\
<reference name="reference" refname="reference">
reference
\n\
<footnote_reference auto="1" ids="id3" refname="TARGET_NOTE: id2">
.
A "References" section should be generated.
<target ids="reference" names="reference" refuri="http://www.example.org">
<section ids="id1">
<title>
References
<footnote auto="1" ids="id2" names="TARGET_NOTE:\ id2">
<paragraph>
<reference refuri="http://www.example.org">
http://www.example.org
"""],
])
if __name__ == '__main__':
import unittest
unittest.main(defaultTest='suite')
| agpl-3.0 | 2,306,113,506,760,102,000 | 24.727273 | 78 | 0.635453 | false |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/numpy-1.6.0-py2.7-linux-x86_64-ucs4.egg/numpy/testing/noseclasses.py | 27 | 14194 | # These classes implement a doctest runner plugin for nose, a "known failure"
# error class, and a customized TestProgram for NumPy.
# Because this module imports nose directly, it should not
# be used except by nosetester.py to avoid a general NumPy
# dependency on nose.
import os
import doctest
import nose
from nose.plugins import doctests as npd
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.plugins.base import Plugin
from nose.util import src, getpackage
import numpy
from nosetester import get_package_name
import inspect
_doctest_ignore = ['generate_numpy_api.py', 'scons_support.py',
'setupscons.py', 'setup.py']
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
# not found in extension modules, http://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
#print '_fm C1' # dbg
return True
elif inspect.isfunction(object):
#print '_fm C2' # dbg
return module.__dict__ is object.func_globals
elif inspect.isbuiltin(object):
#print '_fm C2-1' # dbg
return module.__name__ == object.__module__
elif inspect.isclass(object):
#print '_fm C3' # dbg
return module.__name__ == object.__module__
elif inspect.ismethod(object):
# This one may be a bug in cython that fails to correctly set the
# __module__ attribute of methods, but since the same error is easy
# to make by extension code writers, having this safety in place
# isn't such a bad idea
#print '_fm C3-1' # dbg
return module.__name__ == object.im_class.__module__
elif inspect.getmodule(object) is not None:
#print '_fm C4' # dbg
#print 'C4 mod',module,'obj',object # dbg
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
#print '_fm C5' # dbg
return module.__name__ == object.__module__
elif isinstance(object, property):
#print '_fm C6' # dbg
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
doctest.DocTestFinder._find(self,tests, obj, name, module,
source_lines, globs, seen)
# Below we re-run pieces of the above method with manual modifications,
# because the original code is buggy and fails to correctly identify
# doctests in extension modules.
# Local shorthands
from inspect import isroutine, isclass, ismodule, isfunction, \
ismethod
# Look for tests in a module's contained objects.
if ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
valname1 = '%s.%s' % (name, valname)
if ( (isroutine(val) or isclass(val))
and self._from_module(module, val) ):
self._find(tests, val, valname1, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if isclass(obj) and self._recurse:
#print 'RECURSE into class:',obj # dbg
for valname, val in obj.__dict__.items():
#valname1 = '%s.%s' % (name, valname) # dbg
#print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((isfunction(val) or isclass(val) or
ismethod(val) or isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
class NumpyDocTestCase(npd.DocTestCase):
"""Proxy for DocTestCase: provides an address() method that
returns the correct address for the doctest case. Otherwise
acts as a proxy to the test case. To provide hints for address(),
an obj may also be passed -- this will be used as the test object
for purposes of determining the test address, if it is provided.
"""
# doctests loaded via find(obj) omit the module name
# so we need to override id, __repr__ and shortDescription
    # bonus: this will squash a 2.3 vs 2.4 incompatibility
def id(self):
name = self._dt_test.name
filename = self._dt_test.filename
if filename is not None:
pk = getpackage(filename)
if pk is not None and not name.startswith(pk):
name = "%s.%s" % (pk, name)
return name
# second-chance checker; if the default comparison doesn't
# pass, then see if the expected output string contains flags that
# tell us to ignore the output
class NumpyOutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
ret = doctest.OutputChecker.check_output(self, want, got,
optionflags)
if not ret:
if "#random" in want:
return True
# it would be useful to normalize endianness so that
# bigendian machines don't fail all the tests (and there are
# actually some bigendian examples in the doctests). Let's try
# making them all little endian
got = got.replace("'>","'<")
want= want.replace("'>","'<")
# try to normalize out 32 and 64 bit default int sizes
for sz in [4,8]:
got = got.replace("'<i%d'"%sz,"int")
want= want.replace("'<i%d'"%sz,"int")
ret = doctest.OutputChecker.check_output(self, want,
got, optionflags)
return ret
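# Illustrative doctest (an assumption, not taken from numpy itself) relying on
# the checker above -- the "#random" marker makes the comparison pass even
# though the printed values differ from run to run:
#
#   >>> np.random.rand(2)       #random
#   array([ 0.64364823,  0.15266272])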
# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
# its constructor that blocks non-default arguments from being passed
# down into doctest.DocTestCase
class NumpyDocTestCase(npd.DocTestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None, obj=None, result_var='_'):
self._result_var = result_var
self._nose_obj = obj
doctest.DocTestCase.__init__(self, test,
optionflags=optionflags,
setUp=setUp, tearDown=tearDown,
checker=checker)
print_state = numpy.get_printoptions()
class NumpyDoctest(npd.Doctest):
name = 'numpydoctest' # call nosetests with --with-numpydoctest
enabled = True
def options(self, parser, env=os.environ):
Plugin.options(self, parser, env)
def configure(self, options, config):
Plugin.configure(self, options, config)
self.doctest_tests = True
self.finder = NumpyDocTestFinder()
self.parser = doctest.DocTestParser()
# Turn on whitespace normalization, set a minimal execution context
# for doctests, implement a "#random" directive to allow executing a
# command while ignoring its output.
def loadTestsFromModule(self, module):
if not self.matches(module.__name__):
npd.log.debug("Doctest doesn't want module %s", module)
return
try:
tests = self.finder.find(module)
except AttributeError:
# nose allows module.__test__ = False; doctest does not and
# throws AttributeError
return
if not tests:
return
tests.sort()
module_file = src(module.__file__)
for test in tests:
if not test.examples:
continue
if not test.filename:
test.filename = module_file
pkg_name = get_package_name(os.path.dirname(test.filename))
# Each doctest should execute in an environment equivalent to
# starting Python and executing "import numpy as np", and,
# for SciPy packages, an additional import of the local
# package (so that scipy.linalg.basic.py's doctests have an
# implicit "from scipy import linalg" as well.
#
# Note: __file__ allows the doctest in NoseTester to run
# without producing an error
test.globs = {'__builtins__':__builtins__,
'__file__':'__main__',
'__name__':'__main__',
'np':numpy}
# add appropriate scipy import for SciPy tests
if 'scipy' in pkg_name:
p = pkg_name.split('.')
p1 = '.'.join(p[:-1])
p2 = p[-1]
test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
# always use whitespace and ellipsis options
optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
yield NumpyDocTestCase(test,
optionflags=optionflags,
checker=NumpyOutputChecker())
# Add an afterContext method to nose.plugins.doctests.Doctest in order
# to restore print options to the original state after each doctest
def afterContext(self):
numpy.set_printoptions(**print_state)
# Ignore NumPy-specific build files that shouldn't be searched for tests
def wantFile(self, file):
bn = os.path.basename(file)
if bn in _doctest_ignore:
return False
return npd.Doctest.wantFile(self, file)
class KnownFailureTest(Exception):
'''Raise this exception to mark a test as a known failing test.'''
pass
class KnownFailure(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
KnownFailureClass exception. When KnownFailureTest is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
knownfail = ErrorClass(KnownFailureTest,
label='KNOWNFAIL',
isfailure=False)
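    # Sketch of the intended use (hypothetical test function, not part of this
    # module):
    #
    #   def test_not_ready_yet():
    #       raise KnownFailureTest("blocked on an upstream fix")
    #
    # With the plugin enabled such a test is reported as KNOWNFAIL rather than
    # counted as an error or a failure.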
def options(self, parser, env=os.environ):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
help='Disable special handling of KnownFailureTest '
'exceptions')
def configure(self, options, conf):
if not self.can_configure:
return
self.conf = conf
disable = getattr(options, 'noKnownFail', False)
if disable:
self.enabled = False
class NpConfig(nose.core.Config):
''' Class to pull out nose doctest plugin after configuration
This allows the user to set doctest related settings in their
configuration. For example, without this fix, a setting of
'with-doctest=1' in the user's .noserc file would cause an error, if
we remove the doctest extension before this stage. Our configure
uses the plugin to parse any settings, but then removed the doctest
plugin because the numpy doctester should be used for doctests
instead.
'''
def __init__(self, config):
self.__dict__ = config.__dict__
def configure(self, *args, **kwargs):
super(NpConfig, self).configure(*args, **kwargs)
self.plugins.plugins = [p for p in self.plugins.plugins
if p.name != 'doctest']
# Our class has two uses. First, to allow us to use NpConfig above to
# remove the doctest plugin after it has parsed the configuration.
# Second we save the results of the tests in runTests - see runTests
# method docstring for details
class NumpyTestProgram(nose.core.TestProgram):
def makeConfig(self, *args, **kwargs):
"""Load a Config, pre-filled with user config files if any are
found.
We override this method only to allow us to return a NpConfig
object instead of a Config object.
"""
config = super(NumpyTestProgram, self).makeConfig(*args, **kwargs)
return NpConfig(config)
def runTests(self):
"""Run Tests. Returns true on success, false on failure, and
sets self.success to the same value.
Because nose currently discards the test result object, but we need
to return it to the user, override TestProgram.runTests to retain
the result
"""
if self.testRunner is None:
self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
verbosity=self.config.verbosity,
config=self.config)
plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
if plug_runner is not None:
self.testRunner = plug_runner
self.result = self.testRunner.run(self.test)
self.success = self.result.wasSuccessful()
return self.success
| gpl-3.0 | 7,707,504,016,922,917,000 | 39.438746 | 87 | 0.590249 | false |
lichia/luigi | test/recursion_test.py | 22 | 1624 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import datetime
from helpers import unittest
import luigi
import luigi.interface
from luigi.mock import MockTarget
class Popularity(luigi.Task):
date = luigi.DateParameter(default=datetime.date.today() - datetime.timedelta(1))
def output(self):
return MockTarget('/tmp/popularity/%s.txt' % self.date.strftime('%Y-%m-%d'))
def requires(self):
return Popularity(self.date - datetime.timedelta(1))
def run(self):
f = self.output().open('w')
for line in self.input().open('r'):
print(int(line.strip()) + 1, file=f)
f.close()
class RecursionTest(unittest.TestCase):
def setUp(self):
MockTarget.fs.get_all_data()['/tmp/popularity/2009-01-01.txt'] = b'0\n'
def test_invoke(self):
w = luigi.worker.Worker()
w.add(Popularity(datetime.date(2010, 1, 1)))
w.run()
w.stop()
self.assertEqual(MockTarget.fs.get_data('/tmp/popularity/2010-01-01.txt'), b'365\n')
| apache-2.0 | -5,870,679,033,862,082,000 | 28.527273 | 92 | 0.678571 | false |
viewfinderco/viewfinder | backend/www/test/auth_facebook_test.py | 13 | 11384 | #!/usr/bin/env python
#
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Account authorization tests for Facebook and Facebook accounts.
"""
__authors__ = ['[email protected] (Spencer Kimball)',
'[email protected] (Andrew Kimball)']
import json
import mock
import os
import time
import unittest
import urllib
from functools import partial
from tornado import httpclient, ioloop
from viewfinder.backend.base import util
from viewfinder.backend.base.testing import async_test_timeout, MockAsyncHTTPClient
from viewfinder.backend.db.contact import Contact
from viewfinder.backend.db.device import Device
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.user import User
from viewfinder.backend.www.test import auth_test, facebook_utils, service_base_test
@unittest.skip("needs facebook credentials")
@unittest.skipIf('NO_NETWORK' in os.environ, 'no network')
class AuthFacebookTestCase(service_base_test.ServiceBaseTestCase):
"""Tests authentication via the Facebook OAuth service."""
def setUp(self):
super(AuthFacebookTestCase, self).setUp()
self._facebook_user_dict = {'first_name': 'Andrew', 'last_name': 'Kimball', 'name': 'Andrew Kimball',
'id': 'id', 'link': 'http://www.facebook.com/andrew.kimball.50',
'timezone':-7, 'locale': 'en_US', 'email': '[email protected]',
'picture': {'data': {'url': 'http://foo.com/pic.jpg',
'is_silhouette': False}},
'verified': True}
self._facebook_user2_dict = {'name': 'Spencer Kimball', 'id': 'id2'}
self._mobile_device_dict = {'name': 'Andy\'s IPhone', 'version': '1.0', 'platform': 'IPhone 4S',
'os': 'iOS 5.0.1', 'push_token': 'push_token'}
def testRegisterWebUser(self):
"""Test successful register of web user."""
# Register as web user, register as mobile user (2nd attempt is error).
self._tester.RegisterFacebookUser(self._facebook_user_dict)
self.assertRaisesHttpError(403,
self._tester.RegisterFacebookUser,
self._facebook_user_dict,
self._mobile_device_dict)
def testRegisterMobileUser(self):
"""Test successful register of mobile user."""
# Register as mobile user, register as web user (2nd attempt is error).
self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
self.assertRaisesHttpError(403,
self._tester.RegisterFacebookUser,
self._facebook_user_dict)
def testLoginWebUser(self):
"""Test successful login of web user."""
# Register as web user, login as web user.
user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
user2, device_id2 = self._tester.LoginFacebookUser(self._facebook_user_dict)
self.assertEqual(user.user_id, user2.user_id)
self.assertEqual(device_id, device_id2)
# And login again as mobile user.
self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
def testLoginMobileUser(self):
"""Test successful login of mobile user."""
# Register as web user, login as mobile user.
user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
user2, device_id2 = self._tester.LoginFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
self.assertEqual(user.user_id, user2.user_id)
self.assertNotEqual(device_id, device_id2)
# And login again as web user.
self._tester.LoginFacebookUser(self._facebook_user_dict)
def testLinkWebUser(self):
"""Test successful link of web user."""
# Register as mobile user, link as web user
user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict, self._mobile_device_dict)
cookie = self._GetSecureUserCookie(user, device_id)
user2, device_id2 = self._tester.LinkFacebookUser(self._facebook_user2_dict, user_cookie=cookie)
self.assertEqual(user.user_id, user2.user_id)
self.assertNotEqual(device_id, device_id2)
# And link again as mobile user.
self._tester.LinkFacebookUser(self._facebook_user2_dict, self._mobile_device_dict, user_cookie=cookie)
self.assertEqual(len(self._tester.ListIdentities(cookie)), 2)
def testLinkMobileUser(self):
"""Test successful link of mobile user."""
# Register as web user, link as mobile user.
user, device_id = self._tester.RegisterFacebookUser(self._facebook_user_dict)
cookie = self._GetSecureUserCookie(user, device_id)
self._tester.LinkFacebookUser(self._facebook_user2_dict, self._mobile_device_dict, user_cookie=cookie)
# And link again as web user.
self._tester.LinkFacebookUser(self._facebook_user2_dict, user_cookie=cookie)
self.assertEqual(len(self._tester.ListIdentities(cookie)), 2)
def testLoginNoExist(self):
"""ERROR: Try to login with Facebook identity that is not linked to a Viewfinder account."""
self.assertRaisesHttpError(403, self._tester.LoginFacebookUser, self._facebook_user_dict)
self.assertRaisesHttpError(403, self._tester.LoginFacebookUser, self._facebook_user_dict,
self._mobile_device_dict)
def testAuthenticationFailed(self):
"""ERROR: Fail Facebook authentication (which returns None user_dict)."""
with mock.patch('tornado.httpclient.AsyncHTTPClient', MockAsyncHTTPClient()) as mock_client:
mock_client.map(r'https://graph.facebook.com/me\?',
lambda request: httpclient.HTTPResponse(request, 400))
url = self.get_url('/register/facebook?access_token=access_token')
self.assertRaisesHttpError(401,
auth_test._SendAuthRequest,
self._tester,
url,
'POST',
user_cookie=self._cookie,
request_dict=auth_test._CreateRegisterRequest(self._mobile_device_dict))
def testMissingAccessToken(self):
"""ERROR: Test error on missing facebook access token."""
self.assertRaisesHttpError(400,
auth_test._SendAuthRequest,
self._tester,
self.get_url('/register/facebook'),
'POST',
request_dict=auth_test._CreateRegisterRequest(self._mobile_device_dict))
@async_test_timeout(timeout=30)
def testFacebookRegistration(self):
"""Test end-end Facebook registration scenario using a test Facebook
account.
"""
self._validate = False
# Get one facebook test user by querying facebook.
fu = facebook_utils.FacebookUtils()
users = fu.QueryFacebookTestUsers(limit=1)
assert len(users) == 1, users
def _VerifyAccountStatus(cookie, results):
u = results['user']
dev = results['device']
ident = results['identity']
self.assertEqual(ident.user_id, u.user_id)
self.assertTrue(u.name)
self.assertTrue(u.given_name)
self.assertTrue(u.family_name)
self.assertIsNotNone(u.webapp_dev_id)
[self.assertEqual(getattr(dev, k), v) for k, v in self._mobile_device_dict.items()]
# Keep querying until notifications are found.
while True:
response_dict = self._SendRequest('query_notifications', cookie, {})
if len(response_dict['notifications']) > 2:
break
time.sleep(0.100)
self.assertEqual(response_dict['notifications'][1]['name'], 'register friend')
notification = response_dict['notifications'][2]
self.assertEqual(notification['name'], 'fetch_contacts')
sort_key = Contact.CreateSortKey(None, notification['timestamp'])
self.assertEqual(notification['invalidate']['contacts']['start_key'], sort_key)
self.stop()
def _VerifyResponse(response):
"""Verify successful registration. Query the identity and
contacts and verify against the actual test data in facebook.
"""
self.assertEqual(response.code, 200)
cookie = self._tester.GetCookieFromResponse(response)
user_dict = self._tester.DecodeUserCookie(cookie)
response_dict = json.loads(response.body)
self.assertTrue('user_id' in user_dict)
self.assertTrue('device_id' in user_dict)
self.assertEqual(user_dict['device_id'], response_dict['device_id'])
with util.DictBarrier(partial(_VerifyAccountStatus, cookie)) as b:
identity_key = 'FacebookGraph:%s' % users[0]['id']
Identity.Query(self._client, hash_key=identity_key, col_names=None,
callback=b.Callback('identity'))
User.Query(self._client, hash_key=user_dict['user_id'], col_names=None,
callback=b.Callback('user'))
Device.Query(self._client, hash_key=user_dict['user_id'], range_key=user_dict['device_id'],
col_names=None, callback=b.Callback('device'))
url = self.get_url('/link/facebook') + '?' + \
urllib.urlencode({'access_token': users[0]['access_token']})
self.http_client.fetch(url, method='POST',
headers={'Content-Type': 'application/json',
'X-Xsrftoken': 'fake_xsrf',
'Cookie': 'user=%s;_xsrf=fake_xsrf' % self._cookie},
body=json.dumps(auth_test._CreateRegisterRequest(self._mobile_device_dict)),
callback=_VerifyResponse)
def get_new_ioloop(self):
"""Override get_io_loop() to return IOLoop.instance(). The global IOLoop instance is used
by self.http_client.fetch in the testFacebookRegistration test.
"""
return ioloop.IOLoop.instance()
def _TestAuthFacebookUser(action, tester, user_dict, device_dict=None, user_cookie=None):
"""Called by the ServiceTester in order to test login/facebook, link/facebook, and
register/facebook calls.
"""
ident_dict = {'key': 'FacebookGraph:%s' % user_dict['id'], 'authority': 'Facebook',
'access_token': 'access_token'}
if device_dict:
device_dict.pop('device_uuid', None)
device_dict.pop('test_udid', None)
# Mock responses from Facebook.
with mock.patch('tornado.httpclient.AsyncHTTPClient', MockAsyncHTTPClient()) as mock_client:
# Add response to request for an access token.
mock_client.map(r'https://graph.facebook.com/oauth/access_token',
'access_token=%s&expires=3600' % ident_dict['access_token'])
# Response to request for user info.
auth_test._AddMockJSONResponse(mock_client, r'https://graph.facebook.com/me\?', user_dict)
# Add empty response to request for photos and friends.
auth_test._AddMockJSONResponse(mock_client, r'https://graph.facebook.com/me/photos\?', {'data': []})
auth_test._AddMockJSONResponse(mock_client, r'https://graph.facebook.com/me/friends\?', {'data': []})
response = auth_test._AuthFacebookOrGoogleUser(tester, action, user_dict, ident_dict, device_dict, user_cookie)
return auth_test._ValidateAuthUser(tester, action, user_dict, ident_dict, device_dict, user_cookie, response)
| apache-2.0 | -4,707,444,648,159,269,000 | 46.433333 | 115 | 0.651353 | false |
dmsurti/mayavi | mayavi/modules/api.py | 2 | 1134 | """ Defines the publicly accessible Mayavi2 modules.
"""
# Author: Frederic Petit and Prabhu Ramachandran
# Copyright (c) 2007-2015, Enthought, Inc.
# License: BSD Style.
from .axes import Axes
from .contour_grid_plane import ContourGridPlane
from .custom_grid_plane import CustomGridPlane
from .generic_module import GenericModule
from .glyph import Glyph
from .grid_plane import GridPlane
from .hyper_streamline import HyperStreamline
from .image_actor import ImageActor
from .image_plane_widget import ImagePlaneWidget
from .iso_surface import IsoSurface
from .labels import Labels
from .orientation_axes import OrientationAxes
from .outline import Outline
from .scalar_cut_plane import ScalarCutPlane
from .slice_unstructured_grid import SliceUnstructuredGrid
from .streamline import Streamline
from .structured_grid_outline import StructuredGridOutline
from .surface import Surface
from .text import Text
from .text3d import Text3D
from .tensor_glyph import TensorGlyph
from .vector_cut_plane import VectorCutPlane
from .vectors import Vectors
from .volume import Volume
from .warp_vector_cut_plane import WarpVectorCutPlane
| bsd-3-clause | 2,150,115,056,638,627,300 | 32.352941 | 58 | 0.826279 | false |
nathaliaspatricio/febracev | blog/tests.py | 4 | 1962 | """
>>> from django.test import Client
>>> from basic.blog.models import Post, Category
>>> import datetime
>>> from django.core.urlresolvers import reverse
>>> client = Client()
>>> category = Category(title='Django', slug='django')
>>> category.save()
>>> category2 = Category(title='Rails', slug='rails')
>>> category2.save()
>>> post = Post(title='DJ Ango', slug='dj-ango', body='Yo DJ! Turn that music up!', status=2, publish=datetime.datetime(2008,5,5,16,20))
>>> post.save()
>>> post2 = Post(title='Where my grails at?', slug='where', body='I Can haz Holy plez?', status=2, publish=datetime.datetime(2008,4,2,11,11))
>>> post2.save()
>>> post.categories.add(category)
>>> post2.categories.add(category2)
>>> response = client.get(reverse('blog_index'))
>>> response.context[-1]['object_list']
[<Post: DJ Ango>, <Post: Where my grails at?>]
>>> response.status_code
200
>>> response = client.get(reverse('blog_category_list'))
>>> response.context[-1]['object_list']
[<Category: Django>, <Category: Rails>]
>>> response.status_code
200
>>> response = client.get(category.get_absolute_url())
>>> response.context[-1]['object_list']
[<Post: DJ Ango>]
>>> response.status_code
200
>>> response = client.get(post.get_absolute_url())
>>> response.context[-1]['object']
<Post: DJ Ango>
>>> response.status_code
200
>>> response = client.get(reverse('blog_search'), {'q': 'DJ'})
>>> response.context[-1]['object_list']
[<Post: DJ Ango>]
>>> response.status_code
200
>>> response = client.get(reverse('blog_search'), {'q': 'Holy'})
>>> response.context[-1]['object_list']
[<Post: Where my grails at?>]
>>> response.status_code
200
>>> response = client.get(reverse('blog_search'), {'q': ''})
>>> response.context[-1]['message']
'Search term was too vague. Please try again.'
>>> response = client.get(reverse('blog_detail', args=[2008, 'apr', 2, 'where']))
>>> response.context[-1]['object']
<Post: Where my grails at?>
>>> response.status_code
200
"""
| gpl-2.0 | -1,170,170,518,746,329,300 | 28.727273 | 141 | 0.66055 | false |
powertomato/heuOpt_2017_G1 | ThreadRunner.py | 1 | 4499 | from multiprocessing import Process, Lock
import sys
import os
import copy
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from model.graph import Graph
from BEP_Visualizer.BEP_visualizer import View
from solvers.construction.DepthFirstVertexOrder import *
from solvers.construction.GreedyLeastPage import *
from solvers.construction.RandomEdgeAssignment import *
from solvers.construction.RandomVertexOrder import *
from model.node import Node
from model.edge import Edge
from model.page import Page
from solvers.LocalSearch.VariableNeighborhoodDescent import *
from solvers.evaluators.Evaluator import Evaluator, TimedEvaluator
from solvers.LocalSearch.SimpleLocalSearch import SimpleLocalSearch
from solvers.neighborhoods.Neighborhood import Neighborhood
from solvers.neighborhoods.MoveNode import MoveNodeCandidate
from solvers.neighborhoods.MoveNode import MoveNode
from solvers.neighborhoods.EdgePageMove import EdgePageMove
from solvers.neighborhoods.EdgePageMove import EdgePageMoveCandidate
import time
class ThreadRunner():
N_DFS = 1
N_RND = 2
E_GRD = 1
E_RND = 2
E_GRD_RND = 3
LS_LS = 1
LS_VND = 2
LS_GVNS = 3
LS_NODEMOVE = 1
LS_EDGEMOVE = 2
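    # (Clarifying note: the constants above select the construction heuristics
    #  and the local-search variant dispatched in run() below.  Illustrative
    #  call with hypothetical arguments:
    #      t = ThreadRunner(0, graph, best, nums, ThreadRunner.N_RND,
    #                       ThreadRunner.E_GRD, 10, lock,
    #                       local_search=ThreadRunner.LS_VND)
    #      t.start(); t.join()
    #  )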
def __init__(self, threadID, graph, best_solution, crossing_nums, node_construction, edge_construction, iterations, lock, local_search=0, step=0, neighborhood=0):
self.threadID = threadID
self.graph = graph
self.best_solution = best_solution
self.crossing_nums = crossing_nums
self.node_construction = node_construction
self.edge_construction = edge_construction
self.local_search = local_search
self.step = step
self.neighborhood = neighborhood
self.iterations = iterations
self.process = Process(target=self.run, args=())
self.lock = lock
def stop(self):
self.should_stop = True
def start(self):
self.process.start()
def join(self):
self.process.join()
def run(self):
self.start_time = time.clock()
for _ in range(self.iterations):
#while time.clock() - self.start_time < 900:
#print("Thread:", self.threadID, "iteration:", _)
if(self.node_construction == ThreadRunner.N_DFS):
#constructVertexOrderDFS(self.graph)
pass
elif(self.node_construction == ThreadRunner.N_RND):
constructVertexOrderRandom(self.graph)
if(self.edge_construction == ThreadRunner.E_GRD):
constructSolutionGreedyLeastCrossings(self.graph, False)
elif(self.edge_construction == ThreadRunner.E_RND):
constructRandomEdgeAssignment(self.graph)
elif(self.edge_construction == ThreadRunner.E_GRD_RND):
#constructSolutionGreedyLeastCrossings(self.graph, True)
pass
if(self.local_search == ThreadRunner.LS_LS):
evaluator = Evaluator()
if self.neighborhood == ThreadRunner.LS_EDGEMOVE:
neighborhood = EdgePageMove(self.step, evaluator)
elif self.neighborhood == ThreadRunner.LS_NODEMOVE:
neighborhood = MoveNode(self.step, evaluator)
search = SimpleLocalSearch(neighborhood, evaluator)
neighborhood.reset(self.graph, self.step)
x = search.optimize(self.graph)
elif(self.local_search == ThreadRunner.LS_VND):
evaluator = TimedEvaluator(900)
n1 = EdgePageMove(Neighborhood.BEST, evaluator)
n2 = MoveNode(Neighborhood.BEST, evaluator)
vndsearch = VND([n1, n2], evaluator)
x = vndsearch.optimize(self.graph)
if(self.compare_to_best() == 0):
return
def compare_to_best(self):
self.lock.acquire()
num = self.graph.numCrossings()
self.crossing_nums.append(num)
self.best_solution[3] += 1
self.best_solution[4] = time.clock() - self.start_time
if(num < self.best_solution[0]):
self.best_solution[0] = num
self.best_solution[1] = self.graph.copy()
self.best_solution[2] = self.threadID
print("new best:", num, "on thread:", self.threadID)
if(self.best_solution[0] == 0):
self.lock.release()
return 0
self.lock.release()
return num | mit | -5,412,681,166,571,444,000 | 35.290323 | 166 | 0.647477 | false |
MziRintu/kitsune | kitsune/products/admin.py | 12 | 1523 | from django.contrib import admin
from kitsune.products.models import Platform, Product, Topic, Version
class ProductAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'display_order', 'visible')
list_display_links = ('title', 'slug')
list_editable = ('display_order', 'visible')
readonly_fields = ('id',)
prepopulated_fields = {'slug': ('title',)}
class TopicAdmin(admin.ModelAdmin):
def parent(obj):
return obj.parent
parent.short_description = 'Parent'
list_display = ('product', 'title', 'slug', parent, 'display_order',
'visible')
list_display_links = ('title', 'slug')
list_editable = ('display_order', 'visible')
list_filter = ('product', 'parent', 'slug')
readonly_fields = ('id',)
prepopulated_fields = {'slug': ('title',)}
class VersionAdmin(admin.ModelAdmin):
list_display = ('name', 'product', 'slug', 'min_version', 'max_version',
'visible', 'default')
list_display_links = ('name',)
list_editable = ('slug', 'visible', 'default', 'min_version',
'max_version')
list_filter = ('product', )
class PlatformAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'display_order', 'visible')
list_display_links = ('name', )
list_editable = ('slug', 'display_order', 'visible')
admin.site.register(Platform, PlatformAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(Version, VersionAdmin)
| bsd-3-clause | -4,337,025,609,275,909,000 | 32.108696 | 76 | 0.634274 | false |
mikmakmuk/syncevolution_n9 | test/runtests.py | 1 | 93413 | #!/usr/bin/python
"""
The general idea is that tests to run are defined as a list of
actions. Each action has a unique name and can depend on other
actions to have run successfully before.
Most work is executed in directories defined and owned by these
actions. The framework only manages one directory which represents
the result of each action:
- an overview file which lists the result of each action
- for each action a directory with stderr/out and additional files
that the action can put there
"""
import os, sys, popen2, traceback, re, time, signal, smtplib, optparse, stat, shutil, StringIO, MimeWriter
import shlex
import subprocess
import fnmatch
import copy
try:
import gzip
havegzip = True
except:
havegzip = False
def cd(path):
"""Enter directories, creating them if necessary."""
if not os.access(path, os.F_OK):
os.makedirs(path)
os.chdir(path)
def abspath(path):
"""Absolute path after expanding vars and user."""
return os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
def findInPaths(name, dirs):
"""find existing item in one of the directories, return None if
no directories give, absolute path to existing item or (as fallbac)
last dir + name"""
fullname = None
for dir in dirs:
fullname = os.path.join(abspath(dir), name)
if os.access(fullname, os.F_OK):
break
return fullname
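# (Illustrative call with hypothetical paths:
#      findInPaths("resultchecker.py", ["~/sync/test", "/usr/share/datadir"])
#  returns the first existing match, or the last candidate path as fallback.)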
def del_dir(path):
if not os.access(path, os.F_OK):
return
for file in os.listdir(path):
file_or_dir = os.path.join(path,file)
# ensure directory is writable
os.chmod(path, os.stat(path)[stat.ST_MODE] | stat.S_IRWXU)
if os.path.isdir(file_or_dir) and not os.path.islink(file_or_dir):
del_dir(file_or_dir) #it's a directory recursive call to function again
else:
os.remove(file_or_dir) #it's a file, delete it
os.rmdir(path)
def copyLog(filename, dirname, htaccess, lineFilter=None):
"""Make a gzipped copy (if possible) with the original time stamps and find the most severe problem in it.
That line is then added as description in a .htaccess AddDescription.
For directories just copy the whole directory tree.
"""
info = os.stat(filename)
outname = os.path.join(dirname, os.path.basename(filename))
if os.path.isdir(filename):
# copy whole directory, without any further processing at the moment
shutil.copytree(filename, outname, symlinks=True)
return
# .out files are typically small nowadays, so don't compress
if False:
outname = outname + ".gz"
out = gzip.open(outname, "wb")
else:
out = file(outname, "w")
error = None
for line in file(filename, "r").readlines():
if not error and line.find("ERROR") >= 0:
error = line
if lineFilter:
line = lineFilter(line)
out.write(line)
out.close()
os.utime(outname, (info[stat.ST_ATIME], info[stat.ST_MTIME]))
if error:
        error = error.strip().replace("\"", "'").replace("<", "&lt;").replace(">", "&gt;")
htaccess.write("AddDescription \"%s\" %s\n" %
(error,
os.path.basename(filename)))
return error
def TryKill(pid, signal):
try:
os.kill(pid, signal)
except OSError, ex:
# might have quit in the meantime, deal with the race
# condition
if ex.errno != 3:
raise ex
def ShutdownSubprocess(popen, timeout):
start = time.time()
if popen.poll() == None:
TryKill(popen.pid, signal.SIGTERM)
while popen.poll() == None and start + timeout >= time.time():
time.sleep(0.01)
if popen.poll() == None:
TryKill(popen.pid, signal.SIGKILL)
while popen.poll() == None and start + timeout + 1 >= time.time():
time.sleep(0.01)
return False
return True
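# (Illustrative usage, assuming 'popen' is a subprocess.Popen handle:
#      if not ShutdownSubprocess(popen, 5.0):
#          print "child process would not shut down"
#  SIGTERM is sent first, SIGKILL once 'timeout' seconds have passed.)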
class Action:
"""Base class for all actions to be performed."""
DONE = "0 DONE"
WARNINGS = "1 WARNINGS"
FAILED = "2 FAILED"
TODO = "3 TODO"
SKIPPED = "4 SKIPPED"
COMPLETED = (DONE, WARNINGS)
def __init__(self, name):
self.name = name
self.status = self.TODO
self.summary = ""
self.dependencies = []
self.isserver = False;
def execute(self):
"""Runs action. Throws an exeception if anything fails.
Will be called by tryexecution() with stderr/stdout redirected into a file
and the current directory set to an empty temporary directory.
"""
raise Exception("not implemented")
def nop(self):
pass
def tryexecution(self, step, logs):
"""wrapper around execute which handles exceptions, directories and stdout"""
print "*** running action %s" % self.name
if logs:
fd = -1
oldstdout = os.dup(1)
oldstderr = os.dup(2)
oldout = sys.stdout
olderr = sys.stderr
cwd = os.getcwd()
try:
subdirname = "%d-%s" % (step, self.name)
del_dir(subdirname)
sys.stderr.flush()
sys.stdout.flush()
cd(subdirname)
if logs:
fd = os.open("output.txt", os.O_WRONLY|os.O_CREAT|os.O_TRUNC)
os.dup2(fd, 1)
os.dup2(fd, 2)
sys.stdout = os.fdopen(fd, "w")
sys.stderr = sys.stdout
print "=== starting %s ===" % (self.name)
self.execute()
self.status = Action.DONE
self.summary = "okay"
except Exception, inst:
traceback.print_exc()
self.status = Action.FAILED
self.summary = str(inst)
print "\n=== %s: %s ===" % (self.name, self.status)
sys.stdout.flush()
os.chdir(cwd)
if logs:
if fd >= 0:
sys.stdout.close()
os.dup2(oldstdout, 1)
os.dup2(oldstderr, 2)
sys.stderr = olderr
sys.stdout = oldout
os.close(oldstdout)
os.close(oldstderr)
return self.status
class Context:
"""Provides services required by actions and handles running them."""
def __init__(self, tmpdir, resultdir, uri, workdir, mailtitle, sender, recipients, mailhost, enabled, skip, nologs, setupcmd, make, sanitychecks, lastresultdir, datadir):
# preserve normal stdout because stdout/stderr will be redirected
self.out = os.fdopen(os.dup(1), "w")
self.todo = []
self.actions = {}
self.tmpdir = abspath(tmpdir)
self.resultdir = abspath(resultdir)
self.uri = uri
self.workdir = abspath(workdir)
self.summary = []
self.mailtitle = mailtitle
self.sender = sender
self.recipients = recipients
self.mailhost = mailhost
self.enabled = enabled
self.skip = skip
self.nologs = nologs
self.setupcmd = setupcmd
self.make = make
self.sanitychecks = sanitychecks
self.lastresultdir = lastresultdir
self.datadir = datadir
def findTestFile(self, name):
"""find item in SyncEvolution test directory, first using the
generated source of the current test, then the bootstrapping code"""
return findInPaths(name, (os.path.join(sync.basedir, "test"), self.datadir))
def runCommand(self, cmdstr, dumpCommands=False):
"""Log and run the given command, throwing an exception if it fails."""
cmd = shlex.split(cmdstr)
if "valgrindcheck.sh" in cmdstr:
cmd.insert(0, "VALGRIND_LOG=%s" % os.getenv("VALGRIND_LOG", ""))
cmd.insert(0, "VALGRIND_ARGS=%s" % os.getenv("VALGRIND_ARGS", ""))
cmd.insert(0, "VALGRIND_LEAK_CHECK_ONLY_FIRST=%s" % os.getenv("VALGRIND_LEAK_CHECK_ONLY_FIRST", ""))
cmd.insert(0, "VALGRIND_LEAK_CHECK_SKIP=%s" % os.getenv("VALGRIND_LEAK_CHECK_SKIP", ""))
# move "sudo" or "env" command invocation in front of
# all the leading env variable assignments: necessary
# because sudo ignores them otherwise
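        # For example, "FOO=1 sudo make" becomes "sudo FOO=1 make" before the
        # command is joined and executed (comment added for clarity).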
command = 0
isenv = re.compile(r'[a-zA-Z0-9_]*=.*')
while isenv.match(cmd[command]):
command = command + 1
if cmd[command] in ("env", "sudo"):
cmd.insert(0, cmd[command])
del cmd[command + 1]
cmdstr = " ".join(map(lambda x: (' ' in x or x == '') and ("'" in x and '"%s"' or "'%s'") % x or x, cmd))
if dumpCommands:
cmdstr = "set -x; " + cmdstr
print "*** ( cd %s; export %s; %s )" % (os.getcwd(),
" ".join(map(lambda x: "'%s=%s'" % (x, os.getenv(x, "")), [ "LD_LIBRARY_PATH" ])),
cmdstr)
sys.stdout.flush()
result = os.system(cmdstr)
if result != 0:
raise Exception("%s: failed (return code %d)" % (cmd, result>>8))
def add(self, action):
"""Add an action for later execution. Order is important, fifo..."""
self.todo.append(action)
self.actions[action.name] = action
def required(self, actionname):
"""Returns true if the action is required by one which is enabled."""
if actionname in self.enabled:
return True
for action in self.todo:
if actionname in action.dependencies and self.required(action.name):
return True
return False
def execute(self):
cd(self.resultdir)
s = open("output.txt", "w+")
status = Action.DONE
step = 0
run_servers=[];
while len(self.todo) > 0:
try:
step = step + 1
# get action
action = self.todo.pop(0)
# check whether it actually needs to be executed
if self.enabled and \
not action.name in self.enabled and \
not self.required(action.name):
# disabled
action.status = Action.SKIPPED
self.summary.append("%s skipped: disabled in configuration" % (action.name))
elif action.name in self.skip:
# assume that it was done earlier
action.status = Action.SKIPPED
self.summary.append("%s assumed to be done: requested by configuration" % (action.name))
else:
# check dependencies
for depend in action.dependencies:
if not self.actions[depend].status in Action.COMPLETED:
action.status = Action.SKIPPED
self.summary.append("%s skipped: required %s has not been executed" % (action.name, depend))
break
if action.status == Action.SKIPPED:
continue
# execute it
if action.isserver:
run_servers.append(action.name);
action.tryexecution(step, not self.nologs)
if action.status > status:
status = action.status
if action.status == Action.FAILED:
self.summary.append("%s: %s" % (action.name, action.summary))
elif action.status == Action.WARNINGS:
self.summary.append("%s done, but check the warnings" % action.name)
else:
self.summary.append("%s successful" % action.name)
except Exception, inst:
traceback.print_exc()
self.summary.append("%s failed: %s" % (action.name, inst))
# append all parameters to summary
self.summary.append("")
self.summary.extend(sys.argv)
# update summary
s.write("%s\n" % ("\n".join(self.summary)))
s.close()
# copy information about sources
for source in self.actions.keys():
action = self.actions[source]
basedir = getattr(action, 'basedir', None)
if basedir and os.path.isdir(basedir):
for file in os.listdir(os.path.join(basedir, "..")):
if fnmatch.fnmatch(file, source + '[.-]*'):
shutil.copyfile(os.path.join(basedir, "..", file),
os.path.join(self.resultdir, file))
# run testresult checker
#calculate the src dir where client-test can be located
srcdir = os.path.join(compile.builddir, "src")
backenddir = os.path.join(compile.installdir, "usr/lib/syncevolution/backends")
# resultchecker doesn't need valgrind, remove it
shell = re.sub(r'\S*valgrind\S*', '', options.shell)
prefix = re.sub(r'\S*valgrind\S*', '', options.testprefix)
uri = self.uri or ("file:///" + self.resultdir)
resultchecker = self.findTestFile("resultchecker.py")
compare = self.findTestFile("compare.xsl")
generateHTML = self.findTestFile("generate-html.xsl")
commands = []
# produce nightly.xml from plain text log files
commands.append(resultchecker + " " +self.resultdir+" "+"'"+",".join(run_servers)+"'"+" "+uri +" "+srcdir + " '" + shell + " " + testprefix +" '"+" '" +backenddir +"'")
previousxml = os.path.join(self.lastresultdir, "nightly.xml")
if os.path.exists(previousxml):
# compare current nightly.xml against previous file
commands.append("xsltproc -o " + self.resultdir + "/cmp_result.xml --stringparam cmp_file " + previousxml + " " + compare + " " + self.resultdir + "/nightly.xml")
# produce HTML with URLs relative to current directory of the nightly.html
commands.append("xsltproc -o " + self.resultdir + "/nightly.html --stringparam url . --stringparam cmp_result_file " + self.resultdir + "/cmp_result.xml " + generateHTML + " "+ self.resultdir+"/nightly.xml")
self.runCommand(" && ".join(commands))
# report result by email
if self.recipients:
server = smtplib.SMTP(self.mailhost)
msg=''
try:
msg = open(self.resultdir + "/nightly.html").read()
except IOError:
msg = '''<html><body><h1>Error: No HTML report generated!</h1></body></html>\n'''
# insert absolute URL into hrefs so that links can be opened directly in
# the mail reader
msg = re.sub(r'href="([a-zA-Z0-9./])',
'href="' + uri + r'/\1',
msg)
body = StringIO.StringIO()
writer = MimeWriter.MimeWriter (body)
writer.addheader("From", self.sender)
for recipient in self.recipients:
writer.addheader("To", recipient)
writer.addheader("Subject", self.mailtitle + ": " + os.path.basename(self.resultdir))
writer.addheader("MIME-Version", "1.0")
writer.flushheaders()
writer.startbody("text/html;charset=ISO-8859-1").write(msg)
failed = server.sendmail(self.sender, self.recipients, body.getvalue())
if failed:
print "could not send to: %s" % (failed)
sys.exit(1)
else:
print "\n".join(self.summary), "\n"
if status in Action.COMPLETED:
sys.exit(0)
else:
sys.exit(1)
# must be set before instantiating some of the following classes
context = None
class CVSCheckout(Action):
"""Does a CVS checkout (if directory does not exist yet) or an update (if it does)."""
def __init__(self, name, workdir, runner, cvsroot, module, revision):
"""workdir defines the directory to do the checkout in,
cvsroot the server, module the path to the files,
revision the tag to checkout"""
Action.__init__(self,name)
self.workdir = workdir
self.runner = runner
self.cvsroot = cvsroot
self.module = module
self.revision = revision
self.basedir = os.path.join(abspath(workdir), module)
def execute(self):
cd(self.workdir)
if os.access(self.module, os.F_OK):
os.chdir(self.module)
context.runCommand("cvs update -d -r %s" % (self.revision))
elif self.revision == "HEAD":
context.runCommand("cvs -d %s checkout %s" % (self.cvsroot, self.module))
os.chdir(self.module)
else:
context.runCommand("cvs -d %s checkout -r %s %s" % (self.cvsroot, self.revision, self.module))
os.chdir(self.module)
if os.access("autogen.sh", os.F_OK):
context.runCommand("%s ./autogen.sh" % (self.runner))
class SVNCheckout(Action):
"""Does a Subversion checkout (if directory does not exist yet) or a switch (if it does)."""
def __init__(self, name, workdir, runner, url, module):
"""workdir defines the directory to do the checkout in,
URL the server and path inside repository,
module the path to the files in the checked out copy"""
Action.__init__(self,name)
self.workdir = workdir
self.runner = runner
self.url = url
self.module = module
self.basedir = os.path.join(abspath(workdir), module)
def execute(self):
cd(self.workdir)
if os.access(self.module, os.F_OK):
cmd = "switch"
else:
cmd = "checkout"
context.runCommand("svn %s %s %s" % (cmd, self.url, self.module))
os.chdir(self.module)
if os.access("autogen.sh", os.F_OK):
context.runCommand("%s ./autogen.sh" % (self.runner))
class GitCheckoutBase:
"""Just sets some common properties for all Git checkout classes: workdir, basedir"""
def __init__(self, name, workdir):
self.workdir = workdir
self.basedir = os.path.join(abspath(workdir), name)
class GitCheckout(GitCheckoutBase, Action):
"""Does a git clone (if directory does not exist yet) or a fetch+checkout (if it does)."""
def __init__(self, name, workdir, runner, url, revision):
"""workdir defines the directory to do the checkout in with 'name' as name of the sub directory,
URL the server and repository,
revision the desired branch or tag"""
Action.__init__(self, name)
        GitCheckoutBase.__init__(self, name, workdir)
self.runner = runner
self.url = url
self.revision = revision
def execute(self):
if os.access(self.basedir, os.F_OK):
cmd = "cd %s && git fetch" % (self.basedir)
else:
cmd = "git clone %s %s && chmod -R g+w %s && cd %s && git config core.sharedRepository group " % (self.url, self.basedir, self.basedir, self.basedir)
context.runCommand(cmd)
context.runCommand("set -x; cd %(dir)s && git show-ref &&"
"((git tag -l | grep -w -q %(rev)s) && git checkout %(rev)s ||"
"((git branch -l | grep -w -q %(rev)s) && git checkout %(rev)s || git checkout -b %(rev)s origin/%(rev)s) && git merge origin/%(rev)s)" %
{"dir": self.basedir,
"rev": self.revision})
os.chdir(self.basedir)
if os.access("autogen.sh", os.F_OK):
context.runCommand("%s ./autogen.sh" % (self.runner))
class GitCopy(GitCheckoutBase, Action):
"""Copy existing git repository and update it to the requested
branch, with local changes stashed before updating and restored
    again afterwards. Automatically merges all proposed-patch branches
    named with a for-<branch>/ prefix (local or remote), skipping those
    which do not apply cleanly."""
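    # (Clarifying example: a patch pushed as the remote branch
    #  "for-master/fix-foo", or a local branch of the same form, is merged
    #  into the temporary "nightly" branch by the merge loop in execute().)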
def __init__(self, name, workdir, runner, sourcedir, revision):
"""workdir defines the directory to create/update the repo in with 'name' as name of the sub directory,
sourcedir a directory which must contain such a repo already,
revision the desired branch or tag"""
Action.__init__(self, name)
GitCheckoutBase.__init__(self, name, workdir)
self.runner = runner
self.sourcedir = sourcedir
self.revision = revision
self.patchlog = os.path.join(abspath(workdir), name + "-source.log")
self.__getitem__ = lambda x: getattr(self, x)
def execute(self):
if not os.access(self.basedir, os.F_OK):
context.runCommand("(mkdir -p %s && cp -a -l %s/%s %s) || ( rm -rf %s && false )" %
(self.workdir, self.sourcedir, self.name, self.workdir, self.basedir))
os.chdir(self.basedir)
cmd = " && ".join([
'rm -f %(patchlog)s',
'echo "save local changes with stash under a fixed name <rev>-nightly"',
'rev=$(git stash create)',
'git branch -f %(revision)s-nightly ${rev:-HEAD}',
'echo "check out branch as "nightly" and integrate all proposed patches (= <revision>/... branches)"',
# switch to detached head, to allow removal of branches
'git checkout -q $( git show-ref --head --hash | head -1 )',
'if git branch | grep -q -w "^..%(revision)s$"; then git branch -D %(revision)s; fi',
'if git branch | grep -q -w "^..nightly$"; then git branch -D nightly; fi',
# fetch
'echo "remove stale merge branches and fetch anew"',
'git branch -r -D $( git branch -r | grep -e "/for-%(revision)s/" ) ',
'git branch -D $( git branch | grep -e "^ for-%(revision)s/" ) ',
'git fetch',
'git fetch --tags',
# pick tag or remote branch
'if git tag | grep -q -w %(revision)s; then base=%(revision)s; git checkout -f -b nightly %(revision)s; ' \
'else base=origin/%(revision)s; git checkout -f -b nightly origin/%(revision)s; fi',
# integrate remote branches first, followed by local ones;
# the hope is that local branches apply cleanly on top of the remote ones
'for patch in $( (git branch -r --no-merged origin/%(revision)s; git branch --no-merged origin/%(revision)s) | sed -e "s/^..//" | grep -e "^for-%(revision)s/" -e "/for-%(revision)s/" ); do ' \
'if git merge $patch; then echo >>%(patchlog)s $patch: okay; ' \
'else echo >>%(patchlog)s $patch: failed to apply; git reset --hard; fi; done',
'echo "restore <rev>-nightly and create permanent branch <rev>-nightly-before-<date>-<time> if that fails or new tree is different"',
# only apply stash when really a stash
'if ( git log -n 1 --oneline %(revision)s-nightly | grep -q " WIP on" && ! git stash apply %(revision)s-nightly ) || ! git diff --quiet %(revision)s-nightly..nightly; then ' \
'git branch %(revision)s-nightly-before-$(date +%%Y-%%m-%%d-%%H-%%M) %(revision)s-nightly; '
'fi',
'echo "document local patches"',
'rm -f ../%(name)s-*.patch',
'git format-patch -o .. $base..nightly',
'(cd ..; for i in [0-9]*.patch; do [ ! -f "$i" ] || mv $i %(name)s-$i; done)',
'git describe --tags --always nightly | sed -e "s/\(.*\)-\([0-9][0-9]*\)-g\(.*\)/\\1 + \\2 commit(s) = \\3/" >>%(patchlog)s',
'( git status | grep -q "working directory clean" && echo "working directory clean" || ( echo "working directory dirty" && ( echo From: nightly testing ; echo Subject: [PATCH 1/1] uncommitted changes ; echo ; git status; echo; git diff HEAD ) >../%(name)s-1000-unstaged.patch ) ) >>%(patchlog)s'
]) % self
context.runCommand(cmd, dumpCommands=True)
if os.access("autogen.sh", os.F_OK):
context.runCommand("%s ./autogen.sh" % (self.runner))
class AutotoolsBuild(Action):
def __init__(self, name, src, configargs, runner, dependencies):
"""Runs configure from the src directory with the given arguments.
runner is a prefix for the configure command and can be used to setup the
environment."""
Action.__init__(self, name)
self.src = src
self.configargs = configargs
self.runner = runner
self.dependencies = dependencies
self.installdir = os.path.join(context.tmpdir, "install")
self.builddir = os.path.join(context.tmpdir, "build")
def execute(self):
del_dir(self.builddir)
cd(self.builddir)
context.runCommand("%s %s/configure %s" % (self.runner, self.src, self.configargs))
context.runCommand("%s %s install DESTDIR=%s" % (self.runner, context.make, self.installdir))
class SyncEvolutionTest(Action):
def __init__(self, name, build, serverlogs, runner, tests, sources, testenv="", lineFilter=None, testPrefix="", serverName="", testBinary="./client-test"):
"""Execute TestEvolution for all (empty tests) or the
selected tests."""
Action.__init__(self, name)
self.isserver = True
self.build = build
self.srcdir = os.path.join(build.builddir, "src")
self.serverlogs = serverlogs
self.runner = runner
self.tests = tests
self.sources = sources
self.testenv = testenv
if build.name:
self.dependencies.append(build.name)
self.lineFilter = lineFilter
self.testPrefix = testPrefix
self.serverName = serverName
if not self.serverName:
self.serverName = name
self.testBinary = testBinary
self.alarmSeconds = 1200
def execute(self):
resdir = os.getcwd()
os.chdir(self.build.builddir)
# clear previous test results
context.runCommand("%s %s testclean" % (self.runner, context.make))
os.chdir(self.srcdir)
try:
# use installed backends if available
backenddir = os.path.join(self.build.installdir, "usr/lib/syncevolution/backends")
if not os.access(backenddir, os.F_OK):
# fallback: relative to client-test inside the current directory
backenddir = "backends"
# same with configs and templates, except that they use the source as fallback
confdir = os.path.join(self.build.installdir, "usr/share/syncevolution/xml")
if not os.access(confdir, os.F_OK):
confdir = os.path.join(sync.basedir, "src/syncevo/configs")
templatedir = os.path.join(self.build.installdir, "usr/share/syncevolution/templates")
if not os.access(templatedir, os.F_OK):
templatedir = os.path.join(sync.basedir, "src/templates")
datadir = os.path.join(self.build.installdir, "usr/share/syncevolution")
if not os.access(datadir, os.F_OK):
# fallback works for bluetooth_products.ini but will fail for other files
datadir = os.path.join(sync.basedir, "src/dbus/server")
installenv = \
"SYNCEVOLUTION_DATA_DIR=%s "\
"SYNCEVOLUTION_TEMPLATE_DIR=%s " \
"SYNCEVOLUTION_XML_CONFIG_DIR=%s " \
"SYNCEVOLUTION_BACKEND_DIR=%s " \
% ( datadir, templatedir, confdir, backenddir )
cmd = "%s %s %s %s %s ./syncevolution" % (self.testenv, installenv, self.runner, context.setupcmd, self.name)
context.runCommand(cmd)
# proxy must be set in test config! Necessary because not all tests work with the env proxy (local CalDAV, for example).
basecmd = "http_proxy= " \
"CLIENT_TEST_SERVER=%(server)s " \
"CLIENT_TEST_SOURCES=%(sources)s " \
"SYNC_EVOLUTION_EVO_CALENDAR_DELAY=1 " \
"CLIENT_TEST_ALARM=%(alarm)d " \
"%(env)s %(installenv)s" \
"CLIENT_TEST_LOG=%(log)s " \
"CLIENT_TEST_EVOLUTION_PREFIX=%(evoprefix)s " \
"%(runner)s " \
"env LD_LIBRARY_PATH=build-synthesis/src/.libs:.libs:syncevo/.libs:gdbus/.libs:gdbusxx/.libs:$LD_LIBRARY_PATH PATH=backends/webdav:.:$PATH %(testprefix)s " \
"%(testbinary)s" % \
{ "server": self.serverName,
"sources": ",".join(self.sources),
"alarm": self.alarmSeconds,
"env": self.testenv,
"installenv": installenv,
"log": self.serverlogs,
"evoprefix": context.databasePrefix,
"runner": self.runner,
"testbinary": self.testBinary,
"testprefix": self.testPrefix }
enabled = context.enabled.get(self.name)
if not enabled:
enabled = self.tests
enabled = re.split("[ ,]", enabled.strip())
if enabled:
tests = []
for test in enabled:
if test == "Client::Sync" and context.sanitychecks:
# Replace with one simpler, faster testItems test, but be careful to
# pick an enabled source and the right mode (XML vs. WBXML).
# The first listed source and WBXML should be safe.
tests.append("Client::Sync::%s::testItems" % self.sources[0])
else:
tests.append(test)
context.runCommand("%s %s" % (basecmd, " ".join(tests)))
else:
context.runCommand(basecmd)
finally:
tocopy = re.compile(r'.*\.log|.*\.client.[AB]|.*\.(cpp|h|c)\.html|.*\.log\.html')
toconvert = re.compile(r'Client_.*\.log')
htaccess = file(os.path.join(resdir, ".htaccess"), "a")
for f in os.listdir(self.srcdir):
if tocopy.match(f):
error = copyLog(f, resdir, htaccess, self.lineFilter)
if toconvert.match(f):
# also convert client-test log files to HTML
tohtml = os.path.join(resdir, f + ".html")
os.system("env PATH=.:$PATH synclog2html %s >%s" % (f, tohtml))
basehtml = f + ".html"
if os.path.exists(basehtml):
os.unlink(basehtml)
os.symlink(tohtml, basehtml)
if error:
htaccess.write('AddDescription "%s" %s\n' % (error, basehtml))
###################################################################
# Configuration part
###################################################################
parser = optparse.OptionParser()
parser.add_option("-e", "--enable",
action="append", type="string", dest="enabled", default=[],
help="use this to enable specific actions instead of executing all of them (can be used multiple times and accepts enable=test1,test2 test3,... test lists)")
parser.add_option("-n", "--no-logs",
action="store_true", dest="nologs",
help="print to stdout/stderr directly instead of redirecting into log files")
parser.add_option("-l", "--list",
action="store_true", dest="list",
help="list all available actions")
parser.add_option("-s", "--skip",
action="append", type="string", dest="skip", default=[],
help="instead of executing this action assume that it completed earlier (can be used multiple times)")
parser.add_option("", "--tmp",
type="string", dest="tmpdir", default="",
help="temporary directory for intermediate files")
parser.add_option("", "--workdir",
type="string", dest="workdir", default=None,
help="directory for files which might be reused between runs")
parser.add_option("", "--database-prefix",
type="string", dest="databasePrefix", default="Test_",
help="defines database names (<prefix>_<type>_1/2), must exist")
parser.add_option("", "--resultdir",
type="string", dest="resultdir", default="",
help="directory for log files and results")
parser.add_option("", "--lastresultdir",
type="string", dest="lastresultdir", default="",
help="directory for last day's log files and results")
parser.add_option("", "--datadir",
type="string", dest="datadir", default=os.path.dirname(os.path.abspath(os.path.expanduser(os.path.expandvars(sys.argv[0])))),
help="directory for files used by report generation")
parser.add_option("", "--resulturi",
type="string", dest="uri", default=None,
help="URI that corresponds to --resultdir, if given this is used in mails instead of --resultdir")
parser.add_option("", "--shell",
type="string", dest="shell", default="",
help="a prefix which is put in front of a command to execute it (can be used for e.g. run_garnome)")
parser.add_option("", "--test-prefix",
type="string", dest="testprefix", default="",
help="a prefix which is put in front of client-test (e.g. valgrind)")
parser.add_option("", "--sourcedir",
type="string", dest="sourcedir", default=None,
help="directory which contains 'syncevolution' and 'libsynthesis' code repositories; if given, those repositories will be used as starting point for testing instead of checking out directly")
parser.add_option("", "--no-sourcedir-copy",
action="store_true", dest="nosourcedircopy", default=False,
help="instead of copying the content of --sourcedir and integrating patches automatically, use the content directly")
parser.add_option("", "--sourcedir-copy",
action="store_false", dest="nosourcedircopy",
help="reverts a previous --no-sourcedir-copy")
parser.add_option("", "--syncevo-tag",
type="string", dest="syncevotag", default="master",
help="the tag of SyncEvolution (e.g. syncevolution-0.7, default is 'master'")
parser.add_option("", "--synthesis-tag",
type="string", dest="synthesistag", default="master",
help="the tag of the synthesis library (default = master in the moblin.org repo)")
parser.add_option("", "--activesyncd-tag",
type="string", dest="activesyncdtag", default="master",
help="the tag of the activesyncd (default = master)")
parser.add_option("", "--configure",
type="string", dest="configure", default="",
help="additional parameters for configure")
parser.add_option("", "--openembedded",
type="string", dest="oedir",
help="the build directory of the OpenEmbedded cross-compile environment")
parser.add_option("", "--host",
type="string", dest="host",
help="platform identifier like x86_64-linux; if this and --openembedded is set, then cross-compilation is tested")
parser.add_option("", "--bin-suffix",
type="string", dest="binsuffix", default="",
help="string to append to name of binary .tar.gz distribution archive (default empty = no binary distribution built)")
parser.add_option("", "--package-suffix",
type="string", dest="packagesuffix", default="",
help="string to insert into package name (default empty = no binary distribution built)")
parser.add_option("", "--synthesis",
type="string", dest="synthesisdir", default="",
help="directory with Synthesis installation")
parser.add_option("", "--funambol",
type="string", dest="funamboldir", default="/scratch/Funambol",
help="directory with Funambol installation")
parser.add_option("", "--from",
type="string", dest="sender",
help="sender of email if recipients are also specified")
parser.add_option("", "--to",
action="append", type="string", dest="recipients",
help="recipient of result email (option can be given multiple times)")
parser.add_option("", "--mailhost",
type="string", dest="mailhost", default="localhost",
help="SMTP mail server to be used for outgoing mail")
parser.add_option("", "--subject",
type="string", dest="subject", default="SyncML Tests " + time.strftime("%Y-%m-%d %H-%M"),
help="subject of result email (default is \"SyncML Tests <date> <time>\"")
parser.add_option("", "--evosvn",
action="append", type="string", dest="evosvn", default=[],
help="<name>=<path>: compiles Evolution from source under a short name, using Paul Smith's Makefile and config as found in <path>")
parser.add_option("", "--prebuilt",
action="store", type="string", dest="prebuilt", default=None,
help="a directory where SyncEvolution was build before: enables testing using those binaries (can be used once, instead of compiling)")
parser.add_option("", "--setup-command",
type="string", dest="setupcmd",
help="invoked with <test name> <args to start syncevolution>, should setup local account for the test")
parser.add_option("", "--make-command",
type="string", dest="makecmd", default="make",
help="command to use instead of plain make, for example 'make -j'")
parser.add_option("", "--sanity-checks",
action="store_true", dest="sanitychecks", default=False,
help="run limited number of sanity checks instead of full set")
(options, args) = parser.parse_args()
if options.recipients and not options.sender:
print "sending email also requires sender argument"
sys.exit(1)
# accept --enable foo[=args]
enabled = {}
for option in options.enabled:
l = option.split("=", 1)
if len(l) == 2:
enabled[l[0]] = l[1]
else:
enabled[option] = None
context = Context(options.tmpdir, options.resultdir, options.uri, options.workdir,
options.subject, options.sender, options.recipients, options.mailhost,
enabled, options.skip, options.nologs, options.setupcmd,
options.makecmd, options.sanitychecks, options.lastresultdir, options.datadir)
context.databasePrefix = options.databasePrefix
class EvoSvn(Action):
"""Builds Evolution from SVN using Paul Smith's Evolution Makefile."""
def __init__(self, name, workdir, resultdir, makedir, makeoptions):
"""workdir defines the directory to do the build in,
makedir is the directory which contains the Makefile and its local.mk,
makeoptions contain additional parameters for make (like BRANCH=2.20 PREFIX=/tmp/runtests/evo)."""
Action.__init__(self,name)
self.workdir = workdir
self.resultdir = resultdir
self.makedir = makedir
self.makeoptions = makeoptions
def execute(self):
cd(self.workdir)
shutil.copy2(os.path.join(self.makedir, "Makefile"), ".")
shutil.copy2(os.path.join(self.makedir, "local.mk"), ".")
if os.access(self.resultdir, os.F_OK):
shutil.rmtree(self.resultdir)
os.system("rm -f .stamp/*.install")
localmk = open("local.mk", "a")
localmk.write("PREFIX := %s\n" % self.resultdir)
localmk.close()
if os.access(".stamp", os.F_OK):
context.runCommand("make check-changelog")
context.runCommand("%s %s" % (context.make, self.makeoptions))
for evosvn in options.evosvn:
name, path = evosvn.split("=")
evosvn = EvoSvn("evolution" + name,
os.path.join(options.tmpdir, "evolution%s-build" % name),
os.path.join(options.tmpdir, "evolution%s-result" % name),
path,
"SUDO=true")
context.add(evosvn)
class SyncEvolutionCheckout(GitCheckout):
def __init__(self, name, revision):
"""checkout SyncEvolution"""
GitCheckout.__init__(self,
name, context.workdir,
# parameter to autogen.sh in SyncEvolution: also
# check for clean Synthesis source
"SYNTHESISSRC=../libsynthesis %s" % options.shell,
"[email protected]:meego-middleware/syncevolution.git",
revision)
class SynthesisCheckout(GitCheckout):
def __init__(self, name, revision):
"""checkout libsynthesis"""
GitCheckout.__init__(self,
name, context.workdir, options.shell,
"[email protected]:meego-middleware/libsynthesis.git",
revision)
class ActiveSyncDCheckout(GitCheckout):
def __init__(self, name, revision):
"""checkout activesyncd"""
GitCheckout.__init__(self,
name, context.workdir, options.shell,
"git://git.gnome.org/evolution-activesync",
revision)
class SyncEvolutionBuild(AutotoolsBuild):
def execute(self):
AutotoolsBuild.execute(self)
# LDFLAGS=-no-install is needed to ensure that the resulting
# client-test is a normal, usable executable. Otherwise we
# can have the following situation:
# - A wrapper script is created on the reference platform.
# - It is never executed there, which means that it won't
# produce the final .libs/lt-client-test executable
# (done on demand by libtool wrapper).
        # - The wrapper script is invoked for the first time
# on some other platform, it tries to link, but fails
# because libs are different.
context.runCommand("%s %s src/client-test CXXFLAGS='-O0 -g' LDFLAGS=-no-install" % (self.runner, context.make))
class NopAction(Action):
def __init__(self, name):
Action.__init__(self, name)
self.status = Action.DONE
self.execute = self.nop
class NopSource(GitCheckoutBase, NopAction):
def __init__(self, name, sourcedir):
NopAction.__init__(self, name)
GitCheckoutBase.__init__(self, name, sourcedir)
if options.sourcedir:
if options.nosourcedircopy:
libsynthesis = NopSource("libsynthesis", options.sourcedir)
else:
libsynthesis = GitCopy("libsynthesis",
options.workdir,
options.shell,
options.sourcedir,
options.synthesistag)
else:
libsynthesis = SynthesisCheckout("libsynthesis", options.synthesistag)
context.add(libsynthesis)
if options.sourcedir:
if options.nosourcedircopy:
activesyncd = NopSource("activesyncd", options.sourcedir)
else:
activesyncd = GitCopy("activesyncd",
options.workdir,
options.shell,
options.sourcedir,
options.activesyncdtag)
else:
activesyncd = ActiveSyncDCheckout("activesyncd", options.activesyncdtag)
context.add(activesyncd)
if options.sourcedir:
if options.nosourcedircopy:
sync = NopSource("syncevolution", options.sourcedir)
else:
sync = GitCopy("syncevolution",
options.workdir,
"SYNTHESISSRC=%s %s" % (libsynthesis.basedir, options.shell),
options.sourcedir,
options.syncevotag)
else:
sync = SyncEvolutionCheckout("syncevolution", options.syncevotag)
context.add(sync)
source = []
if options.synthesistag:
source.append("--with-synthesis-src=%s" % libsynthesis.basedir)
if options.activesyncdtag:
source.append("--with-activesyncd-src=%s" % activesyncd.basedir)
# determine where binaries come from:
# either compile anew or prebuilt
if options.prebuilt:
compile = NopAction("compile")
compile.builddir = options.prebuilt
compile.installdir = os.path.join(options.prebuilt, "../install")
else:
compile = SyncEvolutionBuild("compile",
sync.basedir,
"%s %s" % (options.configure, " ".join(source)),
options.shell,
[ libsynthesis.name, sync.name ])
context.add(compile)
class SyncEvolutionCross(AutotoolsBuild):
def __init__(self, syncevosrc, synthesissrc, host, oedir, dependencies):
"""cross-compile SyncEvolution using a certain OpenEmbedded build dir:
host is the platform identifier (e.g. x86_64-linux),
oedir must contain the 'tmp/cross' and 'tmp/staging/<host>' directories"""
if synthesissrc:
synthesis_source = "--with-funambol-src=%s" % synthesissrc
else:
synthesis_source = ""
AutotoolsBuild.__init__(self, "cross-compile", syncevosrc, \
"--host=%s %s CPPFLAGS=-I%s/tmp/staging/%s/include/ LDFLAGS='-Wl,-rpath-link=%s/tmp/staging/%s/lib/ -Wl,--allow-shlib-undefined'" % \
( host, synthesis_source, oedir, host, oedir, host ), \
"PKG_CONFIG_PATH=%s/tmp/staging/%s/share/pkgconfig PATH=%s/tmp/cross/bin:$PATH" % \
( oedir, host, oedir ),
dependencies)
self.builddir = os.path.join(context.tmpdir, host)
def execute(self):
AutotoolsBuild.execute(self)
if options.oedir and options.host:
cross = SyncEvolutionCross(sync.basedir, libsynthesis.basedir, options.host, options.oedir, [ libsynthesis.name, sync.name, compile.name ])
context.add(cross)
class SyncEvolutionDist(AutotoolsBuild):
def __init__(self, name, binsuffix, packagesuffix, binrunner, dependencies):
"""Builds a normal and a binary distribution archive in a directory where
SyncEvolution was configured and compiled before.
"""
AutotoolsBuild.__init__(self, name, "", "", binrunner, dependencies)
self.binsuffix = binsuffix
self.packagesuffix = packagesuffix
def execute(self):
cd(self.builddir)
if self.packagesuffix:
context.runCommand("%s %s BINSUFFIX=%s deb rpm" % (self.runner, context.make, self.packagesuffix))
put, get = os.popen4("%s dpkg-architecture -qDEB_HOST_ARCH" % (self.runner))
for arch in get.readlines():
if "i386" in arch:
context.runCommand("%s %s BINSUFFIX=%s PKGARCH=lpia deb" % (self.runner, context.make, self.packagesuffix))
break
if self.binsuffix:
context.runCommand("%s %s BINSUFFIX=%s distbin" % (self.runner, context.make, self.binsuffix))
context.runCommand("%s %s distcheck" % (self.runner, context.make))
context.runCommand("%s %s DISTCHECK_CONFIGURE_FLAGS=--enable-gui distcheck" % (self.runner, context.make))
context.runCommand("%s %s 'DISTCHECK_CONFIGURE_FLAGS=--disable-ecal --disable-ebook' distcheck" % (self.runner, context.make))
dist = SyncEvolutionDist("dist",
options.binsuffix,
options.packagesuffix,
options.shell,
[ compile.name ])
context.add(dist)
evolutiontest = SyncEvolutionTest("evolution", compile,
"", options.shell,
"Client::Source SyncEvolution",
[],
"CLIENT_TEST_FAILURES="
# testReadItem404 works with some Akonadi versions (Ubuntu Lucid),
# but not all (Debian Testing). The other tests always fail,
# the code needs to be fixed.
"Client::Source::kde_.*::testReadItem404,"
"Client::Source::kde_.*::testDelete404,"
"Client::Source::kde_.*::testImport.*,"
"Client::Source::kde_.*::testRemoveProperties,"
" "
"CLIENT_TEST_SKIP="
"Client::Source::file_event::LinkedItemsDefault::testLinkedItemsInsertBothUpdateChildNoIDs,"
"Client::Source::file_event::LinkedItemsDefault::testLinkedItemsUpdateChildNoIDs,"
"Client::Source::file_event::LinkedItemsWithVALARM::testLinkedItemsInsertBothUpdateChildNoIDs,"
"Client::Source::file_event::LinkedItemsWithVALARM::testLinkedItemsUpdateChildNoIDs,"
"Client::Source::file_event::LinkedItemsAllDay::testLinkedItemsInsertBothUpdateChildNoIDs,"
"Client::Source::file_event::LinkedItemsAllDay::testLinkedItemsUpdateChildNoIDs,"
"Client::Source::file_event::LinkedItemsNoTZ::testLinkedItemsInsertBothUpdateChildNoIDs,"
"Client::Source::file_event::LinkedItemsNoTZ::testLinkedItemsUpdateChildNoIDs",
testPrefix=options.testprefix)
context.add(evolutiontest)
# test-dbus.py itself doesn't need to run under valgrind, remove it...
shell = re.sub(r'\S*valgrind\S*', '', options.shell)
testprefix = re.sub(r'\S*valgrind\S*', '', options.testprefix)
dbustest = SyncEvolutionTest("dbus", compile,
"", shell,
"",
[],
# ... but syncevo-dbus-server started by test-dbus.py should use valgrind
testenv="TEST_DBUS_PREFIX='%s'" % options.testprefix,
testPrefix=testprefix,
testBinary=os.path.join(sync.basedir,
"test",
"test-dbus.py -v"))
context.add(dbustest)
test = SyncEvolutionTest("googlecalendar", compile,
"", options.shell,
"Client::Sync::eds_event::testItems Client::Source::google_caldav",
[ "google_caldav", "eds_event" ],
"CLIENT_TEST_WEBDAV='google caldav testcases=testcases/google_event.ics' "
"CLIENT_TEST_NUM_ITEMS=10 " # don't stress server
"CLIENT_TEST_SIMPLE_UID=1 " # server gets confused by UID with special characters
"CLIENT_TEST_UNIQUE_UID=1 " # server keeps backups and restores old data unless UID is unieque
"CLIENT_TEST_MODE=server " # for Client::Sync
"CLIENT_TEST_FAILURES="
# http://code.google.com/p/google-caldav-issues/issues/detail?id=61 "cannot remove detached recurrence"
"Client::Source::google_caldav::LinkedItemsDefault::testLinkedItemsRemoveNormal,"
"Client::Source::google_caldav::LinkedItemsNoTZ::testLinkedItemsRemoveNormal,"
"Client::Source::google_caldav::LinkedItemsWithVALARM::testLinkedItemsRemoveNormal,"
"Client::Source::google_caldav::LinkedItemsAllDayGoogle::testLinkedItemsRemoveNormal,"
,
testPrefix=options.testprefix)
context.add(test)
test = SyncEvolutionTest("yahoo", compile,
"", options.shell,
"Client::Sync::eds_contact::testItems Client::Sync::eds_event::testItems Client::Source::yahoo_caldav Client::Source::yahoo_carddav",
[ "yahoo_caldav", "yahoo_carddav", "eds_event", "eds_contact" ],
"CLIENT_TEST_WEBDAV='yahoo caldav carddav carddav/testcases=testcases/yahoo_contact.vcf' "
"CLIENT_TEST_NUM_ITEMS=10 " # don't stress server
"CLIENT_TEST_SIMPLE_UID=1 " # server gets confused by UID with special characters
"CLIENT_TEST_MODE=server " # for Client::Sync
,
testPrefix=options.testprefix)
context.add(test)
test = SyncEvolutionTest("oracle", compile,
"", options.shell,
"Client::Sync::eds_contact::testItems Client::Sync::eds_event::testItems Client::Source::oracle_caldav Client::Source::oracle_carddav",
[ "oracle_caldav", "oracle_carddav", "eds_event", "eds_contact" ],
"CLIENT_TEST_WEBDAV='oracle caldav carddav' "
"CLIENT_TEST_NUM_ITEMS=10 " # don't stress server
"CLIENT_TEST_MODE=server " # for Client::Sync
,
testPrefix=options.testprefix)
context.add(test)
test = SyncEvolutionTest("egroupware-dav", compile,
"", options.shell,
"Client::Sync::eds_contact::testItems Client::Sync::eds_event::testItems Client::Source::egroupware-dav_caldav Client::Source::egroupware-dav_carddav",
[ "egroupware-dav_caldav", "egroupware-dav_carddav", "eds_event", "eds_contact" ],
"CLIENT_TEST_WEBDAV='egroupware-dav caldav carddav' "
"CLIENT_TEST_NUM_ITEMS=10 " # don't stress server
"CLIENT_TEST_MODE=server " # for Client::Sync
,
testPrefix=options.testprefix)
context.add(test)
test = SyncEvolutionTest("davical", compile,
"", options.shell,
"Client::Sync::eds_contact Client::Sync::eds_event Client::Sync::eds_task Client::Source::davical_caldav Client::Source::davical_caldavtodo Client::Source::davical_carddav",
[ "davical_caldav", "davical_caldavtodo", "davical_carddav", "eds_event", "eds_task", "eds_contact" ],
"CLIENT_TEST_WEBDAV='davical caldav caldavtodo carddav' "
"CLIENT_TEST_NUM_ITEMS=10 " # don't stress server
"CLIENT_TEST_SIMPLE_UID=1 " # server gets confused by UID with special characters
"CLIENT_TEST_MODE=server " # for Client::Sync
,
testPrefix=options.testprefix)
context.add(test)
test = SyncEvolutionTest("apple", compile,
"", options.shell,
"Client::Sync::eds_event Client::Sync::eds_task Client::Sync::eds_contact Client::Source::apple_caldav Client::Source::apple_caldavtodo Client::Source::apple_carddav",
[ "apple_caldav", "apple_caldavtodo", "apple_carddav", "eds_event", "eds_task", "eds_contact" ],
"CLIENT_TEST_WEBDAV='apple caldav caldavtodo carddav' "
"CLIENT_TEST_NUM_ITEMS=250 " # test is local, so we can afford a higher number
"CLIENT_TEST_MODE=server " # for Client::Sync
,
testPrefix=options.testprefix)
# but even with a local server the test runs for a long time
test.alarmSeconds = 2400
context.add(test)
class ActiveSyncTest(SyncEvolutionTest):
def __init__(self, name, sources = [ "eas_event", "eas_contact", "eds_event", "eds_contact" ],
env = "",
knownFailures = []):
tests = []
if "eds_event" in sources:
tests.append("Client::Sync::eds_event")
if "eds_contact" in sources:
tests.append("Client::Sync::eds_contact")
if "eas_event" in sources:
tests.append("Client::Source::eas_event")
if "eas_contact" in sources:
tests.append("Client::Source::eas_contact")
SyncEvolutionTest.__init__(self, name,
compile,
"", options.shell,
tests,
sources,
env +
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_MODE=server " # for Client::Sync
"EAS_SOUP_LOGGER=1 "
"EAS_DEBUG=5 "
"EAS_DEBUG_DETACHED_RECURRENCES=1 "
"CLIENT_TEST_FAILURES=" +
",".join(knownFailures +
# time zone mismatch between client and server,
# still need to investigate
[ ".*::LinkedItemsWeekly::testSubsetStart11Skip[0-3]",
".*::LinkedItemsWeekly::testSubsetStart22Skip[1-3]",
".*::LinkedItemsWeekly::testSubsetStart33Skip[1-3]",
".*::LinkedItemsWeekly::testSubsetStart44.*" ] +
# testExtensions disables the synccompare simplifications for
# BDAY and friends, and therefore fails.
[ ".*::testExtensions" ]
) +
" "
"CLIENT_TEST_SKIP="
# See "[SyncEvolution] one-way sync + sync tokens not updated":
# one-way sync keeps using old (and obsolete) sync keys,
# thus running into unexpected slow syncs with ActiveSync.
"Client::Sync::.*::testOneWayFromClient,"
"Client::Sync::.*::testOneWayFromLocal,"
" "
"CLIENT_TEST_LOG=activesyncd.log "
,
testPrefix=" ".join(("env EAS_DEBUG_FILE=activesyncd.log",
os.path.join(sync.basedir, "test", "wrappercheck.sh"),
options.testprefix,
os.path.join(compile.builddir, "src", "backends", "activesync", "activesyncd", "install", "libexec", "activesyncd"),
"--",
options.testprefix)))
def executeWithActiveSync(self):
'''start and stop activesyncd before/after running the test'''
args = []
if options.testprefix:
args.append(options.testprefix)
args.append(os.path.join(compile.builddir, "src", "backends", "activesync", "activesyncd", "install", "libexec", "activesyncd"))
env = copy.deepcopy(os.environ)
env['EAS_SOUP_LOGGER'] = '1'
env['EAS_DEBUG'] = '5'
env['EAS_DEBUG_DETACHED_RECURRENCES'] = '1'
activesyncd = subprocess.Popen(args,
env=env)
try:
SyncEvolutionTest.execute(self)
finally:
if not ShutdownSubprocess(activesyncd, 5):
raise Exception("activesyncd had to be killed with SIGKILL")
returncode = activesyncd.poll()
if returncode is not None:
if returncode != 0:
raise Exception("activesyncd returned %d" % returncode)
else:
raise Exception("activesyncd did not return")
test = ActiveSyncTest("exchange")
context.add(test)
test = ActiveSyncTest("googleeas",
["eds_contact", "eas_contact"],
env="CLIENT_TEST_DELAY=10 CLIENT_TEST_SOURCE_DELAY=10 ",
knownFailures=[
# Google does not support the Fetch operation, leading
# to an unhandled generic error.
".*::testReadItem404",
# Remove of PHOTO not supported by Google (?),
# works with Exchange.
"Client::Source::eas_contact::testRemoveProperties",
])
context.add(test)
syncevoPrefix=" ".join([os.path.join(sync.basedir, "test", "wrappercheck.sh")] +
# redirect output of command run under valgrind (when
# using valgrind) or of the whole command (otherwise)
# to syncevohttp.log
( 'valgrindcheck' in options.testprefix and \
[ "VALGRIND_CMD_LOG=syncevohttp.log" ] or \
[ "--daemon-log", "syncevohttp.log" ] ) +
[ options.testprefix,
os.path.join(compile.installdir, "usr", "libexec", "syncevo-dbus-server"),
"--",
os.path.join(sync.basedir, "test", "wrappercheck.sh"),
# also redirect additional syncevo-http-server
# output into the same file
"--daemon-log", "syncevohttp.log",
os.path.join(compile.installdir, "usr", "bin", "syncevo-http-server"),
"--quiet",
"http://127.0.0.1:9999/syncevolution",
"--",
options.testprefix])
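# The resulting prefix nests two wrappercheck.sh invocations: the outer one supervises
# syncevo-dbus-server (optionally under valgrind), the inner one supervises
# syncevo-http-server on http://127.0.0.1:9999/syncevolution; both log to syncevohttp.log.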
# The test uses EDS on the clients and a server config with file
# backends - normal tests.
test = SyncEvolutionTest("edsfile",
compile,
"", options.shell,
"Client::Sync::eds_event Client::Sync::eds_contact Client::Sync::eds_event_eds_contact",
[ "eds_event", "eds_contact" ],
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_LOG=syncevohttp.log "
# Slow, and running many syncs still fails when using
# valgrind. Tested separately below in "edsxfile".
# "CLIENT_TEST_RETRY=t "
# "CLIENT_TEST_RESEND=t "
# "CLIENT_TEST_SUSPEND=t "
# server supports refresh-from-client, use it for
# more efficient test setup
"CLIENT_TEST_DELETE_REFRESH=1 "
# server supports multiple cycles inside the same session
"CLIENT_TEST_PEER_CAN_RESTART=1 "
# server cannot detect pairs based on UID/RECURRENCE-ID
"CLIENT_TEST_ADD_BOTH_SIDES_SERVER_IS_DUMB=1 "
"CLIENT_TEST_SKIP="
,
testPrefix=syncevoPrefix)
context.add(test)
# The test uses EDS on the clients and a server config with file
# backends - suspend/retry/resend tests.
test = SyncEvolutionTest("edsxfile",
compile,
"", options.shell,
"Client::Sync::eds_contact::Retry Client::Sync::eds_contact::Resend Client::Sync::eds_contact::Suspend",
[ "eds_contact" ],
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_LOG=syncevohttp.log "
"CLIENT_TEST_RETRY=t "
"CLIENT_TEST_RESEND=t "
"CLIENT_TEST_SUSPEND=t "
# server supports refresh-from-client, use it for
# more efficient test setup
"CLIENT_TEST_DELETE_REFRESH=1 "
# server supports multiple cycles inside the same session
"CLIENT_TEST_PEER_CAN_RESTART=1 "
# server cannot detect pairs based on UID/RECURRENCE-ID
"CLIENT_TEST_ADD_BOTH_SIDES_SERVER_IS_DUMB=1 "
"CLIENT_TEST_SKIP="
,
testPrefix=syncevoPrefix)
# a lot of syncs per test
test.alarmSeconds = 6000
context.add(test)
# This one uses CalDAV/CardDAV in DAViCal and the same server config
# with file backends as edsfile.
test = SyncEvolutionTest("davfile",
compile,
"", options.shell,
"Client::Sync::davical_caldav Client::Sync::davical_caldavtodo Client::Sync::davical_carddav Client::Sync::davical_caldav_davical_caldavtodo_davical_carddav",
[ "davical_caldav", "davical_caldavtodo", "davical_carddav" ],
"CLIENT_TEST_SIMPLE_UID=1 " # DAViCal server gets confused by UID with special characters
"CLIENT_TEST_WEBDAV='davical caldav caldavtodo carddav' "
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_LOG=syncevohttp.log "
# could be enabled, but reporting result is currently missing (BMC #1009)
# "CLIENT_TEST_RETRY=t "
# "CLIENT_TEST_RESEND=t "
# "CLIENT_TEST_SUSPEND=t "
# server supports refresh-from-client, use it for
# more efficient test setup
"CLIENT_TEST_DELETE_REFRESH=1 "
# server supports multiple cycles inside the same session
"CLIENT_TEST_PEER_CAN_RESTART=1 "
# server cannot detect pairs based on UID/RECURRENCE-ID
"CLIENT_TEST_ADD_BOTH_SIDES_SERVER_IS_DUMB=1 "
"CLIENT_TEST_SKIP="
,
testPrefix=syncevoPrefix)
context.add(test)
# EDS on client side, DAV on server.
test = SyncEvolutionTest("edsdav",
compile,
"", options.shell,
"Client::Sync::eds_event Client::Sync::eds_contact Client::Sync::eds_event_eds_contact",
[ "eds_event", "eds_contact" ],
"CLIENT_TEST_SIMPLE_UID=1 " # DAViCal server gets confused by UID with special characters
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_LOG=syncevohttp.log "
# could be enabled, but reporting result is currently missing (BMC #1009)
# "CLIENT_TEST_RETRY=t "
# "CLIENT_TEST_RESEND=t "
# "CLIENT_TEST_SUSPEND=t "
# server supports refresh-from-client, use it for
# more efficient test setup
"CLIENT_TEST_DELETE_REFRESH=1 "
# server supports multiple cycles inside the same session
"CLIENT_TEST_PEER_CAN_RESTART=1 "
"CLIENT_TEST_SKIP="
,
testPrefix=syncevoPrefix)
context.add(test)
scheduleworldtest = SyncEvolutionTest("scheduleworld", compile,
"", options.shell,
"Client::Sync",
[ "eds_contact",
"eds_event",
"eds_task",
"eds_memo" ],
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_FAILURES="
"Client::Sync::eds_memo::testManyItems,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::testManyItems,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::testManyItems CLIENT_TEST_SKIP=Client::Sync::eds_event::Retry,"
"Client::Sync::eds_event::Suspend,"
"Client::Sync::eds_event::Resend,"
"Client::Sync::eds_contact::Retry,"
"Client::Sync::eds_contact::Suspend,"
"Client::Sync::eds_contact::Resend,"
"Client::Sync::eds_task::Retry,"
"Client::Sync::eds_task::Suspend,"
"Client::Sync::eds_task::Resend,"
"Client::Sync::eds_memo::Retry,"
"Client::Sync::eds_memo::Suspend,"
"Client::Sync::eds_memo::Resend,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::Retry,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::Suspend,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::Resend,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::Retry,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::Suspend,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::Resend "
"CLIENT_TEST_DELAY=5 "
"CLIENT_TEST_RESEND_TIMEOUT=5 "
"CLIENT_TEST_INTERRUPT_AT=1",
testPrefix=options.testprefix)
context.add(scheduleworldtest)
egroupwaretest = SyncEvolutionTest("egroupware", compile,
"", options.shell,
"Client::Sync::eds_contact "
"Client::Sync::eds_event::testCopy "
"Client::Sync::eds_event::testUpdate "
"Client::Sync::eds_event::testDelete "
"Client::Sync::eds_contact_eds_event::testCopy "
"Client::Sync::eds_contact_eds_event::testUpdate "
"Client::Sync::eds_contact_eds_event::testDelete "
"Client::Sync::eds_event_eds_contact::testCopy "
"Client::Sync::eds_event_eds_contact::testUpdate "
"Client::Sync::eds_event_eds_contact::testDelete ",
[ "eds_contact",
"eds_event" ],
# ContactSync::testRefreshFromServerSync,ContactSync::testRefreshFromClientSync,ContactSync::testDeleteAllRefresh,ContactSync::testRefreshSemantic,ContactSync::testRefreshStatus - refresh-from-client not supported by server
# ContactSync::testOneWayFromClient - not supported by server?
# ContactSync::testItems - loses a lot of information
# ContactSync::testComplexUpdate - only one phone number preserved
# ContactSync::testMaxMsg,ContactSync::testLargeObject,ContactSync::testLargeObjectBin - server fails to parse extra info?
# ContactSync::testTwinning - duplicates contacts
# CalendarSync::testCopy,CalendarSync::testUpdate - shifts time?
"CLIENT_TEST_FAILURES="
"ContactSync::testRefreshFromServerSync,"
"ContactSync::testRefreshFromClientSync,"
"ContactSync::testDeleteAllRefresh,"
"ContactSync::testRefreshSemantic,"
"ContactSync::testRefreshStatus,"
"ContactSync::testOneWayFromClient,"
"ContactSync::testAddUpdate,"
"ContactSync::testItems,"
"ContactSync::testComplexUpdate,"
"ContactSync::testTwinning,"
"ContactSync::testMaxMsg,"
"ContactSync::testLargeObject,"
"ContactSync::testLargeObjectBin,"
"CalendarSync::testCopy,"
"CalendarSync::testUpdate",
lambda x: x.replace('oasis.ethz.ch','<host hidden>').\
replace('cG9obHk6cWQyYTVtZ1gzZk5GQQ==','xxx'),
testPrefix=options.testprefix)
context.add(egroupwaretest)
class SynthesisTest(SyncEvolutionTest):
def __init__(self, name, build, synthesisdir, runner, testPrefix):
SyncEvolutionTest.__init__(self, name, build, "", # os.path.join(synthesisdir, "logs")
runner,
"Client::Sync",
[ "eds_contact",
"eds_memo" ],
"CLIENT_TEST_SKIP="
"Client::Sync::eds_event::Retry,"
"Client::Sync::eds_event::Suspend,"
"Client::Sync::eds_event::Resend,"
"Client::Sync::eds_contact::Retry,"
"Client::Sync::eds_contact::Suspend,"
"Client::Sync::eds_contact::Resend,"
"Client::Sync::eds_task::Retry,"
"Client::Sync::eds_task::Suspend,"
"Client::Sync::eds_task::Resend,"
"Client::Sync::eds_memo::Retry,"
"Client::Sync::eds_memo::Suspend,"
"Client::Sync::eds_memo::Resend,"
"Client::Sync::eds_contact_eds_memo::Retry,"
"Client::Sync::eds_contact_eds_memo::Suspend,"
"Client::Sync::eds_contact_eds_memo::Resend "
"CLIENT_TEST_NUM_ITEMS=20 "
"CLIENT_TEST_DELAY=2 "
"CLIENT_TEST_RESEND_TIMEOUT=5",
serverName="synthesis",
testPrefix=testPrefix)
self.synthesisdir = synthesisdir
# self.dependencies.append(evolutiontest.name)
def execute(self):
if self.synthesisdir:
context.runCommand("synthesis start \"%s\"" % (self.synthesisdir))
time.sleep(5)
try:
SyncEvolutionTest.execute(self)
finally:
if self.synthesisdir:
context.runCommand("synthesis stop \"%s\"" % (self.synthesisdir))
synthesis = SynthesisTest("synthesis", compile,
options.synthesisdir,
options.shell,
options.testprefix)
context.add(synthesis)
class FunambolTest(SyncEvolutionTest):
def __init__(self, name, build, funamboldir, runner, testPrefix):
if funamboldir:
serverlogs = os.path.join(funamboldir, "ds-server", "logs", "funambol_ds.log")
else:
serverlogs = ""
SyncEvolutionTest.__init__(self, name, build, serverlogs,
runner,
"Client::Sync",
[ "eds_contact",
"eds_event",
"eds_task",
"eds_memo" ],
"CLIENT_TEST_SKIP="
# server duplicates items in add<->add conflict because it
# does not check UID
"Client::Sync::eds_event::testAddBothSides,"
"Client::Sync::eds_event::testAddBothSidesRefresh,"
"Client::Sync::eds_task::testAddBothSides,"
"Client::Sync::eds_task::testAddBothSidesRefresh,"
# test cannot pass because we don't have CtCap info about
# the Funambol server
"Client::Sync::eds_contact::testExtensions,"
" "
"CLIENT_TEST_XML=1 "
"CLIENT_TEST_MAX_ITEMSIZE=2048 "
"CLIENT_TEST_DELAY=10 "
# Using refresh-from-client is important because Funambol
# throttles slow syncs.
"CLIENT_TEST_DELETE_REFRESH=1 "
"CLIENT_TEST_FAILURES="
"Client::Sync::eds_contact::testTwinning,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::testTwinning,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::testTwinning "
"CLIENT_TEST_RESEND_TIMEOUT=5 "
"CLIENT_TEST_INTERRUPT_AT=1",
lineFilter=lambda x: x.replace('dogfood.funambol.com','<host hidden>'),
serverName="funambol",
testPrefix=testPrefix)
self.funamboldir = funamboldir
# self.dependencies.append(evolutiontest.name)
def execute(self):
if self.funamboldir:
context.runCommand("%s/tools/bin/funambol.sh start" % (self.funamboldir))
time.sleep(5)
try:
SyncEvolutionTest.execute(self)
finally:
if self.funamboldir:
context.runCommand("%s/tools/bin/funambol.sh stop" % (self.funamboldir))
funambol = FunambolTest("funambol", compile,
options.funamboldir,
options.shell,
options.testprefix)
context.add(funambol)
zybtest = SyncEvolutionTest("zyb", compile,
"", options.shell,
"Client::Sync",
[ "eds_contact" ],
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_SKIP="
"Client::Sync::eds_contact::Retry,"
"Client::Sync::eds_contact::Suspend,"
"Client::Sync::eds_contact::Resend "
"CLIENT_TEST_DELAY=5 ",
testPrefix=options.testprefix)
context.add(zybtest)
googletest = SyncEvolutionTest("google", compile,
"", options.shell,
"Client::Sync",
[ "eds_contact" ],
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_XML=0 "
"CLIENT_TEST_MAX_ITEMSIZE=2048 "
"CLIENT_TEST_SKIP="
"Client::Sync::eds_contact::Retry,"
"Client::Sync::eds_contact::Suspend,"
"Client::Sync::eds_contact::Resend,"
# refresh-from-client not supported by Google
"Client::Sync::eds_contact::testRefreshFromClientSync,"
"Client::Sync::eds_contact::testRefreshFromClientSemantic,"
"Client::Sync::eds_contact::testRefreshStatus,"
"Client::Sync::eds_contact::testDeleteAllRefresh,"
"Client::Sync::eds_contact::testOneWayFromClient,"
"Client::Sync::eds_contact::testRefreshFromLocalSync,"
"Client::Sync::eds_contact::testOneWayFromLocal,"
# only WBXML supported by Google
"Client::Sync::eds_contact::testItemsXML "
"CLIENT_TEST_DELAY=5 ",
testPrefix=options.testprefix)
context.add(googletest)
mobicaltest = SyncEvolutionTest("mobical", compile,
"", options.shell,
"Client::Sync",
[ "eds_contact",
"eds_event",
"eds_task" ],
# "eds_memo" - no longer works, 400 "Bad Request"
# all-day detection in vCalendar 1.0
# only works if client and server
# agree on the time zone (otherwise the start/end times
# do not align with midnight); the nightly test account
# happens to use Europe/Berlin
"TZ=Europe/Berlin "
"CLIENT_TEST_NOCHECK_SYNCMODE=1 "
"CLIENT_TEST_MAX_ITEMSIZE=2048 "
"CLIENT_TEST_SKIP="
# server duplicates items in add<->add conflict because it
# does not check UID
"Client::Sync::eds_event::testAddBothSides,"
"Client::Sync::eds_event::testAddBothSidesRefresh,"
"Client::Sync::eds_task::testAddBothSides,"
"Client::Sync::eds_task::testAddBothSidesRefresh,"
"Client::Sync::.*::testRefreshFromClientSync,"
"Client::Sync::.*::testSlowSyncSemantic,"
"Client::Sync::.*::testRefreshStatus,"
"Client::Sync::.*::testDelete,"
"Client::Sync::.*::testItemsXML,"
"Client::Sync::.*::testOneWayFromServer,"
"Client::Sync::.*::testOneWayFromClient,"
"Client::Sync::.*::testRefreshFromLocalSync,"
"Client::Sync::.*::testOneWayFromLocal,"
"Client::Sync::.*::testOneWayFromRemote,"
"Client::Sync::.*::Retry,"
"Client::Sync::.*::Suspend,"
"Client::Sync::.*::Resend "
"CLIENT_TEST_DELAY=5 "
"CLIENT_TEST_RESEND_TIMEOUT=5 "
"CLIENT_TEST_INTERRUPT_AT=1",
testPrefix=options.testprefix)
context.add(mobicaltest)
memotootest = SyncEvolutionTest("memotoo", compile,
"", options.shell,
"Client::Sync",
[ "eds_contact",
"eds_event",
"eds_task",
"eds_memo" ],
"CLIENT_TEST_NOCHECK_SYNCMODE=1 "
"CLIENT_TEST_NUM_ITEMS=10 "
"CLIENT_TEST_SKIP="
# server duplicates items in add<->add conflict because it
# does not check UID
"Client::Sync::eds_event::testAddBothSides,"
"Client::Sync::eds_event::testAddBothSidesRefresh,"
"Client::Sync::eds_task::testAddBothSides,"
"Client::Sync::eds_task::testAddBothSidesRefresh,"
"Client::Sync::eds_contact::Retry,"
"Client::Sync::eds_contact::Suspend,"
# "Client::Sync::eds_contact::testRefreshFromClientSync,"
# "Client::Sync::eds_contact::testRefreshFromClientSemantic,"
# "Client::Sync::eds_contact::testDeleteAllRefresh,"
# "Client::Sync::eds_contact::testOneWayFromServer,"
"Client::Sync::eds_event::testRefreshFromClientSync,"
"Client::Sync::eds_event::testRefreshFromClientSemantic,"
"Client::Sync::eds_event::testOneWayFromServer,"
"Client::Sync::eds_event::testDeleteAllRefresh,"
"Client::Sync::eds_event::Retry,"
"Client::Sync::eds_event::Suspend,"
"Client::Sync::eds_task::testRefreshFromClientSync,"
"Client::Sync::eds_task::testRefreshFromClientSemantic,"
"Client::Sync::eds_task::testDeleteAllRefresh,"
"Client::Sync::eds_task::testOneWayFromServer,"
"Client::Sync::eds_task::Retry,"
"Client::Sync::eds_task::Suspend,"
"Client::Sync::eds_memo::testRefreshFromClientSync,"
"Client::Sync::eds_memo::testRefreshFromClientSemantic,"
"Client::Sync::eds_memo::testDeleteAllRefresh,"
"Client::Sync::eds_memo::testOneWayFromServer,"
"Client::Sync::eds_memo::Retry,"
"Client::Sync::eds_memo::Suspend,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::testRefreshFromClientSync,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::testRefreshFromClientSemantic,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::testDeleteAllRefresh,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::testOneWayFromServer,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::Retry,"
"Client::Sync::eds_contact_eds_event_eds_task_eds_memo::Suspend,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::testRefreshFromClientSync,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::testRefreshFromClientSemantic,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::testOneWayFromServer,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::testDeleteAllRefresh,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::Retry,"
"Client::Sync::eds_event_eds_task_eds_memo_eds_contact::Suspend "
"CLIENT_TEST_DELAY=10 "
"CLIENT_TEST_RESEND_TIMEOUT=5 "
"CLIENT_TEST_INTERRUPT_AT=1",
testPrefix=options.testprefix)
context.add(memotootest)
ovitest = SyncEvolutionTest("ovi", compile,
"", options.shell,
"Client::Sync",
[ "eds_contact",
"calendar+todo" ],
"CLIENT_TEST_DELETE_REFRESH=1 "
"CLIENT_TEST_NUM_ITEMS=50 "
"CLIENT_TEST_MAX_ITEMSIZE=512 "
"CLIENT_TEST_SKIP="
"Client::Sync::eds_contact::Retry,"
"Client::Sync::eds_contact::Suspend,"
"Client::Sync::eds_contact::testOneWayFromClient,"
"Client::Sync::eds_contact::testOneWayFromServer,"
"Client::Sync::eds_contact::testSlowSyncSemantic,"
"Client::Sync::eds_contact::testComplexRefreshFromServerSemantic,"
"Client::Sync::eds_contact::testDelete,"
"Client::Sync::eds_contact::testDeleteAllSync,"
"Client::Sync::eds_contact::testManyDeletes,"
"Client::Sync::calendar+todo::Retry,"
"Client::Sync::calendar+todo::Suspend,"
"Client::Sync::calendar+todo::testOneWayFromClient,"
"Client::Sync::calendar+todo::testOneWayFromServer,"
"Client::Sync::calendar+todo::testSlowSyncSemantic,"
"Client::Sync::calendar+todo::testComplexRefreshFromServerSemantic,"
"Client::Sync::calendar+todo::testDelete,"
"Client::Sync::calendar+todo::testDeleteAllSync,"
"Client::Sync::calendar+todo::testManyDeletes,"
"Client::Sync::calendar+todo::testDeleteAllRefresh,"
"Client::Sync::calendar+todo::testItemsXML,"
"Client::Sync::calendar+todo::testMaxMsg,"
"Client::Sync::calendar+todo::testLargeObject,"
"Client::Sync::calendar+todo_eds_contact::Retry,"
"Client::Sync::calendar+todo_eds_contact::Suspend,"
"Client::Sync::calendar+todo_eds_contact::testOneWayFromClient,"
"Client::Sync::calendar+todo_eds_contact::testOneWayFromServer,"
"Client::Sync::calendar+todo_eds_contact::testSlowSyncSemantic,"
"Client::Sync::calendar+todo_eds_contact::testComplexRefreshFromServerSemantic,"
"Client::Sync::calendar+todo_eds_contact::testDelete,"
"Client::Sync::calendar+todo_eds_contact::testDeleteAllSync,"
"Client::Sync::calendar+todo_eds_contact::testManyDeletes,"
"Client::Sync::calendar+todo::Retry,"
"Client::Sync::eds_contact_calendar+todo::Suspend,"
"Client::Sync::eds_contact_calendar+todo::testOneWayFromClient,"
"Client::Sync::eds_contact_calendar+todo::testOneWayFromServer,"
"Client::Sync::eds_contact_calendar+todo::testSlowSyncSemantic,"
"Client::Sync::eds_contact_calendar+todo::testComplexRefreshFromServerSemantic,"
"Client::Sync::eds_contact_calendar+todo::testDelete,"
"Client::Sync::eds_contact_calendar+todo::testDeleteAllSync,"
"Client::Sync::eds_contact_calendar+todo::testManyDeletes,"
"CLIENT_TEST_DELAY=5 "
"CLIENT_TEST_RESEND_TIMEOUT=5 "
"CLIENT_TEST_INTERRUPT_AT=1",
serverName="Ovi",
testPrefix=options.testprefix)
context.add(ovitest)
if options.list:
for action in context.todo:
print action.name
else:
context.execute()
| gpl-2.0 | 8,382,379,755,905,109,000 | 52.501145 | 311 | 0.515613 | false |
hoxm/plnpl | python/projects/flask-RESTful/flask-restful.py | 1 | 1709 | from flask import Flask, make_response, jsonify
from flask.ext.restful import Api, Resource, reqparse
from flask.ext.httpauth import HTTPBasicAuth
auth = HTTPBasicAuth()
@auth.verify_password
def get_password(username, password):
if username == 'root' and password == 'root':
return True
return False
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 403)
app = Flask(__name__)
api = Api(app)
class FriendsAPI(Resource):
decorators = [auth.login_required]
def get(self):
return "Show all friends!"
class FriendAPI(Resource):
def __init__(self):
self.parse = reqparse.RequestParser()
self.parse.add_argument('name', type = str, required = True,
help = 'Name of the friend', location = 'json')
self.parse.add_argument('alias', type = str, location = 'json')
self.parse.add_argument('command', type = bool, location = 'json')
super(FriendAPI, self).__init__()
def get(self, name):
return "Get one friend: " + name
#curl -i -H "Content-Type: application/json" -X PUT
# -d '{"name":"hoxm", "alias":"test1"}'
# http://localhost:5000/api/v1.0/friend/hoxm
def put(self, name):
args = self.parse.parse_args()
rstr = "Update friend: " + name
for k, v in args.iteritems():
rstr += " %s:%s" %(k, v)
return rstr
def delete(self, name):
return "Delete friend: " + name
api.add_resource(FriendsAPI, '/api/v1.0/friends', endpoint = 'friends')
api.add_resource(FriendAPI, '/api/v1.0/friend/<name>', endpoint = 'friend')
if __name__ == '__main__':
app.run(debug=True)
| gpl-2.0 | -2,806,749,462,204,202,000 | 30.648148 | 75 | 0.61849 | false |
yuezh/WALinuxAgent | tests/upload_status_blob.py | 8 | 1516 | #!/usr/bin/env python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import os
from env import waagent
"""
To run the test, you need to create a file under the same directory called:
status_blob_url.py
and define the following 2 variables like:
blockBlobUrl="<sas link to a block blob with w/r access>"
pageBlobUrl="<sas link to a page blob with w/r access>"
"""
from status_blob_url import blockBlobUrl, pageBlobUrl
class MockConfig(object):
def get(self, keyName):
return None
waagent.Config = MockConfig()
if __name__ == '__main__':
waagent.LoggerInit('/dev/stdout', '/dev/null', verbose=True)
status = "a" * 512
waagent.UploadStatusBlob(blockBlobUrl, status.encode("utf-8"))
#waagent.UploadStatusBlob(pageBlobUrl, status.encode("utf-8"))
| apache-2.0 | 1,808,277,896,528,227,300 | 34.255814 | 75 | 0.73153 | false |
erwan-lemonnier/klue-client-server | pymacaron_core/swagger/server.py | 1 | 7857 | import jsonschema
import logging
import uuid
from functools import wraps
from werkzeug.exceptions import BadRequest
from flask import request, jsonify
from flask_cors import cross_origin
from pymacaron_core.exceptions import PyMacaronCoreException, ValidationError, add_error_handlers
from pymacaron_core.utils import get_function
from bravado_core.request import IncomingRequest, unmarshal_request
log = logging.getLogger(__name__)
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
def spawn_server_api(api_name, app, api_spec, error_callback, decorator):
"""Take a a Flask app and a swagger file in YAML format describing a REST
API, and populate the app with routes handling all the paths and methods
declared in the swagger file.
Also handle marshaling and unmarshaling between json and object instances
representing the definitions from the swagger file.
"""
def mycallback(endpoint):
handler_func = get_function(endpoint.handler_server)
# Generate api endpoint around that handler
handler_wrapper = _generate_handler_wrapper(api_name, api_spec, endpoint, handler_func, error_callback, decorator)
# Bind handler to the API path
log.info("Binding %s %s ==> %s" % (endpoint.method, endpoint.path, endpoint.handler_server))
endpoint_name = '_'.join([endpoint.method, endpoint.path]).replace('/', '_')
app.add_url_rule(endpoint.path, endpoint_name, handler_wrapper, methods=[endpoint.method])
api_spec.call_on_each_endpoint(mycallback)
# Add custom error handlers to the app
add_error_handlers(app)
def _responsify(api_spec, error, status):
"""Take a bravado-core model representing an error, and return a Flask Response
with the given error code and error instance as body"""
result_json = api_spec.model_to_json(error)
r = jsonify(result_json)
r.status_code = status
return r
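# _responsify is used by the handler wrapper below for errors it detects itself
# (unparsable JSON, validation failures, unexpected handler return values).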
def _generate_handler_wrapper(api_name, api_spec, endpoint, handler_func, error_callback, global_decorator):
"""Generate a handler method for the given url method+path and operation"""
# Decorate the handler function, if Swagger spec tells us to
if endpoint.decorate_server:
endpoint_decorator = get_function(endpoint.decorate_server)
handler_func = endpoint_decorator(handler_func)
@wraps(handler_func)
def handler_wrapper(**path_params):
log.info(" ")
log.info(" ")
log.info("=> INCOMING REQUEST %s %s -> %s" %
(endpoint.method, endpoint.path, handler_func.__name__))
log.info(" ")
log.info(" ")
# Get caller's pym-call-id or generate one
call_id = request.headers.get('PymCallID', None)
if not call_id:
call_id = str(uuid.uuid4())
stack.top.call_id = call_id
# Append current server to call path, or start one
call_path = request.headers.get('PymCallPath', None)
if call_path:
call_path = "%s.%s" % (call_path, api_name)
else:
call_path = api_name
stack.top.call_path = call_path
if endpoint.param_in_body or endpoint.param_in_query:
# Turn the flask request into something bravado-core can process...
try:
req = FlaskRequestProxy(request, endpoint.param_in_body)
except BadRequest:
ee = error_callback(ValidationError("Cannot parse json data: have you set 'Content-Type' to 'application/json'?"))
return _responsify(api_spec, ee, 400)
try:
# Note: unmarshal validates parameters but does not fail
# if extra unknown parameters are submitted
parameters = unmarshal_request(req, endpoint.operation)
# Example of parameters: {'body': RegisterCredentials()}
except jsonschema.exceptions.ValidationError as e:
ee = error_callback(ValidationError(str(e)))
return _responsify(api_spec, ee, 400)
# Call the endpoint, with proper parameters depending on whether
# parameters are in body, query or url
args = []
kwargs = {}
if endpoint.param_in_path:
kwargs = path_params
if endpoint.param_in_body:
# Remove the parameters already defined in path_params
for k in list(path_params.keys()):
del parameters[k]
lst = list(parameters.values())
assert len(lst) == 1
args.append(lst[0])
if endpoint.param_in_query:
kwargs.update(parameters)
result = handler_func(*args, **kwargs)
if not result:
e = error_callback(PyMacaronCoreException("Have nothing to send in response"))
return _responsify(api_spec, e, 500)
# Did we get the expected response?
if endpoint.produces_html:
if type(result) is not tuple:
e = error_callback(PyMacaronCoreException("Method %s should return %s but returned %s" %
(endpoint.handler_server, endpoint.produces, type(result))))
return _responsify(api_spec, e, 500)
# Return an html page
return result
elif endpoint.produces_json:
if not hasattr(result, '__module__') or not hasattr(result, '__class__'):
e = error_callback(PyMacaronCoreException("Method %s did not return a class instance but a %s" %
(endpoint.handler_server, type(result))))
return _responsify(api_spec, e, 500)
# If it's already a flask Response, just pass it through.
# Errors in particular may be either passed back as flask Responses, or
# raised as exceptions to be caught and formatted by the error_callback
result_type = result.__module__ + "." + result.__class__.__name__
if result_type == 'flask.wrappers.Response':
return result
# We may have got a pymacaron Error instance, in which case
# it has a http_reply() method...
if hasattr(result, 'http_reply'):
# Let's transform this Error into a flask Response
log.info("Looks like a pymacaron error instance - calling .http_reply()")
return result.http_reply()
# Otherwise, assume no error occured and make a flask Response out of
# the result.
# TODO: check that result is an instance of a model expected as response from this endpoint
result_json = api_spec.model_to_json(result)
# Send a Flask Response with code 200 and result_json
r = jsonify(result_json)
r.status_code = 200
return r
handler_wrapper = cross_origin(headers=['Content-Type', 'Authorization'])(handler_wrapper)
# And encapsulate all in a global decorator, if given one
if global_decorator:
handler_wrapper = global_decorator(handler_wrapper)
return handler_wrapper
class FlaskRequestProxy(IncomingRequest):
"""Take a flask.request object and make it look like a
bravado_core.request.IncomingRequest"""
path = None
query = None
form = None
headers = None
files = None
_json = None
def __init__(self, request, has_json):
self.request = request
self.query = request.args
self.path = request.view_args
self.headers = request.headers
if has_json:
self._json = self.request.get_json(force=True)
def json(self):
# Convert a werkzeug ImmutableDict to a simple python dict
return self._json
| bsd-3-clause | -5,619,763,255,751,647,000 | 37.89604 | 130 | 0.625302 | false |
UManPychron/pychron | pychron/image/tasks/video_pane.py | 2 | 3953 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from __future__ import print_function
from pyface.tasks.traits_dock_pane import TraitsDockPane
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traits.api import HasTraits, Any, File, String, Int, Enum, Instance, Dict, \
on_trait_change, Bool, Range
from traitsui.api import View, Item, UItem, EnumEditor
from pychron.canvas.canvas2D.video_canvas import VideoCanvas
from pychron.core.ui.stage_component_editor import VideoComponentEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
class Source(HasTraits):
def url(self):
return
class LocalSource(Source):
path = File
def traits_view(self):
return View(UItem('path'))
def url(self):
return 'file://{}'.format(self.path)
class RemoteSource(Source):
host = String('localhost', enter_set=True, auto_set=False)
port = Int(1084, enter_set=True, auto_set=False)
def traits_view(self):
return View(
Item('host'),
Item('port'))
def url(self):
return 'pvs://{}:{}'.format(self.host, self.port)
class ControlsPane(TraitsDockPane):
name = 'Controls'
id = 'pychron.video.controls'
show_grids = Bool(False)
fps = Range(1, 12, 10)
quality = Range(1, 75, 10)
def traits_view(self):
v = View(
Item('show_grids', label='Grid'),
Item('fps'),
Item('quality'))
return v
class SourcePane(TraitsDockPane):
name = 'Source'
id = 'pychron.video.source'
kind = Enum('Remote', 'Local')
source = Instance(Source)
connections = Dict
selected_connection = Any
def traits_view(self):
v = View(
UItem('kind'),
UItem('source',
style='custom'),
UItem('selected_connection',
editor=EnumEditor(name='connections'),
style='custom'))
return v
def _kind_changed(self):
if self.kind == 'Local':
self.source = LocalSource()
else:
self.source = RemoteSource()
def _source_default(self):
return RemoteSource()
class BaseVideoPane(HasTraits):
component = Any
video = Any
@on_trait_change('video:fps')
def _update_fps(self):
print('set component fps', self.video.fps)
self.component.fps = self.video.fps
def _video_changed(self):
self.component.video = self.video
def _component_default(self):
c = VideoCanvas(video=self.video,
show_axes=False,
show_grids=False,
padding=5)
return c
def traits_view(self):
v = View(UItem('component',
style='custom',
editor=VideoComponentEditor()))
return v
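# VideoPane and VideoDockPane below only mix BaseVideoPane into a task pane or a dock
# pane; the video canvas, fps handling and traits view are shared via the mixin.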
class VideoPane(TraitsTaskPane, BaseVideoPane):
pass
class VideoDockPane(TraitsDockPane, BaseVideoPane):
id = 'pychron.video'
name = 'Video'
# ============= EOF =============================================
| apache-2.0 | -1,488,579,738,736,277,500 | 27.854015 | 81 | 0.570959 | false |
tricky-duck/pyth | test_add_channel.py | 1 | 2100 | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_channel(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_channel(self):
success = True
wd = self.wd
wd.get("http://seleniumbuilder.github.io/se-builder/")
wd.get("http://192.168.139.147/accounts/login/?next=/main/")
wd.find_element_by_name("username").click()
wd.find_element_by_name("username").clear()
wd.find_element_by_name("username").send_keys("1")
wd.find_element_by_name("password").click()
wd.find_element_by_name("password").clear()
wd.find_element_by_name("password").send_keys("1")
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
wd.find_element_by_css_selector("#main > table > tbody > tr > td").click()
wd.find_element_by_link_text("Каналы").click()
wd.find_element_by_link_text("Добавление канала").click()
wd.find_element_by_id("id_name").click()
wd.find_element_by_id("id_name").clear()
wd.find_element_by_id("id_name").send_keys("qwerty")
wd.find_element_by_id("id_programm").click()
wd.find_element_by_id("id_programm").clear()
wd.find_element_by_id("id_programm").send_keys("777")
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
wd.find_element_by_name("programm").click()
wd.find_element_by_name("programm").clear()
wd.find_element_by_name("programm").send_keys("777")
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
wd.find_element_by_link_text("Выйти").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -1,398,138,600,721,384,000 | 39.647059 | 82 | 0.61698 | false |
zhengwsh/InplusTrader_Linux | InplusTrader/backtestEngine/utils/scheduler.py | 1 | 9086 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
from dateutil.parser import parse
from ..execution_context import ExecutionContext
from ..utils.exception import patch_user_exc, ModifyExceptionFromType
from ..const import EXC_TYPE, EXECUTION_PHASE
from ..environment import Environment
from ..events import EVENT
try:
from inspect import signature
except ImportError:
from funcsigs import signature
def market_close(hour=0, minute=0):
minutes_since_midnight = 15 * 60 - hour * 60 - minute
if minutes_since_midnight < 13 * 60:
minutes_since_midnight -= 90
return minutes_since_midnight
def market_open(hour=0, minute=0):
minutes_since_midnight = 9 * 60 + 31 + hour * 60 + minute
if minutes_since_midnight > 11 * 60 + 30:
minutes_since_midnight += 90
return minutes_since_midnight
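# Both helpers express a clock time as "minutes since midnight" within the trading
# session used here (09:31-11:30, 13:00-15:00): market_open() counts forward from 09:31,
# market_close() counts backward from 15:00, and both skip the 90-minute lunch break.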
_scheduler = None
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT)
def run_daily(func, time_rule=None):
_scheduler.run_daily(func, time_rule)
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT)
def run_weekly(func, weekday=None, tradingday=None, time_rule=None):
_scheduler.run_weekly(func, weekday=weekday, tradingday=tradingday, time_rule=time_rule)
@ExecutionContext.enforce_phase(EXECUTION_PHASE.ON_INIT)
def run_monthly(func, tradingday=None, time_rule=None, **kwargs):
_scheduler.run_monthly(func, tradingday=tradingday, time_rule=time_rule, **kwargs)
def _verify_function(name, func):
if not callable(func):
raise patch_user_exc(ValueError('scheduler.{}: func should be callable'.format(name)))
sig = signature(func)  # do not shadow the imported signature() helper
if len(sig.parameters) != 2:
raise patch_user_exc(TypeError(
'scheduler.{}: func should take exactly 2 arguments (context, bar_dict)'.format(name)))
class Scheduler(object):
_TRADING_DATES = None
@classmethod
def set_trading_dates_(cls, trading_dates):
cls._TRADING_DATES = trading_dates
def __init__(self, frequency):
self._registry = []
self._today = None
self._this_week = None
self._this_month = None
self._last_minute = 0
self._current_minute = 0
self._stage = None
self._ucontext = None
self._frequency = frequency
event_bus = Environment.get_instance().event_bus
event_bus.add_listener(EVENT.PRE_BEFORE_TRADING, self.next_day_)
event_bus.add_listener(EVENT.BEFORE_TRADING, self.before_trading_)
event_bus.add_listener(EVENT.BAR, self.next_bar_)
def set_user_context(self, ucontext):
self._ucontext = ucontext
@staticmethod
def _always_true():
return True
def _is_weekday(self, wd):
return self._today.weekday() == wd
def _is_nth_trading_day_in_week(self, n):
try:
return self._this_week[n] == self._today
except IndexError:
return False
def _is_nth_trading_day_in_month(self, n):
try:
return self._this_month[n] == self._today
except IndexError:
return False
def _should_trigger(self, n):
# do not trigger outside regular trading hours (before 09:31 or after 15:00)
if self._current_minute < 9*60+31 or self._current_minute > 15*60:
return False
return self._last_minute < n <= self._current_minute
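# the half-open interval (last_minute, current_minute] makes each scheduled minute
# fire exactly once, even when bars skip minutes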
def _is_before_trading(self):
return self._stage == 'before_trading'
def _time_rule_for(self, time_rule):
if time_rule == 'before_trading':
return lambda: self._is_before_trading()
time_rule = time_rule if time_rule else self._minutes_since_midnight(9, 31)
return lambda: self._should_trigger(time_rule)
def run_daily(self, func, time_rule=None):
_verify_function('run_daily', func)
self._registry.append((self._always_true,
self._time_rule_for(time_rule),
func))
def run_weekly(self, func, weekday=None, tradingday=None, time_rule=None):
_verify_function('run_weekly', func)
if (weekday is not None and tradingday is not None) or (weekday is None and tradingday is None):
raise patch_user_exc(ValueError('select one of weekday/tradingday'))
if weekday is not None:
if weekday < 1 or weekday > 7:
raise patch_user_exc(ValueError('invalid weekday, should be in [1, 7]'))
day_checker = lambda: self._is_weekday(weekday - 1)
else:
if tradingday > 5 or tradingday < -5 or tradingday == 0:
raise patch_user_exc(ValueError('invalid trading day, should be in [-5, 0), (0, 5]'))
if tradingday > 0:
tradingday -= 1
day_checker = lambda: self._is_nth_trading_day_in_week(tradingday)
time_checker = self._time_rule_for(time_rule)
self._registry.append((day_checker, time_checker, func))
def run_monthly(self, func, tradingday=None, time_rule=None, **kwargs):
_verify_function('run_monthly', func)
if tradingday is None and 'monthday' in kwargs:
tradingday = kwargs.pop('monthday')
if kwargs:
raise patch_user_exc(ValueError('unknown argument: {}'.format(kwargs)))
if tradingday is None:
raise patch_user_exc(ValueError('tradingday is required'))
if tradingday > 23 or tradingday < -23 or tradingday == 0:
raise patch_user_exc(ValueError('invalid tradingday, should be in [-23, 0), (0, 23]'))
if tradingday > 0:
tradingday -= 1
time_checker = self._time_rule_for(time_rule)
self._registry.append((lambda: self._is_nth_trading_day_in_month(tradingday),
time_checker, func))
def next_day_(self):
if len(self._registry) == 0:
return
self._today = Environment.get_instance().trading_dt.date()
self._last_minute = 0
self._current_minute = 0
if not self._this_week or self._today > self._this_week[-1]:
self._fill_week()
if not self._this_month or self._today > self._this_month[-1]:
self._fill_month()
@staticmethod
def _minutes_since_midnight(hour, minute):
return hour * 60 + minute
def next_bar_(self, bars):
with ExecutionContext(EXECUTION_PHASE.SCHEDULED, bars):
self._current_minute = self._minutes_since_midnight(self._ucontext.now.hour, self._ucontext.now.minute)
for day_rule, time_rule, func in self._registry:
if day_rule() and time_rule():
with ModifyExceptionFromType(EXC_TYPE.USER_EXC):
func(self._ucontext, bars)
self._last_minute = self._current_minute
def before_trading_(self):
with ExecutionContext(EXECUTION_PHASE.BEFORE_TRADING):
self._stage = 'before_trading'
for day_rule, time_rule, func in self._registry:
if day_rule() and time_rule():
with ModifyExceptionFromType(EXC_TYPE.USER_EXC):
func(self._ucontext, None)
self._stage = None
def _fill_week(self):
weekday = self._today.isoweekday()
weekend = self._today + datetime.timedelta(days=7-weekday)
week_start = weekend - datetime.timedelta(days=6)
left = self._TRADING_DATES.searchsorted(week_start)
right = self._TRADING_DATES.searchsorted(weekend, side='right')
self._this_week = [d.date() for d in self._TRADING_DATES[left:right]]
def _fill_month(self):
try:
month_end = self._today.replace(month=self._today.month+1, day=1)
except ValueError:
month_end = self._today.replace(year=self._today.year+1, month=1, day=1)
month_begin = self._today.replace(day=1)
left, right = self._TRADING_DATES.searchsorted(month_begin), self._TRADING_DATES.searchsorted(month_end)
self._this_month = [d.date() for d in self._TRADING_DATES[left:right]]
def set_state(self, state):
r = json.loads(state.decode('utf-8'))
self._today = parse(r['today']).date()
self._last_minute = r['last_minute']
self._fill_month()
self._fill_week()
def get_state(self):
if self._today is None:
return None
return json.dumps({
'today': self._today.strftime('%Y-%m-%d'),
'last_minute': self._last_minute
}).encode('utf-8')
| mit | 8,981,379,791,192,020,000 | 35.256 | 115 | 0.623566 | false |
grembo/buildbot | master/buildbot/test/unit/test_db_migrate_versions_048_change_properties_to_text.py | 10 | 2864 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from random import choice
from string import ascii_lowercase
import sqlalchemy as sa
from twisted.trial import unittest
from buildbot.test.util import migration
from buildbot.util import sautils
class Migration(migration.MigrateTestMixin, unittest.TestCase):
def setUp(self):
return self.setUpMigrateTest()
def tearDown(self):
return self.tearDownMigrateTest()
def create_table_thd(self, conn):
metadata = sa.MetaData()
metadata.bind = conn
change_properties = sautils.Table(
'change_properties', metadata,
sa.Column('changeid', sa.Integer, nullable=False),
sa.Column('property_name', sa.String(256), nullable=False),
sa.Column('property_value', sa.String(1024), nullable=False),
)
change_properties.create()
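# The pre-migration schema stores property_value as String(1024); test_update below
# checks that migration 47 -> 48 widens it to Text and that a 65535-character value round-trips.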
def test_update(self):
def setup_thd(conn):
self.create_table_thd(conn)
def verify_thd(conn):
metadata = sa.MetaData()
metadata.bind = conn
random_length = 65535
random_string = ''.join(choice(ascii_lowercase)
for byte in range(random_length))
random_string = random_string.encode("ascii")
# Verify column type is text
change_properties = sautils.Table(
'change_properties', metadata, autoload=True)
self.assertIsInstance(
change_properties.c.property_value.type, sa.Text)
# Test write and read random string
conn.execute(change_properties.insert(), [dict(
changeid=1,
property_name="test_change_properties_property_value_length",
property_value=random_string,
)])
q = conn.execute(sa.select(
[change_properties.c.property_value]).where(change_properties.c.changeid == 1))
[self.assertEqual(q_string[0], random_string)
for q_string in q]
return self.do_test_migration(47, 48, setup_thd, verify_thd)
| gpl-2.0 | -4,133,184,959,068,521,000 | 34.8 | 95 | 0.648394 | false |
mahmoudShaheen/PyMedox | packages/pythonSQL.py | 1 | 7381 | #!/usr/bin/env python
#################################
# @author: Mahmoud Shaheen #
# MedicalBox IOT Project #
# python-SQLite interface #
#################################
import sqlite3
import data # shared settings are accessed as data.x, so a change only needs to happen in data.py
from time import strftime
import datetime
import time
#Get current time "returns hours, minutes"
def getCTime():
h = strftime("%H")
m = strftime("%M")
s = strftime("%S")
currentTime = [h,m,s]
return currentTime
#Get the next time bills will be dispensed
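#Returns the next not-yet-dispensed time as an "H:MM:SS" string, or False if the timetable is empty.
#If the current time is past the last slot, all slots are reset to not dispensed and the first
#slot (to be served the next day) is returned.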
def getNextSchedule():
if(checkEmptyTimetable()):
return False
db = sqlite3.connect(data.dbName)
curs = db.cursor()
hmTime=getCTime()
h=int(hmTime[0])
m=int(hmTime[1])
s=int(hmTime[2])
rTime=datetime.timedelta(hours=h,minutes=m,seconds=s)
sql = """SELECT `time` FROM `timetable` where `dispensed` = "0"
ORDER BY `time` ASC"""
curs.execute(sql)
tempTime = curs.fetchone()
timeArray = []
while (tempTime is not None):
tempTime = tempTime[0]
h,m,s = tempTime.split(':') #split the time string by ':'
tempTime = datetime.timedelta(hours=int(h),minutes=int(m),seconds=int(s)) #convert h,m,s to ints then to timedelta object
timeArray.append(tempTime)
tempTime = curs.fetchone()
close(db)
if (len(timeArray) == 0): #return false if timetable is empty
return False
#print "time array:\n" , timeArray
if(len(timeArray) > 0 ):
if (rTime > timeArray[-1]): #if currentTime > last item in ordered array "dispensing finished for today"
print"min: ", timeArray[0]
resetDispensed() #mark all drugs as not dispensed "as this is the end of the day"
return str(timeArray[0])
else:
for row in timeArray:
if (row > rTime):
print"row: " , row
return str(row)
	return False
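#Illustrative sketch (times are hypothetical): the scheduler above compares
#"HH:MM:SS" strings by converting them to datetime.timedelta objects and
#returns the winner with str(), which drops a leading zero ("8:30:00"), so
#addZero() further down re-pads it before it is used as a lookup key.
def _exampleTimeRoundtrip():
    h, m, s = "08:30:00".split(':')
    asDelta = datetime.timedelta(hours=int(h), minutes=int(m), seconds=int(s))
    return str(asDelta) #-> "8:30:00"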
#return whether timetable is empty or not
def checkEmptyTimetable():
db = sqlite3.connect(data.dbName)
curs = db.cursor()
hmTime=getCTime()
h=int(hmTime[0])
m=int(hmTime[1])
s=int(hmTime[2])
rTime=datetime.timedelta(hours=h,minutes=m,seconds=s)
sql = """SELECT `time` FROM `timetable`
ORDER BY `time` ASC"""
curs.execute(sql)
tempTime = curs.fetchone()
timeArray = []
while (tempTime is not None):
tempTime = tempTime[0]
h,m,s = tempTime.split(':') #split the time string by ':'
tempTime = datetime.timedelta(hours=int(h),minutes=int(m),seconds=int(s)) #convert h,m,s to ints then to timedelta object
timeArray.append(tempTime)
tempTime = curs.fetchone()
close(db)
if (len(timeArray) == 0): #return True if timetable is empty
return True
return False
#returns an array with the number of bills to dispense for each warehouse medicine
def getBills(rTime):
rTime = addZero(rTime)
db = sqlite3.connect(data.dbName)
curs = db.cursor()
sql = """SELECT `bill_array` FROM `timetable`
WHERE `time` = '%s' """ % (rTime)
curs.execute(sql)
billString = curs.fetchone()
if(not billString is None):
billString = billString[0]
billArray = billString.split(",") #convert string to array by separator ","
bills = [int(i) for i in billArray] #convert the string array to int array
else:
bills = [0,0,0,0]
close(db)
return bills
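#Illustrative sketch (values are hypothetical): each timetable row stores its
#bill_array as a comma-separated count per warehouse slot, e.g. "1,0,2,0",
#which is what getBills() above parses into a list of ints.
def _exampleBillArrayRoundtrip():
    bills = [int(i) for i in "1,0,2,0".split(",")] #-> [1, 0, 2, 0]
    return ",".join(str(b) for b in bills) #-> "1,0,2,0"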
def checkBills(bills): #check if the bills in the box will be enough for the schedule, accepts array of bills [1,0,2,0]
db = sqlite3.connect(data.dbName)
curs = db.cursor()
for i in range(1,data.warehouseCount+1): #1 for id to start from 1
sql = """SELECT `bill_count` FROM `warehouse`
WHERE `id` = '%d' """ % (i)
curs.execute(sql)
remainingBills = curs.fetchone()
remainingBills = remainingBills[0]
if bills[i-1] > remainingBills:
close(db)
return False
close(db)
return True
def subtractBills(bills): #update bill_count after dispensing, accepts array of bills [1,0,2,0]
db = sqlite3.connect(data.dbName)
curs = db.cursor()
for i in range(1,data.warehouseCount+1): #1 for id to start from 1
sql = """SELECT `bill_count` FROM `warehouse`
WHERE `id` = '%d' """ % (i)
curs.execute(sql)
remainingBills = curs.fetchone()
remainingBills = remainingBills[0]
newValue = remainingBills - bills[i-1]
sql = """UPDATE warehouse SET
bill_count='%d'
WHERE id = '%d'"""%(newValue,i)
curs.execute(sql)
close(db)
def markDispensed(rTime): #mark a time as dispensed
rTime = addZero(rTime)
db = sqlite3.connect(data.dbName)
curs = db.cursor()
sql = """UPDATE timetable SET
`dispensed` ='%d'
WHERE `time` = '%s'"""%(1,rTime)
curs.execute(sql)
close(db)
def resetDispensed():
print "resetDispensed called"
db = sqlite3.connect(data.dbName)
curs = db.cursor()
sql = """UPDATE `timetable` SET
`dispensed` = '%d'"""%(0)
curs.execute(sql)
close(db)
def isDispensed(rTime):
rTime = addZero(rTime)
db = sqlite3.connect(data.dbName)
curs = db.cursor()
sql = """SELECT `dispensed` FROM `timetable`
WHERE `time` = '%s'"""%(rTime)
curs.execute(sql)
tempDis = curs.fetchone()
close(db)
return int(tempDis[0])
def close(db):
db.commit()
db.close()
def clearTimetable():
db = sqlite3.connect(data.dbName)
curs = db.cursor()
data.scheduleChanged = True
curs.execute("DELETE FROM timetable")
close(db)
def refreshTimetable(timeArray, drugArray):
clearTimetable()
db = sqlite3.connect(data.dbName)
curs = db.cursor()
i = 0
for row in timeArray:
sql = """INSERT INTO `timetable` (id ,time, bill_array, dispensed)
VALUES ('%d', '%s', '%s', '%d');""" % (i+1, timeArray[i], drugArray[i], 0) #i+1 for id to start from 1
curs.execute(sql)
i = i + 1
close(db)
def addBills(billArray):
db = sqlite3.connect(data.dbName)
curs = db.cursor()
for i in range(1,data.warehouseCount+1): #1 for id to start from 1
sql = """SELECT `bill_count` FROM `warehouse`
WHERE `id` = '%d' """ % (i)
curs.execute(sql)
remainingBills = curs.fetchone()
remainingBills = remainingBills[0]
newValue = remainingBills + billArray[i-1]
sql = """UPDATE warehouse SET
bill_count='%d'
WHERE id = '%d'"""%(newValue,i)
curs.execute(sql)
close(db)
def clearBills():
db = sqlite3.connect(data.dbName)
curs = db.cursor()
for i in range(1,data.warehouseCount+1): #1 for id to start from 1
sql = """UPDATE warehouse SET
bill_count='%d'
WHERE id = '%d'"""%(0,i)
curs.execute(sql)
close(db)
def getBillCount():
db = sqlite3.connect(data.dbName)
curs = db.cursor()
billCount = ""
for i in range(1,data.warehouseCount+1): #1 for id to start from 1
sql = """SELECT `bill_count` FROM `warehouse`
WHERE `id` = '%d' """ % (i)
curs.execute(sql)
remainingBills = curs.fetchone()
remainingBills = remainingBills[0]
billCount = billCount + str(remainingBills) + ","
billCount = billCount[:-1] #remove the last ","
return billCount
#return the sum of the required bills in 24 hours "total in timetable"
def getTotalDayBills():
db = sqlite3.connect(data.dbName)
curs = db.cursor()
totalBills = [0,0,0,0]
sql = "SELECT `bill_array` FROM `timetable` "
curs.execute(sql)
billString = curs.fetchone()
while (billString != None):
billString = billString[0]
billArray = billString.split(",") #convert string to array by separator ","
bills = [int(i) for i in billArray] #convert the string array to int array
totalBills = [x + y for x, y in zip(totalBills, bills)]
billString = curs.fetchone()
return totalBills
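#Illustrative sketch (rows are hypothetical): getTotalDayBills() above adds the
#per-slot counts element-wise across every timetable row.
def _exampleSumBillArrays():
    totalBills = [0, 0, 0, 0]
    for row in ["1,0,2,0", "0,1,1,0"]:
        bills = [int(i) for i in row.split(",")]
        totalBills = [x + y for x, y in zip(totalBills, bills)]
    return totalBills #-> [1, 1, 3, 0]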
def addZero(rTime):
h, m, s = rTime.split(":")
h = int(h)
if(h < 10):
rTime = "0" + rTime
return rTime | mit | 2,889,561,314,581,735,400 | 28.177866 | 123 | 0.669286 | false |
Juraci/tempest | tempest/api/compute/floating_ips/test_floating_ips_actions_negative.py | 9 | 4249 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib import exceptions as lib_exc
from tempest.api.compute.floating_ips import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class FloatingIPsNegativeTestJSON(base.BaseFloatingIPsTest):
server_id = None
@classmethod
def setup_clients(cls):
super(FloatingIPsNegativeTestJSON, cls).setup_clients()
cls.client = cls.floating_ips_client
@classmethod
def resource_setup(cls):
super(FloatingIPsNegativeTestJSON, cls).resource_setup()
# Server creation
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
# Generating a nonexistent floatingIP id
cls.floating_ip_ids = []
body = cls.client.list_floating_ips()
for i in range(len(body)):
cls.floating_ip_ids.append(body[i]['id'])
while True:
cls.non_exist_id = data_utils.rand_int_id(start=999)
if CONF.service_available.neutron:
cls.non_exist_id = str(uuid.uuid4())
if cls.non_exist_id not in cls.floating_ip_ids:
break
@test.attr(type=['negative'])
@test.idempotent_id('6e0f059b-e4dd-48fb-8207-06e3bba5b074')
@test.services('network')
def test_allocate_floating_ip_from_nonexistent_pool(self):
# Negative test:Allocation of a new floating IP from a nonexistent_pool
# to a project should fail
self.assertRaises(lib_exc.NotFound,
self.client.create_floating_ip,
"non_exist_pool")
@test.attr(type=['negative'])
@test.idempotent_id('ae1c55a8-552b-44d4-bfb6-2a115a15d0ba')
@test.services('network')
def test_delete_nonexistent_floating_ip(self):
# Negative test:Deletion of a nonexistent floating IP
# from project should fail
# Deleting the non existent floating IP
self.assertRaises(lib_exc.NotFound, self.client.delete_floating_ip,
self.non_exist_id)
@test.attr(type=['negative'])
@test.idempotent_id('595fa616-1a71-4670-9614-46564ac49a4c')
@test.services('network')
def test_associate_nonexistent_floating_ip(self):
# Negative test:Association of a non existent floating IP
# to specific server should fail
# Associating non existent floating IP
self.assertRaises(lib_exc.NotFound,
self.client.associate_floating_ip_to_server,
"0.0.0.0", self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('0a081a66-e568-4e6b-aa62-9587a876dca8')
@test.services('network')
def test_dissociate_nonexistent_floating_ip(self):
# Negative test:Dissociation of a non existent floating IP should fail
# Dissociating non existent floating IP
self.assertRaises(lib_exc.NotFound,
self.client.disassociate_floating_ip_from_server,
"0.0.0.0", self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('804b4fcb-bbf5-412f-925d-896672b61eb3')
@test.services('network')
def test_associate_ip_to_server_without_passing_floating_ip(self):
# Negative test:Association of empty floating IP to specific server
# should raise NotFound or BadRequest(In case of Nova V2.1) exception.
self.assertRaises((lib_exc.NotFound, lib_exc.BadRequest),
self.client.associate_floating_ip_to_server,
'', self.server_id)
| apache-2.0 | -1,418,141,405,024,298,800 | 39.466667 | 79 | 0.655448 | false |
kenrobbins/tardyrush | tardyrush/helpers/filters.py | 1 | 3154 | import re
import pytz
import urllib
import datetime
import teams
from babel import dates
from jinja2 import evalcontextfilter, Markup, escape
from flaskext.babel import format_datetime, to_user_timezone
from tardyrush import app
from flask.ext.wtf import HiddenField
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
@app.template_filter('nl2br')
@evalcontextfilter
def nl2br(eval_ctx, value):
result = u'\n\n'.join(u'<p>%s</p>' % p.replace('\n', '<br/>\n') \
for p in _paragraph_re.split(escape(value)))
if eval_ctx.autoescape:
result = Markup(result)
return result
@app.template_filter()
def urlencode(url, **kwargs):
seq = []
for key, val in kwargs.iteritems():
if isinstance(val, (list, tuple)):
for v in val:
seq.append( (key, v) )
else:
seq.append( (key, val) )
return "%s?%s" % (url, urllib.urlencode(seq))
@app.template_filter()
def add_time(dt, **kwargs):
return dt + datetime.timedelta(**kwargs)
@app.template_filter()
def kdr(fmt, kills, deaths):
if deaths == 0:
return '<span class="inf_kdr">∞</span>'
kdr = float(kills) / float(deaths)
return fmt % kdr
@app.template_filter()
def match_last_updated_format(value):
return format_datetime(value, "MMM d 'at' h':'mm a")
@app.template_filter()
def matches_datetime_format(value):
# show the year if the value's year is not the current year, but only do
# that if it's more than 45 days in the future. that way, at end of the
# year, it doesn't show the year for everything.
utcnow = datetime.datetime.utcnow()
if value.year != utcnow.year:
return format_datetime(value, "MMM d',' yyyy 'at' h':'mm a zzz")
return format_datetime(value, "EEE',' MMM d 'at' h':'mm a zzz")
@app.template_filter()
def matches_date_format(value):
return format_datetime(value, "MMMM d',' yyyy")
@app.template_filter()
def matches_time_format(value):
return format_datetime(value, "h':'mm a zzz")
@app.template_filter()
def matches_datetime_format_full(value):
return format_datetime(value, "EEEE',' MMMM d',' yyyy 'at' h':'mm a zzz")
@app.template_filter()
def record_format(value):
out = "%d-%d" % (value[0], value[1])
if value[2]:
out += "-%d" % (value[2])
return out
def matches_datetime_format_full_for_team(dt, tz):
return dates.format_datetime(dt,
"EEEE',' MMMM d',' yyyy 'at' h':'mm a zzz",
locale='en_US',
tzinfo=pytz.timezone(tz))
def matches_datetime_format_for_team(value, tz):
utcnow = datetime.datetime.utcnow()
if value.year != utcnow.year:
return dates.format_datetime(value,
"MMM d',' yyyy 'at' h':'mm a zzz",
locale='en_US',
tzinfo=pytz.timezone(tz))
return dates.format_datetime(value,
"EEE',' MMM d 'at' h':'mm a zzz",
locale='en_US',
tzinfo=pytz.timezone(tz))
@app.template_filter()
def join_none(val, d=u''):
return d.join(v for v in val if v)
@app.template_filter()
def is_hidden_field(field):
return isinstance(field, HiddenField)
| mit | 6,351,483,888,426,617,000 | 29.326923 | 77 | 0.625555 | false |
Morphux/installer | pkgs/shadow_p2/shadow_p2.py | 1 | 3361 | ################################### LICENSE ####################################
# Copyright 2016 Morphux #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
##
# shadow_p2.py
# Created: 21/12/2016
# By: Louis Solofrizzo <[email protected]>
##
import os
class Shadow_P2:
conf_lst = {}
e = False
root_dir = ""
def init(self, c_lst, ex, root_dir):
self.conf_lst = c_lst
self.e = ex
self.root_dir = root_dir
self.config = {
"name": "shadow", # Name of the package
"version": "4.2.1", # Version of the package
"size": 42, # Size of the installed package (MB)
"archive": "shadow-4.2.1.tar.xz", # Archive name
"SBU": 0.2, # SBU (Compilation time)
"tmp_install": False, # Is this package part of the temporary install
"next": "psmisc", # Next package to install
"urls": [ # Url to download the package. The first one must be morphux servers
"https://install.morphux.org/packages/shadow-4.2.1.tar.xz"
]
}
return self.config
def before(self):
self.e(["sed -i 's/groups$(EXEEXT) //' src/Makefile.in"], shell=True)
self.e(["find man -name Makefile.in -exec sed -i 's/groups\.1 / /' {} \;"], shell=True)
self.e(["find man -name Makefile.in -exec sed -i 's/getspnam\.3 / /' {} \;"], shell=True)
self.e(["find man -name Makefile.in -exec sed -i 's/passwd\.5 / /' {} \;"], shell=True)
self.e(["sed -i -e 's@#ENCRYPT_METHOD DES@ENCRYPT_METHOD SHA512@' -e 's@/var/spool/mail@/var/mail@' etc/login.defs"], shell=True)
return self.e(["sed -i 's/1000/999/' etc/useradd"], shell=True)
def configure(self):
return self.e(["./configure",
"--sysconfdir=/etc",
"--with-group-name-max-length=32"
])
def make(self):
return self.e(["make", "-j", self.conf_lst["cpus"]])
def install(self):
return self.e(["make", "install"])
def after(self):
if "MERGE_USR" in self.conf_lst["config"] and self.conf_lst["config"]["MERGE_USR"] != True:
self.e(["mv", "-v", "/usr/bin/passwd", "/bin"])
self.e(["pwconv"])
return self.e(["grpconv"])
| apache-2.0 | 2,872,644,432,494,339,000 | 45.041096 | 137 | 0.469206 | false |
RichHelle/data-science-from-scratch | first-edition/code-python3/mapreduce.py | 12 | 5556 | import math, random, re, datetime
from collections import defaultdict, Counter
from functools import partial
from naive_bayes import tokenize
def word_count_old(documents):
"""word count not using MapReduce"""
return Counter(word
for document in documents
for word in tokenize(document))
def wc_mapper(document):
"""for each word in the document, emit (word,1)"""
for word in tokenize(document):
yield (word, 1)
def wc_reducer(word, counts):
"""sum up the counts for a word"""
yield (word, sum(counts))
def word_count(documents):
"""count the words in the input documents using MapReduce"""
# place to store grouped values
collector = defaultdict(list)
for document in documents:
for word, count in wc_mapper(document):
collector[word].append(count)
return [output
for word, counts in collector.items()
for output in wc_reducer(word, counts)]
def map_reduce(inputs, mapper, reducer):
"""runs MapReduce on the inputs using mapper and reducer"""
collector = defaultdict(list)
for input in inputs:
for key, value in mapper(input):
collector[key].append(value)
return [output
for key, values in collector.items()
for output in reducer(key,values)]
def reduce_with(aggregation_fn, key, values):
"""reduces a key-values pair by applying aggregation_fn to the values"""
yield (key, aggregation_fn(values))
def values_reducer(aggregation_fn):
"""turns a function (values -> output) into a reducer"""
return partial(reduce_with, aggregation_fn)
sum_reducer = values_reducer(sum)
max_reducer = values_reducer(max)
min_reducer = values_reducer(min)
count_distinct_reducer = values_reducer(lambda values: len(set(values)))
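# Illustrative sketch: values_reducer wraps any aggregation over the grouped
# values, so further reducers are one-liners (mean_reducer is not part of the
# original set and is named here for illustration only).
mean_reducer = values_reducer(lambda values: sum(values) / len(values))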
#
# Analyzing Status Updates
#
status_updates = [
{"id": 1,
"username" : "joelgrus",
"text" : "Is anyone interested in a data science book?",
"created_at" : datetime.datetime(2013, 12, 21, 11, 47, 0),
"liked_by" : ["data_guy", "data_gal", "bill"] },
# add your own
]
def data_science_day_mapper(status_update):
"""yields (day_of_week, 1) if status_update contains "data science" """
if "data science" in status_update["text"].lower():
day_of_week = status_update["created_at"].weekday()
yield (day_of_week, 1)
data_science_days = map_reduce(status_updates,
data_science_day_mapper,
sum_reducer)
def words_per_user_mapper(status_update):
user = status_update["username"]
for word in tokenize(status_update["text"]):
yield (user, (word, 1))
def most_popular_word_reducer(user, words_and_counts):
"""given a sequence of (word, count) pairs,
return the word with the highest total count"""
word_counts = Counter()
for word, count in words_and_counts:
word_counts[word] += count
word, count = word_counts.most_common(1)[0]
yield (user, (word, count))
user_words = map_reduce(status_updates,
words_per_user_mapper,
most_popular_word_reducer)
def liker_mapper(status_update):
user = status_update["username"]
for liker in status_update["liked_by"]:
yield (user, liker)
distinct_likers_per_user = map_reduce(status_updates,
liker_mapper,
count_distinct_reducer)
#
# matrix multiplication
#
def matrix_multiply_mapper(m, element):
"""m is the common dimension (columns of A, rows of B)
element is a tuple (matrix_name, i, j, value)"""
matrix, i, j, value = element
if matrix == "A":
for column in range(m):
# A_ij is the jth entry in the sum for each C_i_column
yield((i, column), (j, value))
else:
for row in range(m):
# B_ij is the ith entry in the sum for each C_row_j
yield((row, j), (i, value))
def matrix_multiply_reducer(m, key, indexed_values):
results_by_index = defaultdict(list)
for index, value in indexed_values:
results_by_index[index].append(value)
# sum up all the products of the positions with two results
sum_product = sum(results[0] * results[1]
for results in results_by_index.values()
if len(results) == 2)
if sum_product != 0.0:
yield (key, sum_product)
if __name__ == "__main__":
documents = ["data science", "big data", "science fiction"]
wc_mapper_results = [result
for document in documents
for result in wc_mapper(document)]
print("wc_mapper results")
print(wc_mapper_results)
print()
print("word count results")
print(word_count(documents))
print()
print("word count using map_reduce function")
print(map_reduce(documents, wc_mapper, wc_reducer))
print()
print("data science days")
print(data_science_days)
print()
print("user words")
print(user_words)
print()
print("distinct likers")
print(distinct_likers_per_user)
print()
# matrix multiplication
entries = [("A", 0, 0, 3), ("A", 0, 1, 2),
("B", 0, 0, 4), ("B", 0, 1, -1), ("B", 1, 0, 10)]
mapper = partial(matrix_multiply_mapper, 3)
reducer = partial(matrix_multiply_reducer, 3)
print("map-reduce matrix multiplication")
print("entries:", entries)
print("result:", map_reduce(entries, mapper, reducer))
| unlicense | 456,885,410,025,940,860 | 28.870968 | 76 | 0.613751 | false |
shahbazn/neutron | neutron/tests/unit/extensions/test_dns.py | 11 | 22297 | # Copyright 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
import netaddr
from oslo_config import cfg
from neutron.common import constants
from neutron.common import utils
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.extensions import dns
from neutron.tests.unit.db import test_db_base_plugin_v2
class DnsExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
def get_extended_resources(self, version):
return dns.get_extended_resources(version)
class DnsExtensionTestPlugin(db_base_plugin_v2.NeutronDbPluginV2):
"""Test plugin to mixin the DNS Integration extensions.
"""
supported_extension_aliases = ["dns-integration"]
class DnsExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2):
"""Test API extension dns attributes.
"""
def setUp(self):
plugin = ('neutron.tests.unit.extensions.test_dns.' +
'DnsExtensionTestPlugin')
ext_mgr = DnsExtensionManager()
super(DnsExtensionTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def _create_port(self, fmt, net_id, expected_res_status=None,
arg_list=None, **kwargs):
data = {'port': {'network_id': net_id,
'tenant_id': self._tenant_id}}
for arg in (('admin_state_up', 'device_id',
'mac_address', 'name', 'fixed_ips',
'tenant_id', 'device_owner', 'security_groups',
'dns_name') + (arg_list or ())):
# Arg must be present
if arg in kwargs:
data['port'][arg] = kwargs[arg]
# create a dhcp port device id if one hasn't been supplied
if ('device_owner' in kwargs and
kwargs['device_owner'] == constants.DEVICE_OWNER_DHCP and
'host' in kwargs and
'device_id' not in kwargs):
device_id = utils.get_dhcp_agent_device_id(net_id, kwargs['host'])
data['port']['device_id'] = device_id
port_req = self.new_create_request('ports', data, fmt)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
port_req.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
port_res = port_req.get_response(self.api)
if expected_res_status:
self.assertEqual(port_res.status_int, expected_res_status)
return port_res
def _test_list_resources(self, resource, items, neutron_context=None,
query_params=None):
res = self._list('%ss' % resource,
neutron_context=neutron_context,
query_params=query_params)
resource = resource.replace('-', '_')
self.assertItemsEqual([i['id'] for i in res['%ss' % resource]],
[i[resource]['id'] for i in items])
return res
def test_create_port_json(self):
keys = [('admin_state_up', True), ('status', self.port_create_status)]
with self.port(name='myname') as port:
for k, v in keys:
self.assertEqual(port['port'][k], v)
self.assertIn('mac_address', port['port'])
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual('myname', port['port']['name'])
self._verify_dns_assigment(port['port'],
ips_list=['10.0.0.2'])
def test_list_ports(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
with self.port() as v1, self.port() as v2, self.port() as v3:
ports = (v1, v2, v3)
res = self._test_list_resources('port', ports)
for port in res['ports']:
self._verify_dns_assigment(
port, ips_list=[port['fixed_ips'][0]['ip_address']])
def test_show_port(self):
with self.port() as port:
req = self.new_show_request('ports', port['port']['id'], self.fmt)
sport = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port']['id'], sport['port']['id'])
self._verify_dns_assigment(
sport['port'],
ips_list=[sport['port']['fixed_ips'][0]['ip_address']])
def test_update_port_non_default_dns_domain_with_dns_name(self):
with self.port() as port:
cfg.CONF.set_override('dns_domain', 'example.com')
data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
self._verify_dns_assigment(res['port'],
ips_list=['10.0.0.2'],
dns_name='vm1')
def test_update_port_default_dns_domain_with_dns_name(self):
with self.port() as port:
data = {'port': {'admin_state_up': False, 'dns_name': 'vm1'}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
self._verify_dns_assigment(res['port'],
ips_list=['10.0.0.2'])
def _verify_dns_assigment(self, port, ips_list=[], exp_ips_ipv4=0,
exp_ips_ipv6=0, ipv4_cidrs=[], ipv6_cidrs=[],
dns_name=''):
self.assertEqual(port['dns_name'], dns_name)
dns_assignment = port['dns_assignment']
if ips_list:
self.assertEqual(len(dns_assignment), len(ips_list))
ips_set = set(ips_list)
else:
self.assertEqual(len(dns_assignment), exp_ips_ipv4 + exp_ips_ipv6)
ipv4_count = 0
ipv6_count = 0
subnets_v4 = [netaddr.IPNetwork(cidr) for cidr in ipv4_cidrs]
subnets_v6 = [netaddr.IPNetwork(cidr) for cidr in ipv6_cidrs]
request_dns_name, request_fqdn = self._get_request_hostname_and_fqdn(
dns_name)
for assignment in dns_assignment:
if ips_list:
self.assertIn(assignment['ip_address'], ips_set)
ips_set.remove(assignment['ip_address'])
else:
ip = netaddr.IPAddress(assignment['ip_address'])
if ip.version == 4:
self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v4))
ipv4_count += 1
else:
self.assertTrue(self._verify_ip_in_subnet(ip, subnets_v6))
ipv6_count += 1
hostname, fqdn = self._get_hostname_and_fqdn(request_dns_name,
request_fqdn,
assignment)
self.assertEqual(assignment['hostname'], hostname)
self.assertEqual(assignment['fqdn'], fqdn)
if ips_list:
self.assertFalse(ips_set)
else:
self.assertEqual(ipv4_count, exp_ips_ipv4)
self.assertEqual(ipv6_count, exp_ips_ipv6)
def _get_dns_domain(self):
if not cfg.CONF.dns_domain:
return ''
if cfg.CONF.dns_domain.endswith('.'):
return cfg.CONF.dns_domain
return '%s.' % cfg.CONF.dns_domain
def _get_request_hostname_and_fqdn(self, dns_name):
request_dns_name = ''
request_fqdn = ''
dns_domain = self._get_dns_domain()
if dns_name and dns_domain and dns_domain != 'openstacklocal.':
request_dns_name = dns_name
request_fqdn = request_dns_name
if not request_dns_name.endswith('.'):
request_fqdn = '%s.%s' % (dns_name, dns_domain)
return request_dns_name, request_fqdn
def _get_hostname_and_fqdn(self, request_dns_name, request_fqdn,
assignment):
dns_domain = self._get_dns_domain()
if request_dns_name:
hostname = request_dns_name
fqdn = request_fqdn
else:
hostname = 'host-%s' % assignment['ip_address'].replace(
'.', '-').replace(':', '-')
fqdn = hostname
if dns_domain:
fqdn = '%s.%s' % (hostname, dns_domain)
return hostname, fqdn
def _verify_ip_in_subnet(self, ip, subnets_list):
for subnet in subnets_list:
if ip in subnet:
return True
return False
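    # Illustrative sketch (the dns_domain value is hypothetical): when a port
    # has no dns_name, the hostname is derived from its fixed IP and the fqdn
    # from cfg.CONF.dns_domain, mirroring _get_hostname_and_fqdn() above.
    def _example_default_hostname(self):
        ip = '10.0.0.2'
        hostname = 'host-%s' % ip.replace('.', '-').replace(':', '-')
        fqdn = '%s.%s' % (hostname, 'example.com.')
        return hostname, fqdn  # ('host-10-0-0-2', 'host-10-0-0-2.example.com.')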
def test_update_port_update_ip(self):
"""Test update of port IP.
Check that a configured IP 10.0.0.2 is replaced by 10.0.0.10.
"""
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': "10.0.0.10"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.10')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
self._verify_dns_assigment(res['port'], ips_list=['10.0.0.10'])
def test_update_port_update_ip_address_only(self):
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
ips = port['port']['fixed_ips']
self.assertEqual(len(ips), 1)
self.assertEqual(ips[0]['ip_address'], '10.0.0.2')
self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id'])
data = {'port': {'fixed_ips': [{'subnet_id':
subnet['subnet']['id'],
'ip_address': "10.0.0.10"},
{'ip_address': "10.0.0.2"}]}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ips = res['port']['fixed_ips']
self.assertEqual(len(ips), 2)
self.assertIn({'ip_address': '10.0.0.2',
'subnet_id': subnet['subnet']['id']}, ips)
self.assertIn({'ip_address': '10.0.0.10',
'subnet_id': subnet['subnet']['id']}, ips)
self._verify_dns_assigment(res['port'],
ips_list=['10.0.0.10',
'10.0.0.2'])
def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self):
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
self.assertEqual(res.status_code, 201)
def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_no_period(
self):
cfg.CONF.set_override('dns_domain', 'example.com')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1')
self.assertEqual(res.status_code, 201)
def test_create_port_multiple_v4_v6_subnets_pqdn_and_dns_domain_period(
self):
cfg.CONF.set_override('dns_domain', 'example.com.')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1')
self.assertEqual(res.status_code, 201)
def test_create_port_multiple_v4_v6_subnets_pqdn_and_no_dns_domain(
self):
cfg.CONF.set_override('dns_domain', '')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
self.assertEqual(res.status_code, 201)
def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_no_period(
self):
cfg.CONF.set_override('dns_domain', 'example.com')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1.example.com.')
self.assertEqual(res.status_code, 201)
def test_create_port_multiple_v4_v6_subnets_fqdn_and_dns_domain_period(
self):
cfg.CONF.set_override('dns_domain', 'example.com.')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1.example.com.')
self.assertEqual(res.status_code, 201)
def test_create_port_multiple_v4_v6_subnets_fqdn_default_domain_period(
self):
cfg.CONF.set_override('dns_domain', 'openstacklocal.')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets()
self.assertEqual(res.status_code, 201)
def test_create_port_multiple_v4_v6_subnets_bad_fqdn_and_dns_domain(
self):
cfg.CONF.set_override('dns_domain', 'example.com')
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name='vm1.bad-domain.com.')
self.assertEqual(res.status_code, 400)
expected_error = ('The dns_name passed is a FQDN. Its higher level '
'labels must be equal to the dns_domain option in '
'neutron.conf')
self.assertIn(expected_error, res.text)
def test_create_port_multiple_v4_v6_subnets_bad_pqdn_and_dns_domain(
self):
cfg.CONF.set_override('dns_domain', 'example.com')
num_labels = int(
math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN))
filler_len = int(
math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN))
dns_name = (('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
num_labels + 'a' * filler_len)
res = self._test_create_port_with_multiple_ipv4_and_ipv6_subnets(
dns_name=dns_name)
self.assertEqual(res.status_code, 400)
expected_error = ("When the two are concatenated to form a FQDN "
"(with a '.' at the end), the resulting length "
"exceeds the maximum size")
self.assertIn(expected_error, res.text)
def _test_create_port_with_multiple_ipv4_and_ipv6_subnets(self,
dns_name=''):
"""Test port create with multiple IPv4, IPv6 DHCP/SLAAC subnets."""
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
sub_dicts = [
{'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
'ip_version': 4, 'ra_addr_mode': None},
{'gateway': '10.0.1.1', 'cidr': '10.0.1.0/24',
'ip_version': 4, 'ra_addr_mode': None},
{'gateway': 'fe80::1', 'cidr': 'fe80::/64',
'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
{'gateway': 'fe81::1', 'cidr': 'fe81::/64',
'ip_version': 6, 'ra_addr_mode': constants.IPV6_SLAAC},
{'gateway': 'fe82::1', 'cidr': 'fe82::/64',
'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL},
{'gateway': 'fe83::1', 'cidr': 'fe83::/64',
'ip_version': 6, 'ra_addr_mode': constants.DHCPV6_STATEFUL}]
subnets = {}
for sub_dict in sub_dicts:
subnet = self._make_subnet(
self.fmt, network,
gateway=sub_dict['gateway'],
cidr=sub_dict['cidr'],
ip_version=sub_dict['ip_version'],
ipv6_ra_mode=sub_dict['ra_addr_mode'],
ipv6_address_mode=sub_dict['ra_addr_mode'])
subnets[subnet['subnet']['id']] = sub_dict
res = self._create_port(self.fmt, net_id=network['network']['id'],
dns_name=dns_name)
if res.status_code != 201:
return res
port = self.deserialize(self.fmt, res)
# Since the create port request was made without a list of fixed IPs,
# the port should be associated with addresses for one of the
# IPv4 subnets, one of the DHCPv6 subnets, and both of the IPv6
# SLAAC subnets.
self.assertEqual(4, len(port['port']['fixed_ips']))
addr_mode_count = {None: 0, constants.DHCPV6_STATEFUL: 0,
constants.IPV6_SLAAC: 0}
for fixed_ip in port['port']['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
if subnet_id in subnets:
addr_mode_count[subnets[subnet_id]['ra_addr_mode']] += 1
self.assertEqual(1, addr_mode_count[None])
self.assertEqual(1, addr_mode_count[constants.DHCPV6_STATEFUL])
self.assertEqual(2, addr_mode_count[constants.IPV6_SLAAC])
self._verify_dns_assigment(port['port'], exp_ips_ipv4=1,
exp_ips_ipv6=1,
ipv4_cidrs=[sub_dicts[0]['cidr'],
sub_dicts[1]['cidr']],
ipv6_cidrs=[sub_dicts[4]['cidr'],
sub_dicts[5]['cidr']],
dns_name=dns_name)
return res
def test_api_extension_validation_with_bad_dns_names(self):
num_labels = int(
math.floor(dns.FQDN_MAX_LEN / dns.DNS_LABEL_MAX_LEN))
filler_len = int(
math.floor(dns.FQDN_MAX_LEN % dns.DNS_LABEL_MAX_LEN))
dns_names = [555, '\f\n\r', '.', '-vm01', '_vm01', 'vm01-',
'-vm01.test1', 'vm01.-test1', 'vm01._test1',
'vm01.test1-', 'vm01.te$t1', 'vm0#1.test1.',
'vm01.123.', '-' + 'a' * dns.DNS_LABEL_MAX_LEN,
'a' * (dns.DNS_LABEL_MAX_LEN + 1),
('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
num_labels + 'a' * (filler_len + 1)]
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
'ip_version': 4, 'ra_addr_mode': None}
self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'],
cidr=sub_dict['cidr'],
ip_version=sub_dict['ip_version'],
ipv6_ra_mode=sub_dict['ra_addr_mode'],
ipv6_address_mode=sub_dict['ra_addr_mode'])
for dns_name in dns_names:
res = self._create_port(self.fmt, net_id=network['network']['id'],
dns_name=dns_name)
self.assertEqual(res.status_code, 400)
is_expected_message = (
'cannot be converted to lowercase string' in res.text or
'not a valid PQDN or FQDN. Reason:' in res.text)
self.assertTrue(is_expected_message)
def test_api_extension_validation_with_good_dns_names(self):
cfg.CONF.set_override('dns_domain', 'example.com')
higher_labels_len = len('example.com.')
num_labels = int(
math.floor((dns.FQDN_MAX_LEN - higher_labels_len) /
dns.DNS_LABEL_MAX_LEN))
filler_len = int(
math.floor((dns.FQDN_MAX_LEN - higher_labels_len) %
dns.DNS_LABEL_MAX_LEN))
dns_names = ['', 'www.1000.com', 'vM01', 'vm01.example.com.',
'8vm01', 'vm-01.example.com.', 'vm01.test',
'vm01.test.example.com.', 'vm01.test-100',
'vm01.test-100.example.com.',
'a' * dns.DNS_LABEL_MAX_LEN,
('a' * dns.DNS_LABEL_MAX_LEN) + '.example.com.',
('a' * (dns.DNS_LABEL_MAX_LEN - 1) + '.') *
num_labels + 'a' * (filler_len - 1)]
res = self._create_network(fmt=self.fmt, name='net',
admin_state_up=True)
network = self.deserialize(self.fmt, res)
sub_dict = {'gateway': '10.0.0.1', 'cidr': '10.0.0.0/24',
'ip_version': 4, 'ra_addr_mode': None}
self._make_subnet(self.fmt, network, gateway=sub_dict['gateway'],
cidr=sub_dict['cidr'],
ip_version=sub_dict['ip_version'],
ipv6_ra_mode=sub_dict['ra_addr_mode'],
ipv6_address_mode=sub_dict['ra_addr_mode'])
for dns_name in dns_names:
res = self._create_port(self.fmt, net_id=network['network']['id'],
dns_name=dns_name)
self.assertEqual(res.status_code, 201)
| apache-2.0 | -6,871,615,773,267,328,000 | 46.541578 | 79 | 0.525586 | false |
sahana/Turkey | modules/s3/sync_adapter/ccrm.py | 8 | 13047 | # -*- coding: utf-8 -*-
""" S3 Synchronization: Peer Repository Adapter
@copyright: 2012-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import urllib, urllib2
from gluon import *
from gluon.storage import Storage
from ..s3sync import S3SyncBaseAdapter
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3SYNC: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
class S3SyncAdapter(S3SyncBaseAdapter):
"""
CiviCRM Synchronization Adapter
@status: experimental
"""
# Resource map
RESOURCE = {
"pr_person": {
"q": "civicrm/contact",
"contact_type": "Individual"
},
}
# -------------------------------------------------------------------------
# Methods to be implemented by subclasses:
# -------------------------------------------------------------------------
def register(self):
"""
Register this site at the peer repository
@return: True to indicate success, otherwise False
"""
# CiviCRM does not support via-web peer registration
return True
# -------------------------------------------------------------------------
def login(self):
"""
Login at the peer repository
@return: None if successful, otherwise the error
"""
_debug("S3SyncCiviCRM.login()")
repository = self.repository
request = {
"q": "civicrm/login",
"name": repository.username,
"pass": repository.password,
}
response, error = self._send_request(**request)
if error:
_debug("S3SyncCiviCRM.login FAILURE: %s" % error)
return error
api_key = response.findall("//api_key")
if len(api_key):
self.api_key = api_key[0].text
else:
error = "No API Key returned by CiviCRM"
_debug("S3SyncCiviCRM.login FAILURE: %s" % error)
return error
PHPSESSID = response.findall("//PHPSESSID")
if len(PHPSESSID):
self.PHPSESSID = PHPSESSID[0].text
else:
error = "No PHPSESSID returned by CiviCRM"
_debug("S3SyncCiviCRM.login FAILURE: %s" % error)
return error
_debug("S3SyncCiviCRM.login SUCCESS")
return None
# -------------------------------------------------------------------------
def pull(self, task, onconflict=None):
"""
Fetch updates from the peer repository and import them
into the local database (active pull)
@param task: the synchronization task (sync_task Row)
@param onconflict: callback for automatic conflict resolution
@return: tuple (error, mtime), with error=None if successful,
else error=message, and mtime=modification timestamp
of the youngest record sent
"""
xml = current.xml
repository = self.repository
log = repository.log
resource_name = task.resource_name
_debug("S3SyncCiviCRM.pull(%s, %s)" % (repository.url, resource_name))
mtime = None
message = ""
remote = False
# Construct the request
if resource_name not in self.RESOURCE:
result = log.FATAL
message = "Resource type %s currently not supported for CiviCRM synchronization" % \
resource_name
output = xml.json_message(False, 400, message)
else:
args = Storage(self.RESOURCE[resource_name])
args["q"] += "/get"
tree, error = self._send_request(method="GET", **args)
if error:
result = log.FATAL
remote = True
message = error
output = xml.json_message(False, 400, error)
elif len(tree.getroot()):
result = log.SUCCESS
remote = False
# Get import strategy and update policy
strategy = task.strategy
update_policy = task.update_policy
conflict_policy = task.conflict_policy
# Import stylesheet
folder = current.request.folder
import os
stylesheet = os.path.join(folder,
"static",
"formats",
"ccrm",
"import.xsl")
# Host name of the peer,
# used by the import stylesheet
import urlparse
hostname = urlparse.urlsplit(repository.url).hostname
# Import the data
resource = current.s3db.resource(resource_name)
if onconflict:
onconflict_callback = lambda item: onconflict(item,
repository,
resource)
else:
onconflict_callback = None
count = 0
success = True
try:
success = resource.import_xml(tree,
stylesheet=stylesheet,
ignore_errors=True,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
last_sync=task.last_pull,
onconflict=onconflict_callback,
site=hostname)
count = resource.import_count
except IOError, e:
result = log.FATAL
message = "%s" % e
output = xml.json_message(False, 400, message)
mtime = resource.mtime
# Log all validation errors
if resource.error_tree is not None:
result = log.WARNING
message = "%s" % resource.error
for element in resource.error_tree.findall("resource"):
for field in element.findall("data[@error]"):
error_msg = field.get("error", None)
if error_msg:
msg = "(UID: %s) %s.%s=%s: %s" % \
(element.get("uuid", None),
element.get("name", None),
field.get("field", None),
field.get("value", field.text),
field.get("error", None))
message = "%s, %s" % (message, msg)
# Check for failure
if not success:
result = log.FATAL
if not message:
message = "%s" % resource.error
output = xml.json_message(False, 400, message)
mtime = None
# ...or report success
elif not message:
message = "Data imported successfully (%s records)" % count
output = None
else:
# No data received from peer
result = log.ERROR
remote = True
message = "No data received from peer"
output = None
# Log the operation
log.write(repository_id=repository.id,
resource_name=resource_name,
transmission=log.OUT,
mode=log.PULL,
action=None,
remote=remote,
result=result,
message=message)
_debug("S3SyncCiviCRM.pull import %s: %s" % (result, message))
return (output, mtime)
# -------------------------------------------------------------------------
def push(self, task):
"""
Extract new updates from the local database and send
them to the peer repository (active push)
@param task: the synchronization task (sync_task Row)
@return: tuple (error, mtime), with error=None if successful,
else error=message, and mtime=modification timestamp
of the youngest record sent
"""
xml = current.xml
repository = self.repository
log = repository.log
resource_name = task.resource_name
_debug("S3SyncCiviCRM.push(%s, %s)" % (repository.url, resource_name))
result = log.FATAL
remote = False
message = "Push to CiviCRM currently not supported"
output = xml.json_message(False, 400, message)
# Log the operation
log.write(repository_id=repository.id,
resource_name=resource_name,
transmission=log.OUT,
mode=log.PUSH,
action=None,
remote=remote,
result=result,
message=message)
_debug("S3SyncCiviCRM.push export %s: %s" % (result, message))
return(output, None)
# -------------------------------------------------------------------------
# Internal methods:
# -------------------------------------------------------------------------
def _send_request(self, method="GET", **args):
repository = self.repository
config = repository.config
# Authentication
args = Storage(args)
if hasattr(self, "PHPSESSID") and self.PHPSESSID:
args["PHPSESSID"] = self.PHPSESSID
if hasattr(self, "api_key") and self.api_key:
args["api_key"] = self.api_key
if repository.site_key:
args["key"] = repository.site_key
# Create the request
url = repository.url + "?" + urllib.urlencode(args)
req = urllib2.Request(url=url)
handlers = []
# Proxy handling
proxy = repository.proxy or config.proxy or None
if proxy:
_debug("using proxy=%s" % proxy)
            # "protocol" is not defined anywhere in this method; derive the
            # scheme from the request URL so the proxy mapping has a key
            protocol = url.split("://", 1)[0] if "://" in url else "http"
            proxy_handler = urllib2.ProxyHandler({protocol: proxy})
handlers.append(proxy_handler)
# Install all handlers
if handlers:
opener = urllib2.build_opener(*handlers)
urllib2.install_opener(opener)
# Execute the request
response = None
message = None
try:
if method == "POST":
f = urllib2.urlopen(req, data="")
else:
f = urllib2.urlopen(req)
except urllib2.HTTPError, e:
message = "HTTP %s: %s" % (e.code, e.reason)
else:
# Parse the response
tree = current.xml.parse(f)
root = tree.getroot()
#print current.xml.tostring(tree, pretty_print=True)
is_error = root.xpath("//ResultSet[1]/Result[1]/is_error")
if len(is_error) and int(is_error[0].text):
error = root.xpath("//ResultSet[1]/Result[1]/error_message")
if len(error):
message = error[0].text
else:
message = "Unknown error"
else:
response = tree
return response, message
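# Illustrative sketch (host and credentials are hypothetical): this mirrors how
# _send_request() above assembles the GET URL for the pr_person resource map,
# i.e. q=civicrm/contact/get plus contact_type, session, API and site keys.
def _example_civicrm_url():
    args = Storage(S3SyncAdapter.RESOURCE["pr_person"])
    args["q"] += "/get"
    args["PHPSESSID"] = "SESSION-ID"
    args["api_key"] = "API-KEY"
    args["key"] = "SITE-KEY"
    return "http://civicrm.example.org" + "?" + urllib.urlencode(args)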
# End =========================================================================
| mit | -8,122,125,277,936,852,000 | 35.141274 | 96 | 0.478961 | false |
jaredly/pyjamas | library/pyjamas/ui/PasswordTextBox.py | 4 | 1219 | # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from TextBoxBase import TextBoxBase
class PasswordTextBox(TextBoxBase):
def __init__(self, **kwargs):
if kwargs.has_key('Element'):
element = kwargs.pop('Element')
else:
element = DOM.createInputPassword()
self.setElement(element)
if not kwargs.has_key('StyleName'):
kwargs['StyleName']="gwt-PasswordTextBox"
TextBoxBase.__init__(self, element, **kwargs)
Factory.registerClass('pyjamas.ui.PasswordTextBox', PasswordTextBox)
| apache-2.0 | 3,784,743,705,021,941,000 | 37.09375 | 74 | 0.719442 | false |
skoenen/pyprol | tests/test_configuration.py | 1 | 2476 | from unittest import TestCase
from pyprol import configuration
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class ConfigurationTestCase(TestCase):
def assertHasAttribute(self, obj, attr):
self.assertTrue(hasattr(obj, attr))
def setUp(self):
self.polluted_config = {
"pyprol.storage": "sqlite://$HOME/pyprol_test.db",
"pyprol.instrumentations": "some.instrumentations.config, some.instrumentations.runner",
"someother.option": "some value",
"another.option": "with, a, list, value"}
self.clean_config = {
"pyprol.storage": "sqlite://$HOME/pyprol_test.db",
"pyprol.instrumentations": "some.instrumentations.config, some.instrumentations.runner"}
self.instrumentations = [
"some.instrumentations.config",
"some.instrumentations.runner",
"pyprol.instrumentations.paste",
"pyprol.instrumentations.pylons",
"pyprol.instrumentations.sqlalchemy"]
self.storage_endpoint = urlparse("sqlite://$HOME/pyprol_test.db")
def test_config_filter(self):
result = configuration.config_filter(self.polluted_config)
self.assertEqual(result, self.clean_config)
def test_instrumentation_list(self):
result = configuration.instrumentation_list(
self.clean_config["pyprol.instrumentations"])
self.assertEqual(result,
["some.instrumentations.config",
"some.instrumentations.runner"])
def test_configuration(self):
config = configuration.Configuration(self.polluted_config)
self.assertEqual(
config.instrumentations,
self.instrumentations)
self.assertEqual(
config.storage_endpoint,
self.storage_endpoint)
def test_additional_keys(self):
self.clean_config["pyprol.storage_server.storage_endpoint"] = "sqlite://$HOME/pyprol_server.db"
self.clean_config["pyprol.measure.save_process_wait"] = 1
config = configuration.Configuration(self.clean_config)
self.assertHasAttribute(config, "storage_server")
self.assertHasAttribute(config.storage_server, "storage_endpoint")
self.assertHasAttribute(config, "measure")
self.assertHasAttribute(config.measure, "save_process_wait")
| bsd-3-clause | -5,591,257,489,140,779,000 | 34.884058 | 104 | 0.641761 | false |
realsobek/freeipa | ipaclient/plugins/ca.py | 2 | 1405 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
import base64
from ipaclient.frontend import MethodOverride
from ipalib import util, x509, Str
from ipalib.plugable import Registry
from ipalib.text import _
register = Registry()
class WithCertOutArgs(MethodOverride):
takes_options = (
Str(
'certificate_out?',
doc=_('Write certificate (chain if --chain used) to file'),
include='cli',
cli_metavar='FILE',
),
)
def forward(self, *keys, **options):
filename = None
if 'certificate_out' in options:
filename = options.pop('certificate_out')
util.check_writable_file(filename)
result = super(WithCertOutArgs, self).forward(*keys, **options)
if filename:
def to_pem(x):
return x509.make_pem(x)
if options.get('chain', False):
ders = result['result']['certificate_chain']
data = '\n'.join(to_pem(base64.b64encode(der)) for der in ders)
else:
data = to_pem(result['result']['certificate'])
with open(filename, 'wb') as f:
f.write(data)
return result
@register(override=True, no_fail=True)
class ca_add(WithCertOutArgs):
pass
@register(override=True, no_fail=True)
class ca_show(WithCertOutArgs):
pass
| gpl-3.0 | -7,137,457,535,440,251,000 | 25.509434 | 79 | 0.592883 | false |
kernsuite-debian/lofar | CEP/Pipeline/recipes/sip/nodes/deprecated/demix/median_filter.py | 2 | 5224 | import os
import numpy as numpy
import math
from scipy import *
import scipy.signal as signal
def median_filter(ampl_tot, half_window, threshold):
q = 1.# 1.4826
ampl_tot_copy = numpy.copy(ampl_tot)
median_array = signal.medfilt(ampl_tot,half_window*2.-1)
# find the higher peaks
for i in range(len(median_array)):
thr = float(threshold*median_array[i]*q)
absvalue= abs(ampl_tot[i] - median_array[i])
if absvalue > thr :
ampl_tot_copy[i] = median_array[i]
# find the low values
ampl_div = 1./ampl_tot_copy
median_array = signal.medfilt(ampl_div,half_window*2.-1)
for i in range(len(median_array)):
thr = float(threshold*median_array[i]*q)
absvalue= abs(ampl_div[i] - median_array[i])
if absvalue > thr :
ampl_div[i] = median_array[i]
ampl_tot_copy = 1./ampl_div # get back
###### now do a second pass #######
median_array = signal.medfilt(ampl_tot,half_window*2.-1)
# find the higher peaks
for i in range(len(median_array)):
thr = float(threshold*median_array[i]*q)
absvalue= abs(ampl_tot[i] - median_array[i])
if absvalue > thr :
ampl_tot_copy[i] = median_array[i]
# find the low values
ampl_div = 1./ampl_tot_copy
median_array = signal.medfilt(ampl_div,half_window*2.-1)
for i in range(len(median_array)):
thr = float(threshold*median_array[i]*q)
absvalue= abs(ampl_div[i] - median_array[i])
if absvalue > thr :
ampl_div[i] = median_array[i]
ampl_tot_copy = 1./ampl_div # get back
#ampl_tot_copy = ampl_tot_copy*0. + 1.0
return ampl_tot_copy
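# Illustrative usage sketch (the amplitude values are hypothetical): outliers
# that deviate from the running median by more than threshold times that median
# are replaced, using a medfilt window of 2*half_window - 1 samples.
def _example_median_filter():
    ampl = numpy.array([1.0, 1.1, 0.9, 5.0, 1.0, 1.2, 0.95, 1.05, 1.0])
    return median_filter(ampl, 3, 2.0)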
def my_solflag(ampl, half_window, threshold):
""" Picked out the flagging part of solflag. ampl is a numpy array of
ONE series
of amplitudes. Bad data is False --- R. Niruj Mohan"""
import numpy as N
import math
ampl_tot_copy = numpy.copy(ampl)
median_array = signal.medfilt(ampl_tot_copy,half_window*2.-1)
ndata = len(ampl)
flags = N.zeros(ndata, dtype=bool)
sol = N.zeros(ndata+2*half_window)
sol[half_window:half_window+ndata] = ampl
for i in range(0, half_window):
# Mirror at left edge.
idx = min(ndata-1, half_window-i)
sol[i] = ampl[idx]
# Mirror at right edge
idx = max(0, ndata-2-i)
sol[ndata+half_window+i] = ampl[idx]
sol_flag = N.zeros(ndata+2*half_window, dtype=bool)
sol_flag_val = N.zeros(ndata+2*half_window, dtype=bool)
for i in range(half_window, half_window + ndata):
# Compute median of the absolute distance to the median.
window = sol[i-half_window:i+half_window+1]
window_flag = sol_flag[i-half_window:i+half_window+1]
window_masked = window[~window_flag]
if len(window_masked) < math.sqrt(len(window)):
# Not enough data to get accurate statistics.
continue
median = N.median(window_masked)
q = 1.4826 * N.median(N.abs(window_masked - median))
# Flag sample if it is more than 1.4826 * threshold * the
# median distance away from the median.
if abs(sol[i] - median) > (threshold * q):
sol_flag[i] = True
mask = sol_flag[half_window:half_window + ndata]
for i in range(len(mask)):
if mask[i]:
ampl_tot_copy[i] = median_array[i]
return ampl_tot_copy
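# Illustrative sketch of the criterion used above: a sample is flagged when its
# distance from the windowed median exceeds threshold * 1.4826 * MAD, the usual
# robust-sigma scaling of the median absolute deviation.
def _example_mad_criterion(window, sample, threshold):
    med = numpy.median(window)
    q = 1.4826 * numpy.median(numpy.abs(window - med))
    return abs(sample - med) > threshold * q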
def my_solflag_inv(ampl, half_window, threshold):
""" Picked out the flagging part of solflag. ampl is a numpy array of
ONE series
of amplitudes. Bad data is False --- R. Niruj Mohan"""
import numpy as N
import math
ampl_tot_copy = 1./numpy.copy(ampl)
median_array = signal.medfilt(ampl_tot_copy,half_window*2.-1)
ndata = len(ampl)
flags = N.zeros(ndata, dtype=bool)
sol = N.zeros(ndata+2*half_window)
sol[half_window:half_window+ndata] = ampl
for i in range(0, half_window):
# Mirror at left edge.
idx = min(ndata-1, half_window-i)
sol[i] = ampl[idx]
# Mirror at right edge
idx = max(0, ndata-2-i)
sol[ndata+half_window+i] = ampl[idx]
sol_flag = N.zeros(ndata+2*half_window, dtype=bool)
sol_flag_val = N.zeros(ndata+2*half_window, dtype=bool)
for i in range(half_window, half_window + ndata):
# Compute median of the absolute distance to the median.
window = sol[i-half_window:i+half_window+1]
window_flag = sol_flag[i-half_window:i+half_window+1]
window_masked = window[~window_flag]
if len(window_masked) < math.sqrt(len(window)):
# Not enough data to get accurate statistics.
continue
median = N.median(window_masked)
q = 1.4826 * N.median(N.abs(window_masked - median))
# Flag sample if it is more than 1.4826 * threshold * the
# median distance away from the median.
if abs(sol[i] - median) > (threshold * q):
sol_flag[i] = True
mask = sol_flag[half_window:half_window + ndata]
for i in range(len(mask)):
if mask[i]:
ampl_tot_copy[i] = median_array[i]
return 1./ampl_tot_copy
| gpl-3.0 | 6,011,674,521,633,235,000 | 30.853659 | 73 | 0.602603 | false |
PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/turtledemo/__main__.py | 10 | 9375 | #!/usr/bin/env python3
import sys
import os
from tkinter import *
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.textView import view_file # TextViewer
from importlib import reload
import turtle
import time
demo_dir = os.path.dirname(os.path.abspath(__file__))
STARTUP = 1
READY = 2
RUNNING = 3
DONE = 4
EVENTDRIVEN = 5
menufont = ("Arial", 12, NORMAL)
btnfont = ("Arial", 12, 'bold')
txtfont = ('Lucida Console', 8, 'normal')
def getExampleEntries():
return [entry[:-3] for entry in os.listdir(demo_dir) if
entry.endswith(".py") and entry[0] != '_']
def showDemoHelp():
view_file(demo.root, "Help on turtleDemo",
os.path.join(demo_dir, "demohelp.txt"))
def showAboutDemo():
view_file(demo.root, "About turtleDemo",
os.path.join(demo_dir, "about_turtledemo.txt"))
def showAboutTurtle():
view_file(demo.root, "About the new turtle module.",
os.path.join(demo_dir, "about_turtle.txt"))
class DemoWindow(object):
def __init__(self, filename=None): #, root=None):
self.root = root = turtle._root = Tk()
root.wm_protocol("WM_DELETE_WINDOW", self._destroy)
#################
self.mBar = Frame(root, relief=RAISED, borderwidth=2)
self.mBar.pack(fill=X)
self.ExamplesBtn = self.makeLoadDemoMenu()
self.OptionsBtn = self.makeHelpMenu()
self.mBar.tk_menuBar(self.ExamplesBtn, self.OptionsBtn) #, QuitBtn)
root.title('Python turtle-graphics examples')
#################
self.left_frame = left_frame = Frame(root)
self.text_frame = text_frame = Frame(left_frame)
self.vbar = vbar =Scrollbar(text_frame, name='vbar')
self.text = text = Text(text_frame,
name='text', padx=5, wrap='none',
width=45)
vbar['command'] = text.yview
vbar.pack(side=LEFT, fill=Y)
#####################
self.hbar = hbar =Scrollbar(text_frame, name='hbar', orient=HORIZONTAL)
hbar['command'] = text.xview
hbar.pack(side=BOTTOM, fill=X)
#####################
text['yscrollcommand'] = vbar.set
text.config(font=txtfont)
text.config(xscrollcommand=hbar.set)
text.pack(side=LEFT, fill=Y, expand=1)
#####################
self.output_lbl = Label(left_frame, height= 1,text=" --- ", bg = "#ddf",
font = ("Arial", 16, 'normal'))
self.output_lbl.pack(side=BOTTOM, expand=0, fill=X)
#####################
text_frame.pack(side=LEFT, fill=BOTH, expand=0)
left_frame.pack(side=LEFT, fill=BOTH, expand=0)
self.graph_frame = g_frame = Frame(root)
turtle._Screen._root = g_frame
turtle._Screen._canvas = turtle.ScrolledCanvas(g_frame, 800, 600, 1000, 800)
#xturtle.Screen._canvas.pack(expand=1, fill="both")
self.screen = _s_ = turtle.Screen()
#####
turtle.TurtleScreen.__init__(_s_, _s_._canvas)
#####
self.scanvas = _s_._canvas
#xturtle.RawTurtle.canvases = [self.scanvas]
turtle.RawTurtle.screens = [_s_]
self.scanvas.pack(side=TOP, fill=BOTH, expand=1)
self.btn_frame = btn_frame = Frame(g_frame, height=100)
self.start_btn = Button(btn_frame, text=" START ", font=btnfont, fg = "white",
disabledforeground = "#fed", command=self.startDemo)
self.start_btn.pack(side=LEFT, fill=X, expand=1)
self.stop_btn = Button(btn_frame, text=" STOP ", font=btnfont, fg = "white",
disabledforeground = "#fed", command = self.stopIt)
self.stop_btn.pack(side=LEFT, fill=X, expand=1)
self.clear_btn = Button(btn_frame, text=" CLEAR ", font=btnfont, fg = "white",
disabledforeground = "#fed", command = self.clearCanvas)
self.clear_btn.pack(side=LEFT, fill=X, expand=1)
self.btn_frame.pack(side=TOP, fill=BOTH, expand=0)
self.graph_frame.pack(side=TOP, fill=BOTH, expand=1)
Percolator(text).insertfilter(ColorDelegator())
self.dirty = False
self.exitflag = False
if filename:
self.loadfile(filename)
self.configGUI(NORMAL, DISABLED, DISABLED, DISABLED,
"Choose example from menu", "black")
self.state = STARTUP
def _destroy(self):
self.root.destroy()
sys.exit()
def configGUI(self, menu, start, stop, clear, txt="", color="blue"):
self.ExamplesBtn.config(state=menu)
self.start_btn.config(state=start)
if start == NORMAL:
self.start_btn.config(bg="#d00")
else:
self.start_btn.config(bg="#fca")
self.stop_btn.config(state=stop)
if stop == NORMAL:
self.stop_btn.config(bg="#d00")
else:
self.stop_btn.config(bg="#fca")
        self.clear_btn.config(state=clear)
if clear == NORMAL:
self.clear_btn.config(bg="#d00")
else:
self.clear_btn.config(bg="#fca")
self.output_lbl.config(text=txt, fg=color)
def makeLoadDemoMenu(self):
CmdBtn = Menubutton(self.mBar, text='Examples', underline=0, font=menufont)
CmdBtn.pack(side=LEFT, padx="2m")
CmdBtn.menu = Menu(CmdBtn)
for entry in getExampleEntries():
def loadexample(x):
def emit():
self.loadfile(x)
return emit
CmdBtn.menu.add_command(label=entry, underline=0,
font=menufont, command=loadexample(entry))
CmdBtn['menu'] = CmdBtn.menu
return CmdBtn
def makeHelpMenu(self):
CmdBtn = Menubutton(self.mBar, text='Help', underline=0, font=menufont)
CmdBtn.pack(side=LEFT, padx='2m')
CmdBtn.menu = Menu(CmdBtn)
CmdBtn.menu.add_command(label='About turtle.py', font=menufont,
command=showAboutTurtle)
CmdBtn.menu.add_command(label='turtleDemo - Help', font=menufont,
command=showDemoHelp)
CmdBtn.menu.add_command(label='About turtleDemo', font=menufont,
command=showAboutDemo)
CmdBtn['menu'] = CmdBtn.menu
return CmdBtn
def refreshCanvas(self):
if not self.dirty: return
self.screen.clear()
#self.screen.mode("standard")
self.dirty=False
def loadfile(self, filename):
self.refreshCanvas()
modname = 'turtledemo.' + filename
__import__(modname)
self.module = sys.modules[modname]
with open(self.module.__file__, 'r') as f:
chars = f.read()
self.text.delete("1.0", "end")
self.text.insert("1.0", chars)
self.root.title(filename + " - a Python turtle graphics example")
reload(self.module)
self.configGUI(NORMAL, NORMAL, DISABLED, DISABLED,
"Press start button", "red")
self.state = READY
def startDemo(self):
self.refreshCanvas()
self.dirty = True
turtle.TurtleScreen._RUNNING = True
self.configGUI(DISABLED, DISABLED, NORMAL, DISABLED,
"demo running...", "black")
self.screen.clear()
self.screen.mode("standard")
self.state = RUNNING
try:
result = self.module.main()
if result == "EVENTLOOP":
self.state = EVENTDRIVEN
else:
self.state = DONE
except turtle.Terminator:
self.state = DONE
result = "stopped!"
if self.state == DONE:
self.configGUI(NORMAL, NORMAL, DISABLED, NORMAL,
result)
elif self.state == EVENTDRIVEN:
self.exitflag = True
self.configGUI(DISABLED, DISABLED, NORMAL, DISABLED,
"use mouse/keys or STOP", "red")
def clearCanvas(self):
self.refreshCanvas()
self.screen._delete("all")
self.scanvas.config(cursor="")
self.configGUI(NORMAL, NORMAL, DISABLED, DISABLED)
def stopIt(self):
if self.exitflag:
self.clearCanvas()
self.exitflag = False
self.configGUI(NORMAL, NORMAL, DISABLED, DISABLED,
"STOPPED!", "red")
turtle.TurtleScreen._RUNNING = False
#print "stopIT: exitflag = True"
else:
turtle.TurtleScreen._RUNNING = False
#print "stopIt: exitflag = False"
if __name__ == '__main__':
demo = DemoWindow()
RUN = True
while RUN:
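        # Stopping or crashing a running demo raises AttributeError/TypeError
        # out of mainloop(); they are caught below so the canvas can be
        # cleared and the GUI loop restarted.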
try:
#print("ENTERING mainloop")
demo.root.mainloop()
except AttributeError:
#print("AttributeError!- WAIT A MOMENT!")
time.sleep(0.3)
print("GOING ON ..")
            demo.clearCanvas()
except TypeError:
demo.screen._delete("all")
#print("CRASH!!!- WAIT A MOMENT!")
time.sleep(0.3)
#print("GOING ON ..")
demo.clearCanvas()
except:
print("BYE!")
RUN = False
| gpl-2.0 | -7,332,869,846,169,399,000 | 34.244361 | 88 | 0.55584 | false |
janusnic/shoop | shoop_tests/notify/test_template.py | 7 | 1867 | # -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import pytest
from shoop.notify import Context
from shoop.notify.template import NoLanguageMatches
from shoop_tests.notify.fixtures import (get_test_template, TEST_TEMPLATE_LANGUAGES,
TestTemplateUsingAction, TEST_TEMPLATE_DATA, TestUnilingualTemplateUsingAction,
TEST_UNI_TEMPLATE_DATA
)
def test_template_render(template=None):
    if template is None:
        template = get_test_template()
# since both subject and body are required, "sw" is disqualified
japanese_render = template.render_first_match(TEST_TEMPLATE_LANGUAGES, ("subject", "body"))
assert japanese_render["_language"] == "ja"
# test that |upper worked
assert template.context.get("name").upper() in japanese_render["body"]
def test_some_fields_language_fallback():
template = get_test_template()
assert template.render_first_match(TEST_TEMPLATE_LANGUAGES, ("body",))["_language"] == "sw"
def test_no_language_matches():
template = get_test_template()
with pytest.raises(NoLanguageMatches):
template.render_first_match(("xx",), ("body",))
def test_template_in_action():
ac = TestTemplateUsingAction(data={"template_data": TEST_TEMPLATE_DATA})
context = Context.from_variables(name=u"Sir Test")
template = ac.get_template(context)
test_template_render(template)
japanese_render = ac.get_template_values(context, ("ja",))
name = template.context.get("name")
assert name.upper() in japanese_render["body"]
ac = TestUnilingualTemplateUsingAction(data={"template_data": TEST_UNI_TEMPLATE_DATA})
assert name in ac.get_template_values(context)["subject"]
| agpl-3.0 | 8,751,488,560,616,650,000 | 38.723404 | 95 | 0.724156 | false |
davidcoallier/blaze | blaze/datashape/tests/test_parser.py | 2 | 3726 | from blaze.datashape import *
from blaze.datashape.parse import parse, load
from blaze.datashape.record import RecordDecl, derived
from textwrap import dedent
from unittest import skip
def test_simple_parse():
x = parse('800, 600, RGBA')
y = parse('Enum (1,2)')
z = parse('300 , 400, Record(x=int64, y=int32)')
assert type(x) is DataShape
assert type(y) is DataShape
assert type(z) is DataShape
assert type(x[0]) is Fixed
assert type(y[0]) is Enum
assert type(z[0]) is Fixed
assert type(z[1]) is Fixed
assert type(z[2]) is Record
assert z[2]('x') is int64
assert z[2]('y') is int32
def test_compound_record():
p = parse('6, Record(x=int, y=float, z=str)')
assert type(p[0]) is Fixed
assert type(p[1]) is Record
def test_free_variables():
p = parse('N, M, 800, 600, RGBA')
assert type(p[0]) is TypeVar
assert type(p[1]) is TypeVar
assert type(p[2]) is Fixed
assert type(p[3]) is Fixed
assert type(p[4]) is Record
assert p[4]('R') is int16
assert p[4]('G') is int16
assert p[4]('B') is int16
assert p[4]('A') is int8
def test_flat_datashape():
p = parse('N, M, 800, 600, (int16, int16, int16, int8)')
assert type(p[0]) is TypeVar
assert type(p[1]) is TypeVar
assert type(p[2]) is Fixed
assert type(p[3]) is Fixed
assert p[4:8] == (int16, int16, int16, int8)
def test_flatten1():
x = parse('a, ( b, ( c, ( d ) ) )')
y = parse('a, b, c, d')
assert len(x.parameters) == len(y.parameters)
assert x[0].symbol == 'a'
assert x[1].symbol == 'b'
assert x[2].symbol == 'c'
assert x[3].symbol == 'd'
assert y[0].symbol == 'a'
assert y[1].symbol == 'b'
assert y[2].symbol == 'c'
assert y[3].symbol == 'd'
assert x.parameters == y.parameters
def test_flatten2():
x = parse('a, ( b, ( c, d ) )')
y = parse('a, b, c, d')
assert len(x.parameters) == len(y.parameters)
assert x[0].symbol == 'a'
assert x[1].symbol == 'b'
assert x[2].symbol == 'c'
assert x[3].symbol == 'd'
assert y[0].symbol == 'a'
assert y[1].symbol == 'b'
assert y[2].symbol == 'c'
assert y[3].symbol == 'd'
assert x.parameters == y.parameters
def test_parse_equality():
x = parse('800, 600, int64')
y = parse('800, 600, int64')
assert x._equal(y)
def test_parse_fixed_integer_diff():
x = parse('1, int32')
y = parse('{1}, int32')
assert type(x[0]) is Fixed
assert type(y[0][0]) is Integer
def test_parse_ctypes():
x = parse('800, 600, double')
y = parse('800, 600, PyObject')
def test_parse_vars():
x = parse('Range(1,2)')
assert x[0].lower == 1
assert x[0].upper == 2
def test_parse_na():
x = parse('NA')
assert x[0] is na
def test_parse_either():
x = parse('Either(int64, NA)')
assert x[0].a == int64
assert x[0].b is na
def test_custom_record():
class Stock1(RecordDecl):
name = string
open = float_
close = float_
max = int64
min = int64
volume = float_
@derived('int64')
def mid(self):
return (self.min + self.max)/2
assert Stock1.mid
@skip
def test_custom_record_infer():
class Stock2(RecordDecl):
name = string
open = float_
close = float_
max = int64
min = int64
volume = float_
@derived()
def mid(self):
return (self.min + self.max)/2
assert Stock2.mid
@skip
def test_module_parse():
mod = load('tests/foo.ds')
assert 'A' in dir(mod)
assert type(mod.B) is DataShape
assert 'B' in dir(mod)
assert type(mod.A) is DataShape
| bsd-2-clause | 7,589,067,548,018,870,000 | 21.581818 | 60 | 0.566023 | false |
keisuke-umezawa/chainer | chainer/training/extensions/evaluator.py | 2 | 9532 | import copy
import warnings
import six
from chainer import backend
from chainer import configuration
from chainer.dataset import convert
from chainer.dataset import iterator as iterator_module
from chainer import function
from chainer import iterators
from chainer import link
from chainer import reporter as reporter_module
from chainer.training import extension
class Evaluator(extension.Extension):
"""Trainer extension to evaluate models on a validation set.
This extension evaluates the current models by a given evaluation function.
It creates a :class:`~chainer.Reporter` object to store values observed in
the evaluation function on each iteration. The report for all iterations
are aggregated to :class:`~chainer.DictSummary`. The collected mean values
are further reported to the reporter object of the trainer, where the name
of each observation is prefixed by the evaluator name. See
:class:`~chainer.Reporter` for details in naming rules of the reports.
Evaluator has a structure to customize similar to that of
:class:`~chainer.training.updaters.StandardUpdater`.
The main differences are:
- There are no optimizers in an evaluator. Instead, it holds links
to evaluate.
- An evaluation loop function is used instead of an update function.
- Preparation routine can be customized, which is called before each
evaluation. It can be used, e.g., to initialize the state of stateful
recurrent networks.
There are two ways to modify the evaluation behavior besides setting a
custom evaluation function. One is by setting a custom evaluation loop via
the ``eval_func`` argument. The other is by inheriting this class and
    overriding the :meth:`evaluate` method. In the latter case, users have to
create and handle a reporter object manually. Users also have to copy the
iterators before using them, in order to reuse them at the next time of
evaluation. In both cases, the functions are called in testing mode
(i.e., ``chainer.config.train`` is set to ``False``).
This extension is called at the end of each epoch by default.
Args:
iterator: Dataset iterator for the validation dataset. It can also be
a dictionary of iterators. If this is just an iterator, the
iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays.
:func:`~chainer.dataset.concat_examples` is used by default.
device: Device to which the validation data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
Attributes:
converter: Converter function.
device: Device to which the validation data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
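    .. admonition:: Example

        A minimal sketch of typical use with a trainer (``model``,
        ``test_data`` and ``trainer`` are assumed to be defined elsewhere)::

            test_iter = chainer.iterators.SerialIterator(
                test_data, batch_size=32, repeat=False, shuffle=False)
            trainer.extend(
                chainer.training.extensions.Evaluator(test_iter, model))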
"""
trigger = 1, 'epoch'
default_name = 'validation'
priority = extension.PRIORITY_WRITER
name = None
def __init__(self, iterator, target, converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None):
if device is not None:
device = backend.get_device(device)
if isinstance(iterator, iterator_module.Iterator):
iterator = {'main': iterator}
self._iterators = iterator
if isinstance(target, link.Link):
target = {'main': target}
self._targets = target
self.converter = converter
self.device = device
self.eval_hook = eval_hook
self.eval_func = eval_func
for key, iter in six.iteritems(iterator):
if (isinstance(iter, (iterators.SerialIterator,
iterators.MultiprocessIterator,
iterators.MultithreadIterator)) and
getattr(iter, 'repeat', False)):
                msg = ('The `repeat` property of the iterator {} '
                       'is set to `True`. Typically, the evaluator sweeps '
                       'over iterators until they stop, '
                       'but since this property is `True`, the iterator '
                       'might not stop and evaluation could go into '
                       'an infinite loop. '
                       'We recommend checking the configuration '
                       'of the iterators.'.format(key))
                warnings.warn(msg)
def get_iterator(self, name):
"""Returns the iterator of the given name."""
return self._iterators[name]
def get_all_iterators(self):
"""Returns a dictionary of all iterators."""
return dict(self._iterators)
def get_target(self, name):
"""Returns the target link of the given name."""
return self._targets[name]
def get_all_targets(self):
"""Returns a dictionary of all target links."""
return dict(self._targets)
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
        a trainer object. This extension reports the performance on the
        validation dataset using the :func:`~chainer.report` function.
        Thus, users can use this extension independently from any trainer
        by manually configuring a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
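        .. admonition:: Example

            A rough sketch of standalone use without a trainer (``model``
            and ``test_iter`` are assumed to be defined elsewhere)::

                evaluator = Evaluator(test_iter, model)
                result = evaluator()
                # result is e.g. {'main/loss': ..., 'main/accuracy': ...}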
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name,
target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate()
reporter_module.report(result)
return result
def evaluate(self):
"""Evaluates the model and returns a result dictionary.
This method runs the evaluation loop over the validation dataset. It
accumulates the reported values to :class:`~chainer.DictSummary` and
returns a dictionary whose values are means computed by the summary.
Note that this function assumes that the main iterator raises
``StopIteration`` or code in the evaluation loop raises an exception.
        So, if this assumption does not hold, the function could get stuck
        in an infinite loop.
Users can override this method to customize the evaluation routine.
.. note::
This method encloses :attr:`eval_func` calls with
:func:`function.no_backprop_mode` context, so all calculations
using :class:`~chainer.FunctionNode`\\s inside
:attr:`eval_func` do not make computational graphs. It is for
reducing the memory consumption.
Returns:
dict: Result dictionary. This dictionary is further reported via
:func:`~chainer.report` without specifying any observer.
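        .. admonition:: Example

            A rough, illustrative sketch of a custom override::

                class MyEvaluator(Evaluator):
                    def evaluate(self):
                        iterator = copy.copy(self.get_iterator('main'))
                        target = self.get_target('main')
                        # ... run a custom loop and collect statistics ...
                        return {'custom/metric': 0.0}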
"""
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
warnings.warn(
'This iterator does not have the reset method. Evaluator '
'copies the iterator instead of resetting. This behavior is '
'deprecated. Please implement the reset method.',
DeprecationWarning)
it = copy.copy(iterator)
summary = reporter_module.DictSummary()
for batch in it:
observation = {}
with reporter_module.report_scope(observation):
in_arrays = convert._call_converter(
self.converter, batch, self.device)
with function.no_backprop_mode():
if isinstance(in_arrays, tuple):
eval_func(*in_arrays)
elif isinstance(in_arrays, dict):
eval_func(**in_arrays)
else:
eval_func(in_arrays)
summary.add(observation)
return summary.compute_mean()
def finalize(self):
"""Finalizes the evaluator object.
This method calls the `finalize` method of each iterator that
this evaluator has.
It is called at the end of training loops.
"""
for iterator in six.itervalues(self._iterators):
iterator.finalize()
| mit | 7,844,293,341,900,906,000 | 39.05042 | 79 | 0.635229 | false |
ambv/django-crystal-big | setup.py | 1 | 2332 | # -*- encoding: utf-8 -*-
# Copyright (C) 2010 LangaCore
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import sys
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as ld_file:
long_description = ld_file.read()
setup (
name = 'django-crystal-big',
version = '2011.20.10',
    author = 'Everaldo Coelho, Lukasz Langa',
author_email = '[email protected], [email protected]',
description = "Everaldo's Crystal icons (big sizes) bundled for direct consumption "
"from Django applications",
long_description = long_description,
url = 'http://www.everaldo.com/crystal/',
keywords = 'django big crystal icons everaldo lgpl static',
platforms = ['any'],
license = 'LGPL',
packages = find_packages('src'),
include_package_data = True,
package_dir = {'':'src'},
zip_safe = False, # because executing support extensions for settings.py
# requires actual files
install_requires = [
'django>=1.2',
],
classifiers = [
'Development Status :: 6 - Mature',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| lgpl-3.0 | 6,208,730,846,566,288,000 | 37.229508 | 89 | 0.649657 | false |
grace-/opencv-3.0.0-cvpr | opencv/samples/python2/distrans.py | 7 | 1401 | #!/usr/bin/env python
'''
Distance transform sample.
Usage:
distrans.py [<image>]
Keys:
ESC - exit
v - toggle voronoi mode
'''
import numpy as np
import cv2
import cv2.cv as cv
from common import make_cmap
if __name__ == '__main__':
import sys
try: fn = sys.argv[1]
except: fn = '../cpp/fruits.jpg'
print __doc__
img = cv2.imread(fn, 0)
cm = make_cmap('jet')
need_update = True
voronoi = False
def update(dummy=None):
global need_update
need_update = False
thrs = cv2.getTrackbarPos('threshold', 'distrans')
mark = cv2.Canny(img, thrs, 3*thrs)
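        # Distance transform of the non-edge pixels; 'labels' assigns every
        # pixel the index of its nearest edge component (a Voronoi partition).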
dist, labels = cv2.distanceTransformWithLabels(~mark, cv.CV_DIST_L2, 5)
if voronoi:
vis = cm[np.uint8(labels)]
else:
vis = cm[np.uint8(dist*2)]
vis[mark != 0] = 255
cv2.imshow('distrans', vis)
def invalidate(dummy=None):
global need_update
need_update = True
cv2.namedWindow('distrans')
cv2.createTrackbar('threshold', 'distrans', 60, 255, invalidate)
update()
while True:
ch = 0xFF & cv2.waitKey(50)
if ch == 27:
break
if ch == ord('v'):
voronoi = not voronoi
print 'showing', ['distance', 'voronoi'][voronoi]
update()
if need_update:
update()
cv2.destroyAllWindows()
| bsd-3-clause | -3,762,765,117,320,447,000 | 21.238095 | 79 | 0.556031 | false |
destijl/forensicartifacts | frontend/thirdparty/networkx-1.9/examples/advanced/eigenvalues.py | 16 | 1086 | #!/usr/bin/env python
"""
Create a G{n,m} random graph and compute the eigenvalues.
Requires numpy or LinearAlgebra package from Numeric Python.
Uses optional pylab plotting to produce histogram of eigenvalues.
"""
__author__ = """Aric Hagberg ([email protected])"""
__credits__ = """"""
# Copyright (C) 2004-2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
from networkx import *
try:
import numpy.linalg
eigenvalues=numpy.linalg.eigvals
except ImportError:
raise ImportError("numpy can not be imported.")
try:
from pylab import *
except:
pass
n=1000 # 1000 nodes
m=5000 # 5000 edges
G=gnm_random_graph(n,m)
L=generalized_laplacian(G)
e=eigenvalues(L)
print("Largest eigenvalue:", max(e))
print("Smallest eigenvalue:", min(e))
# plot with matplotlib if we have it
# shows "semicircle" distribution of eigenvalues
try:
hist(e,bins=100) # histogram with 100 bins
xlim(0,2) # eigenvalues between 0 and 2
show()
except:
pass
| apache-2.0 | -2,577,404,059,253,888,500 | 21.625 | 65 | 0.696133 | false |
justingrayston/jaikuengine | common/messages.py | 34 | 3331 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User-visible strings for confirmation and flash messages.
"""
__author__ = '[email protected] (Mika Raento)'
# api call -> (confirmation message, flash message)
# If the confirmation message is None, no confirmation is required.
_message_table__ = {
'activation_activate_mobile':
(None,
'Mobile activated.'),
'activation_request_email':
(None,
'Email confirmation has been sent.'),
'activation_request_mobile':
(None,
'Mobile activation code has been sent.'),
'actor_add_contact':
(None,
'Contact added.'),
'actor_remove' :
(None,
'Deleted'),
'actor_remove_contact':
(None,
'Contact removed.'),
'channel_create':
(None,
'Channel created'),
'channel_join':
(None,
'You have joined the channel.'),
'channel_update':
(None,
'Channel settings updated.'),
'channel_part':
(None,
'You have left the channel.'),
'channel_post':
(None,
'Message posted.'),
'entry_add_comment':
(None,
'Comment added.'),
'entry_mark_as_spam':
('Mark this item as spam',
'Marked as spam.'),
'entry_remove' :
('Delete this post',
'Post deleted.'),
'entry_remove_comment':
('Delete this comment',
'Comment deleted.'),
'invite_accept':
(None,
'Invitation accepted'),
'invite_reject':
(None,
'Invitation rejected'),
'invite_request_email':
(None,
'Invitation sent'),
'login_forgot':
(None,
'New Password Emailed'),
'oauth_consumer_delete':
('Delete this key',
'API Key deleted'),
'oauth_consumer_update':
(None,
'API Key information updated'),
'oauth_generate_consumer':
(None,
'New API key generated'),
'oauth_revoke_access_token':
(None,
'API token revoked.'),
'presence_set':
(None,
'Location updated'),
'post':
(None,
'Message posted.'),
'settings_change_notify':
(None,
'Settings updated.'),
'settings_change_privacy':
(None,
'privacy updated'),
'settings_hide_comments':
(None,
'Comments preferenced stored.'),
'settings_update_account':
(None,
'profile updated'),
'subscription_remove':
(None,
'Unsubscribed.'),
'subscription_request':
(None,
'Subscription requested.'),
}
def confirmation(api_call):
msg = title(api_call)
if msg is None:
return None
return ('Are you sure you want to ' +
msg +
'?')
def title(api_call):
if _message_table__.has_key(api_call):
return _message_table__[api_call][0]
return None
def flash(api_call):
return _message_table__[api_call][1]
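# Illustrative examples (derived from the table above):
#   flash('entry_add_comment') == 'Comment added.'
#   confirmation('entry_remove') == 'Are you sure you want to Delete this post?'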
| apache-2.0 | 199,580,153,979,323,700 | 24.234848 | 74 | 0.603723 | false |