repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
redhat-cip/managesf | managesf/model/__init__.py | 2 | 13220 | #
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from pecan import conf # noqa
from sqlalchemy import create_engine, Column, String, Unicode, UnicodeText
from sqlalchemy import Boolean, Integer, exc, event
from sqlalchemy import ForeignKey, UniqueConstraint
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from sqlalchemy.ext.declarative import declarative_base
from contextlib import contextmanager
Base = declarative_base()
engine = None
logger = logging.getLogger(__name__)
def row2dict(row):
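    """Convert a SQLAlchemy model instance into a plain dict of its column values."""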
ret = {}
for column in row.__table__.columns:
ret[column.name] = getattr(row, column.name)
        # TODO: Fix test and remove below hack!
if not isinstance(ret[column.name], basestring) and \
not isinstance(ret[column.name], bool):
ret[column.name] = str(ret[column.name])
return ret
class User(Base):
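    """Local user account stored in the 'users' table."""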
__tablename__ = 'users'
username = Column(Unicode(255), primary_key=True)
fullname = Column(Unicode(255), nullable=False)
email = Column(String(255), nullable=False)
hashed_password = Column(String(255), nullable=False)
sshkey = Column(String(1023), nullable=True)
def checkout_listener(dbapi_con, con_record, con_proxy):
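    """Ping MySQL connections on pool checkout so that stale connections
    are discarded and replaced by the pool."""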
try:
try:
dbapi_con.ping(False)
except TypeError:
dbapi_con.ping()
except dbapi_con.OperationalError as e:
if e.args[0] in (2006, # MySQL server has gone away
2013, # Lost connection to server during query
2055): # Lost connection to server
# caught by pool, which will retry with a new connection
raise exc.DisconnectionError()
else:
raise
class SFUser(Base):
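    """Software Factory user record (SF_USERS table)."""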
__tablename__ = 'SF_USERS'
id = Column(Integer(), primary_key=True)
username = Column(Unicode(255), nullable=False, unique=True)
fullname = Column(Unicode(255), nullable=True)
# Gerrit requires email unicity
email = Column(String(255), nullable=False, unique=True)
cauth_id = Column(Integer(), nullable=False)
idp_sync = Column(Boolean(), default=True)
class SFUserServiceMapping(Base):
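    """Mapping between an SF user and that user's id on a given service."""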
__tablename__ = 'SF_USERS_SERVICES_MAPPING'
# needed for constraint definition, not actually used
id = Column(Integer(), primary_key=True)
sf_user_id = Column(Integer(), ForeignKey('SF_USERS.id'),
nullable=False)
# will simply store the plugin name
service = Column(String(255), nullable=False)
# for extended future compatibility, don't limit to integers
service_user_id = Column(String(255), nullable=False)
__table_args__ = (UniqueConstraint('sf_user_id',
'service',
'service_user_id',
name='unique_service_user'), )
class SFUserCRUD:
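    """CRUD operations for SFUser rows and their per-service user id mappings."""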
def set_service_mapping(self, sf_user_id, service, service_user_id):
with session_scope() as session:
r = SFUserServiceMapping(sf_user_id=sf_user_id,
service=service,
service_user_id=service_user_id)
session.add(r)
def get_service_mapping(self, service, sf_user_id):
with session_scope() as session:
filtering = {'service': service,
'sf_user_id': sf_user_id}
try:
r = session.query(SFUserServiceMapping).filter_by(**filtering)
return r.one().service_user_id
except MultipleResultsFound:
msg = 'Too many mappings for user #%s on service %s'
raise KeyError(msg % (sf_user_id, service))
except NoResultFound:
return None
def get_user_mapping(self, service, service_user_id):
with session_scope() as session:
filtering = {'service': service,
'service_user_id': service_user_id}
try:
r = session.query(SFUserServiceMapping).filter_by(**filtering)
                return r.one().sf_user_id
            except MultipleResultsFound:
                msg = 'Too many mappings for service %s\'s user #%s'
                raise KeyError(msg % (service, service_user_id))
except NoResultFound:
return None
def delete_service_mapping(self, sf_user_id,
service=None, service_user_id=None):
with session_scope() as session:
filtering = {'sf_user_id': sf_user_id}
if service:
filtering['service'] = service
if service_user_id:
filtering['service_user_id'] = service_user_id
m = session.query(SFUserServiceMapping).filter_by(**filtering)
m.delete(synchronize_session=False)
def get(self, id=None, username=None, email=None,
fullname=None, cauth_id=None):
with session_scope() as session:
if (id or username or email or fullname or cauth_id):
filtering = {}
if id:
filtering['id'] = id
if username:
filtering['username'] = username
if email:
filtering['email'] = email
if fullname:
filtering['fullname'] = fullname
if cauth_id:
filtering['cauth_id'] = cauth_id
try:
ret = session.query(SFUser).filter_by(**filtering).one()
return row2dict(ret)
except MultipleResultsFound:
# TODO(mhu) find a better Error
raise KeyError('search returned more than one result')
except NoResultFound:
return {}
else:
# all()
all = [row2dict(ret) for ret in session.query(SFUser)]
return all
def update(self, id, username=None, email=None,
fullname=None, cauth_id=None, idp_sync=None):
with session_scope() as session:
try:
ret = session.query(SFUser).filter_by(id=id).one()
if username:
ret.username = username
if email:
ret.email = email
if fullname:
ret.fullname = fullname
if cauth_id:
ret.cauth_id = cauth_id
if idp_sync is not None:
ret.idp_sync = idp_sync
session.commit()
except MultipleResultsFound:
                msg = 'SF_USERS table has multiple rows with the same id!'
logger.error(msg)
raise KeyError(msg)
except NoResultFound:
logger.warn("Could not update user %s: not found" % id)
return
def create(self, username, email,
fullname, cauth_id=None):
with session_scope() as session:
if username and email and fullname:
# assign a dummy value in case we lack the information
# as is likely to happen when migrating from a previous version
# TODO(mhu) remove these for version n+2
cid = cauth_id or -1
user = SFUser(username=username,
email=email,
fullname=fullname,
cauth_id=cid)
session.add(user)
session.commit()
return user.id
else:
msg = "Missing info required for user creation: %s|%s|%s"
raise KeyError(msg % (username, email, fullname))
def delete(self, id=None, username=None, email=None,
fullname=None, cauth_id=None):
with session_scope() as session:
filtering = {}
if id:
filtering['id'] = id
if username:
filtering['username'] = username
if email:
filtering['email'] = email
if fullname:
filtering['fullname'] = fullname
if cauth_id:
filtering['cauth_id'] = cauth_id
try:
ret = session.query(SFUser).filter_by(**filtering).one()
session.delete(ret)
session.commit()
return True
except MultipleResultsFound:
# TODO(mhu) find a better Error
raise KeyError('Too many candidates for deletion')
except NoResultFound:
return False
class NodepoolImageUpdate(Base):
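    """Status record for a nodepool image update (NODEPOOL_IMAGE_UPDATES table)."""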
__tablename__ = 'NODEPOOL_IMAGE_UPDATES'
id = Column(Integer(), primary_key=True)
status = Column(String(255), default="IN_PROGRESS")
provider = Column(String(1024), nullable=False)
image = Column(String(1024), nullable=False)
exit_code = Column(Integer(), default=-1)
stderr = Column(UnicodeText(), default="")
output = Column(UnicodeText(4294967295), default="")
class ImageUpdatesCRUD():
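    """CRUD operations for NodepoolImageUpdate rows."""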
def create(self, provider, image):
with session_scope() as session:
if provider and image:
img_update = NodepoolImageUpdate(
provider=provider,
image=image)
session.add(img_update)
session.commit()
return img_update.id
else:
msg = "Missing info required for image update: %s|%s"
raise KeyError(msg % (provider, image))
def update(self, id, status=None, exit_code=None,
output=None, stderr=None):
with session_scope() as session:
try:
u = session.query(NodepoolImageUpdate).filter_by(id=id).one()
if status:
u.status = status
if exit_code:
u.exit_code = int(exit_code)
if output:
u.output = output
if stderr:
u.stderr = stderr
session.commit()
except NoResultFound:
logger.warn("Could not update image-update %s: not found" % id)
return
def get(self, id):
with session_scope() as session:
# TODO(mhu) Lookup by images, providers, statuses if needed?
try:
u = session.query(NodepoolImageUpdate).filter_by(id=id).one()
return row2dict(u)
except NoResultFound:
return {}
def init_model():
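    """Create the engine from the pecan sqlalchemy config and create all tables."""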
c = dict(conf.sqlalchemy)
url = c.pop('url')
if url.startswith('mysql') and not url.endswith('?charset=utf8'):
url += '?charset=utf8'
globals()['engine'] = create_engine(url, pool_recycle=600, **c)
if url.startswith('mysql'):
event.listen(engine, 'checkout', checkout_listener)
Base.metadata.create_all(engine)
def start_session():
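    """Return a new session bound to the module-level engine."""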
Base.metadata.bind = engine
dbsession = sessionmaker(bind=engine)
session = dbsession()
return session
@contextmanager
def session_scope():
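    """Provide a transactional scope: commit on success, roll back on error."""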
session = start_session()
try:
yield session
session.commit()
except:
session.rollback()
raise
finally:
session.close()
def add_user(user):
""" Add a user in the database
return Boolean
"""
try:
with session_scope() as session:
u = User(**user)
session.add(u)
return True, None
except exc.IntegrityError as e:
return False, unicode(e)
def get_user(username):
""" Fetch a user by its username
return user dict or False if not found
"""
try:
with session_scope() as session:
ret = session.query(User).filter_by(username=username).one()
return row2dict(ret)
except NoResultFound:
return False
def delete_user(username):
""" Delete a user by its username
return True if deleted or False if not found
"""
with session_scope() as session:
ret = session.query(User).filter_by(username=username).delete()
return bool(ret)
def update_user(username, infos):
""" Update a user by its username
arg infos: Dict
    return True if updated or False if not found
"""
with session_scope() as session:
user = session.query(User)
ret = user.filter_by(username=username).update(infos)
return bool(ret)
| apache-2.0 | 5,822,224,265,948,238,000 | 35.120219 | 79 | 0.558321 | false |
mezz64/home-assistant | tests/components/google/test_calendar.py | 13 | 11236 | """The tests for the google calendar platform."""
import copy
import httplib2
import pytest
from homeassistant.components.google import (
CONF_CAL_ID,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DEVICE_ID,
CONF_ENTITIES,
CONF_NAME,
CONF_TRACK,
DEVICE_SCHEMA,
SERVICE_SCAN_CALENDARS,
do_setup,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.setup import async_setup_component
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from tests.async_mock import Mock, patch
from tests.common import async_mock_service
GOOGLE_CONFIG = {CONF_CLIENT_ID: "client_id", CONF_CLIENT_SECRET: "client_secret"}
TEST_ENTITY = "calendar.we_are_we_are_a_test_calendar"
TEST_ENTITY_NAME = "We are, we are, a... Test Calendar"
TEST_EVENT = {
"summary": "Test All Day Event",
"start": {},
"end": {},
"location": "Test Cases",
"description": "test event",
"kind": "calendar#event",
"created": "2016-06-23T16:37:57.000Z",
"transparency": "transparent",
"updated": "2016-06-24T01:57:21.045Z",
"reminders": {"useDefault": True},
"organizer": {
"email": "[email protected]",
"displayName": "Organizer Name",
"self": True,
},
"sequence": 0,
"creator": {
"email": "[email protected]",
"displayName": "Organizer Name",
"self": True,
},
"id": "_c8rinwq863h45qnucyoi43ny8",
"etag": '"2933466882090000"',
"htmlLink": "https://www.google.com/calendar/event?eid=*******",
"iCalUID": "[email protected]",
"status": "confirmed",
}
def get_calendar_info(calendar):
"""Convert data from Google into DEVICE_SCHEMA."""
calendar_info = DEVICE_SCHEMA(
{
CONF_CAL_ID: calendar["id"],
CONF_ENTITIES: [
{
CONF_TRACK: calendar["track"],
CONF_NAME: calendar["summary"],
CONF_DEVICE_ID: slugify(calendar["summary"]),
}
],
}
)
return calendar_info
@pytest.fixture(autouse=True)
def mock_google_setup(hass, test_calendar):
"""Mock the google set up functions."""
hass.loop.run_until_complete(async_setup_component(hass, "group", {"group": {}}))
calendar = get_calendar_info(test_calendar)
calendars = {calendar[CONF_CAL_ID]: calendar}
patch_google_auth = patch(
"homeassistant.components.google.do_authentication", side_effect=do_setup
)
patch_google_load = patch(
"homeassistant.components.google.load_config", return_value=calendars
)
patch_google_services = patch("homeassistant.components.google.setup_services")
async_mock_service(hass, "google", SERVICE_SCAN_CALENDARS)
with patch_google_auth, patch_google_load, patch_google_services:
yield
@pytest.fixture(autouse=True)
def mock_http(hass):
"""Mock the http component."""
hass.http = Mock()
@pytest.fixture(autouse=True)
def set_time_zone():
"""Set the time zone for the tests."""
# Set our timezone to CST/Regina so we can check calculations
# This keeps UTC-6 all year round
dt_util.set_default_time_zone(dt_util.get_time_zone("America/Regina"))
yield
dt_util.set_default_time_zone(dt_util.get_time_zone("UTC"))
@pytest.fixture(name="google_service")
def mock_google_service():
"""Mock google service."""
patch_google_service = patch(
"homeassistant.components.google.calendar.GoogleCalendarService"
)
with patch_google_service as mock_service:
yield mock_service
async def test_all_day_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
week_from_today = dt_util.dt.date.today() + dt_util.dt.timedelta(days=7)
end_event = week_from_today + dt_util.dt.timedelta(days=1)
event = copy.deepcopy(TEST_EVENT)
start = week_from_today.isoformat()
end = end_event.isoformat()
event["start"]["date"] = start
event["end"]["date"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": True,
"offset_reached": False,
"start_time": week_from_today.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_future_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
one_hour_from_now = dt_util.now() + dt_util.dt.timedelta(minutes=30)
end_event = one_hour_from_now + dt_util.dt.timedelta(minutes=60)
start = one_hour_from_now.isoformat()
end = end_event.isoformat()
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": False,
"offset_reached": False,
"start_time": one_hour_from_now.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_in_progress_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() - dt_util.dt.timedelta(minutes=30)
end_event = middle_of_event + dt_util.dt.timedelta(minutes=60)
start = middle_of_event.isoformat()
end = end_event.isoformat()
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_ON
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event["summary"],
"all_day": False,
"offset_reached": False,
"start_time": middle_of_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_offset_in_progress_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
middle_of_event = dt_util.now() + dt_util.dt.timedelta(minutes=14)
end_event = middle_of_event + dt_util.dt.timedelta(minutes=60)
start = middle_of_event.isoformat()
end = end_event.isoformat()
event_summary = "Test Event in Progress"
event = copy.deepcopy(TEST_EVENT)
event["start"]["dateTime"] = start
event["end"]["dateTime"] = end
event["summary"] = f"{event_summary} !!-15"
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": False,
"offset_reached": True,
"start_time": middle_of_event.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
@pytest.mark.skip
async def test_all_day_offset_in_progress_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
tomorrow = dt_util.dt.date.today() + dt_util.dt.timedelta(days=1)
end_event = tomorrow + dt_util.dt.timedelta(days=1)
start = tomorrow.isoformat()
end = end_event.isoformat()
event_summary = "Test All Day Event Offset In Progress"
event = copy.deepcopy(TEST_EVENT)
event["start"]["date"] = start
event["end"]["date"] = end
event["summary"] = f"{event_summary} !!-25:0"
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": True,
"offset_reached": True,
"start_time": tomorrow.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_all_day_offset_event(hass, mock_next_event):
"""Test that we can create an event trigger on device."""
tomorrow = dt_util.dt.date.today() + dt_util.dt.timedelta(days=2)
end_event = tomorrow + dt_util.dt.timedelta(days=1)
start = tomorrow.isoformat()
end = end_event.isoformat()
offset_hours = 1 + dt_util.now().hour
event_summary = "Test All Day Event Offset"
event = copy.deepcopy(TEST_EVENT)
event["start"]["date"] = start
event["end"]["date"] = end
event["summary"] = f"{event_summary} !!-{offset_hours}:0"
mock_next_event.return_value.event = event
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == STATE_OFF
assert dict(state.attributes) == {
"friendly_name": TEST_ENTITY_NAME,
"message": event_summary,
"all_day": True,
"offset_reached": False,
"start_time": tomorrow.strftime(DATE_STR_FORMAT),
"end_time": end_event.strftime(DATE_STR_FORMAT),
"location": event["location"],
"description": event["description"],
}
async def test_update_error(hass, google_service):
"""Test that the calendar handles a server error."""
google_service.return_value.get = Mock(
side_effect=httplib2.ServerNotFoundError("unit test")
)
assert await async_setup_component(hass, "google", {"google": GOOGLE_CONFIG})
await hass.async_block_till_done()
state = hass.states.get(TEST_ENTITY)
assert state.name == TEST_ENTITY_NAME
assert state.state == "off"
| apache-2.0 | -9,001,035,404,504,731,000 | 34.556962 | 85 | 0.645781 | false |
Chuban/moose | python/peacock/tests/base/test_PeacockCollapsibleWidget.py | 4 | 1304 | #!/usr/bin/env python
import sys
import unittest
from PyQt5 import QtWidgets
import peacock
class TestPeacockCollapsibleWidget(unittest.TestCase):
"""
Test collapsible regions.
"""
qapp = QtWidgets.QApplication(sys.argv)
def testCollapseDefault(self):
collapse = peacock.base.PeacockCollapsibleWidget(title='The Title')
main = collapse.collapsibleLayout()
widget = QtWidgets.QWidget()
main.addWidget(widget)
self.assertFalse(collapse.isCollapsed())
collapse._callbackHideButton()
self.assertTrue(collapse.isCollapsed())
collapse._callbackHideButton()
self.assertFalse(collapse.isCollapsed())
self.assertEqual(collapse._title_widget.text(), 'The Title')
def testCollapseStartCollapse(self):
collapse = peacock.base.PeacockCollapsibleWidget(collapsed=True, title='The Title')
main = collapse.collapsibleLayout()
widget = QtWidgets.QWidget()
main.addWidget(widget)
self.assertTrue(collapse.isCollapsed())
collapse._callbackHideButton()
self.assertFalse(collapse.isCollapsed())
collapse._callbackHideButton()
self.assertTrue(collapse.isCollapsed())
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
| lgpl-2.1 | -5,773,747,078,971,608,000 | 30.804878 | 91 | 0.685583 | false |
mgracer48/panda3d | direct/src/actor/DistributedActor.py | 1 | 1281 | """DistributedActor module: contains the DistributedActor class"""
__all__ = ['DistributedActor']
from direct.distributed import DistributedNode
import Actor
class DistributedActor(DistributedNode.DistributedNode, Actor.Actor):
def __init__(self, cr):
try:
self.DistributedActor_initialized
except:
self.DistributedActor_initialized = 1
Actor.Actor.__init__(self)
DistributedNode.DistributedNode.__init__(self, cr)
# Since actors are probably fairly heavyweight, we'd
# rather cache them than delete them if possible.
self.setCacheable(1)
def disable(self):
# remove all anims, on all parts and all lods
if (not self.isEmpty()):
Actor.Actor.unloadAnims(self, None, None, None)
DistributedNode.DistributedNode.disable(self)
def delete(self):
try:
self.DistributedActor_deleted
except:
self.DistributedActor_deleted = 1
DistributedNode.DistributedNode.delete(self)
Actor.Actor.delete(self)
def loop(self, animName, restart=1, partName=None, fromFrame=None, toFrame=None):
return Actor.Actor.loop(self, animName, restart, partName, fromFrame, toFrame)
| bsd-3-clause | -7,648,807,752,967,226,000 | 33.621622 | 86 | 0.650273 | false |
SUSE/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/compute/v2015_06_15/models/win_rm_listener.py | 2 | 1341 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WinRMListener(Model):
"""Describes Protocol and thumbprint of Windows Remote Management listener.
:param protocol: The Protocol used by the WinRM listener. Http and Https
are supported. Possible values include: 'Http', 'Https'
:type protocol: str or :class:`ProtocolTypes
<azure.mgmt.compute.compute.v2015_06_15.models.ProtocolTypes>`
:param certificate_url: The Certificate URL in KMS for Https listeners.
Should be null for Http listeners.
:type certificate_url: str
"""
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'ProtocolTypes'},
'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
}
def __init__(self, protocol=None, certificate_url=None):
self.protocol = protocol
self.certificate_url = certificate_url
| mit | -2,387,292,432,891,178,000 | 38.441176 | 79 | 0.624907 | false |
laffra/pava | pava/implementation/natives/java/util/zip/ZipFile.py | 1 | 2834 | def add_native_methods(clazz):
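    """Attach stub native methods (each raising NotImplementedError) to the given class."""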
def initIDs____(a0):
raise NotImplementedError()
def getEntry__long__byte____boolean__(a0, a1, a2, a3):
raise NotImplementedError()
def freeEntry__long__long__(a0, a1, a2):
raise NotImplementedError()
def getNextEntry__long__int__(a0, a1, a2):
raise NotImplementedError()
def close__long__(a0, a1):
raise NotImplementedError()
def open__java_lang_String__int__long__boolean__(a0, a1, a2, a3, a4):
raise NotImplementedError()
def getTotal__long__(a0, a1):
raise NotImplementedError()
def startsWithLOC__long__(a0, a1):
raise NotImplementedError()
def read__long__long__long__byte____int__int__(a0, a1, a2, a3, a4, a5, a6):
raise NotImplementedError()
def getEntryTime__long__(a0, a1):
raise NotImplementedError()
def getEntryCrc__long__(a0, a1):
raise NotImplementedError()
def getEntryCSize__long__(a0, a1):
raise NotImplementedError()
def getEntrySize__long__(a0, a1):
raise NotImplementedError()
def getEntryMethod__long__(a0, a1):
raise NotImplementedError()
def getEntryFlag__long__(a0, a1):
raise NotImplementedError()
def getCommentBytes__long__(a0, a1):
raise NotImplementedError()
def getEntryBytes__long__int__(a0, a1, a2):
raise NotImplementedError()
def getZipMessage__long__(a0, a1):
raise NotImplementedError()
clazz.initIDs____ = staticmethod(initIDs____)
clazz.getEntry__long__byte____boolean__ = staticmethod(getEntry__long__byte____boolean__)
clazz.freeEntry__long__long__ = staticmethod(freeEntry__long__long__)
clazz.getNextEntry__long__int__ = staticmethod(getNextEntry__long__int__)
clazz.close__long__ = staticmethod(close__long__)
clazz.open__java_lang_String__int__long__boolean__ = staticmethod(open__java_lang_String__int__long__boolean__)
clazz.getTotal__long__ = staticmethod(getTotal__long__)
clazz.startsWithLOC__long__ = staticmethod(startsWithLOC__long__)
clazz.read__long__long__long__byte____int__int__ = staticmethod(read__long__long__long__byte____int__int__)
clazz.getEntryTime__long__ = staticmethod(getEntryTime__long__)
clazz.getEntryCrc__long__ = staticmethod(getEntryCrc__long__)
clazz.getEntryCSize__long__ = staticmethod(getEntryCSize__long__)
clazz.getEntrySize__long__ = staticmethod(getEntrySize__long__)
clazz.getEntryMethod__long__ = staticmethod(getEntryMethod__long__)
clazz.getEntryFlag__long__ = staticmethod(getEntryFlag__long__)
clazz.getCommentBytes__long__ = staticmethod(getCommentBytes__long__)
clazz.getEntryBytes__long__int__ = staticmethod(getEntryBytes__long__int__)
clazz.getZipMessage__long__ = staticmethod(getZipMessage__long__)
| mit | -899,454,201,277,582,600 | 37.297297 | 115 | 0.656316 | false |
CYBAI/servo | tests/wpt/web-platform-tests/tools/third_party/aioquic/tests/test_connection.py | 15 | 74593 | import asyncio
import binascii
import contextlib
import io
import time
from unittest import TestCase
from aioquic import tls
from aioquic.buffer import UINT_VAR_MAX, Buffer, encode_uint_var
from aioquic.quic import events
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.connection import (
QuicConnection,
QuicConnectionError,
QuicNetworkPath,
QuicReceiveContext,
)
from aioquic.quic.crypto import CryptoPair
from aioquic.quic.logger import QuicLogger
from aioquic.quic.packet import (
PACKET_TYPE_INITIAL,
QuicErrorCode,
QuicFrameType,
encode_quic_retry,
encode_quic_version_negotiation,
)
from aioquic.quic.packet_builder import QuicDeliveryState, QuicPacketBuilder
from aioquic.quic.recovery import QuicPacketPacer
from .utils import (
SERVER_CACERTFILE,
SERVER_CERTFILE,
SERVER_CERTFILE_WITH_CHAIN,
SERVER_KEYFILE,
)
CLIENT_ADDR = ("1.2.3.4", 1234)
SERVER_ADDR = ("2.3.4.5", 4433)
class SessionTicketStore:
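    """Simple in-memory store for TLS session tickets."""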
def __init__(self):
self.tickets = {}
def add(self, ticket):
self.tickets[ticket.ticket] = ticket
def pop(self, label):
return self.tickets.pop(label, None)
def client_receive_context(client, epoch=tls.Epoch.ONE_RTT):
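    """Build a QuicReceiveContext for injecting frames directly into the client."""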
return QuicReceiveContext(
epoch=epoch,
host_cid=client.host_cid,
network_path=client._network_paths[0],
quic_logger_frames=[],
time=asyncio.get_event_loop().time(),
)
def consume_events(connection):
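    """Drain all pending events from the connection."""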
while True:
event = connection.next_event()
if event is None:
break
def create_standalone_client(self, **client_options):
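    """Create a client connection and start the handshake with no server;
    the client's initial datagram is dropped."""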
client = QuicConnection(
configuration=QuicConfiguration(
is_client=True, quic_logger=QuicLogger(), **client_options
)
)
client._ack_delay = 0
# kick-off handshake
client.connect(SERVER_ADDR, now=time.time())
self.assertEqual(drop(client), 1)
return client
@contextlib.contextmanager
def client_and_server(
client_kwargs={},
client_options={},
client_patch=lambda x: None,
handshake=True,
server_kwargs={},
server_certfile=SERVER_CERTFILE,
server_keyfile=SERVER_KEYFILE,
server_options={},
server_patch=lambda x: None,
):
client_configuration = QuicConfiguration(
is_client=True, quic_logger=QuicLogger(), **client_options
)
client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE)
client = QuicConnection(configuration=client_configuration, **client_kwargs)
client._ack_delay = 0
disable_packet_pacing(client)
client_patch(client)
server_configuration = QuicConfiguration(
is_client=False, quic_logger=QuicLogger(), **server_options
)
server_configuration.load_cert_chain(server_certfile, server_keyfile)
server = QuicConnection(configuration=server_configuration, **server_kwargs)
server._ack_delay = 0
disable_packet_pacing(server)
server_patch(server)
# perform handshake
if handshake:
client.connect(SERVER_ADDR, now=time.time())
for i in range(3):
roundtrip(client, server)
yield client, server
# close
client.close()
server.close()
def disable_packet_pacing(connection):
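    """Replace the connection's pacer with one that never delays sending."""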
class DummyPacketPacer(QuicPacketPacer):
def next_send_time(self, now):
return None
connection._loss._pacer = DummyPacketPacer()
def sequence_numbers(connection_ids):
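    """Return the sequence numbers of the given connection IDs."""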
return list(map(lambda x: x.sequence_number, connection_ids))
def drop(sender):
"""
Drop datagrams from `sender`.
"""
return len(sender.datagrams_to_send(now=time.time()))
def roundtrip(sender, receiver):
"""
Send datagrams from `sender` to `receiver` and back.
"""
return (transfer(sender, receiver), transfer(receiver, sender))
def transfer(sender, receiver):
"""
Send datagrams from `sender` to `receiver`.
"""
datagrams = 0
from_addr = CLIENT_ADDR if sender._is_client else SERVER_ADDR
for data, addr in sender.datagrams_to_send(now=time.time()):
datagrams += 1
receiver.receive_datagram(data, from_addr, now=time.time())
return datagrams
class QuicConnectionTest(TestCase):
def check_handshake(self, client, server, alpn_protocol=None):
"""
Check handshake completed.
"""
event = client.next_event()
self.assertEqual(type(event), events.ProtocolNegotiated)
self.assertEqual(event.alpn_protocol, alpn_protocol)
event = client.next_event()
self.assertEqual(type(event), events.HandshakeCompleted)
self.assertEqual(event.alpn_protocol, alpn_protocol)
self.assertEqual(event.early_data_accepted, False)
self.assertEqual(event.session_resumed, False)
for i in range(7):
self.assertEqual(type(client.next_event()), events.ConnectionIdIssued)
self.assertIsNone(client.next_event())
event = server.next_event()
self.assertEqual(type(event), events.ProtocolNegotiated)
self.assertEqual(event.alpn_protocol, alpn_protocol)
event = server.next_event()
self.assertEqual(type(event), events.HandshakeCompleted)
self.assertEqual(event.alpn_protocol, alpn_protocol)
for i in range(7):
self.assertEqual(type(server.next_event()), events.ConnectionIdIssued)
self.assertIsNone(server.next_event())
def test_connect(self):
with client_and_server() as (client, server):
# check handshake completed
self.check_handshake(client=client, server=server)
# check each endpoint has available connection IDs for the peer
self.assertEqual(
sequence_numbers(client._peer_cid_available), [1, 2, 3, 4, 5, 6, 7]
)
self.assertEqual(
sequence_numbers(server._peer_cid_available), [1, 2, 3, 4, 5, 6, 7]
)
# client closes the connection
client.close()
self.assertEqual(transfer(client, server), 1)
# check connection closes on the client side
client.handle_timer(client.get_timer())
event = client.next_event()
self.assertEqual(type(event), events.ConnectionTerminated)
self.assertEqual(event.error_code, QuicErrorCode.NO_ERROR)
self.assertEqual(event.frame_type, None)
self.assertEqual(event.reason_phrase, "")
self.assertIsNone(client.next_event())
# check connection closes on the server side
server.handle_timer(server.get_timer())
event = server.next_event()
self.assertEqual(type(event), events.ConnectionTerminated)
self.assertEqual(event.error_code, QuicErrorCode.NO_ERROR)
self.assertEqual(event.frame_type, None)
self.assertEqual(event.reason_phrase, "")
self.assertIsNone(server.next_event())
# check client log
client_log = client.configuration.quic_logger.to_dict()
self.assertGreater(len(client_log["traces"][0]["events"]), 20)
# check server log
server_log = server.configuration.quic_logger.to_dict()
self.assertGreater(len(server_log["traces"][0]["events"]), 20)
def test_connect_with_alpn(self):
with client_and_server(
client_options={"alpn_protocols": ["h3-25", "hq-25"]},
server_options={"alpn_protocols": ["hq-25"]},
) as (client, server):
# check handshake completed
self.check_handshake(client=client, server=server, alpn_protocol="hq-25")
def test_connect_with_secrets_log(self):
client_log_file = io.StringIO()
server_log_file = io.StringIO()
with client_and_server(
client_options={"secrets_log_file": client_log_file},
server_options={"secrets_log_file": server_log_file},
) as (client, server):
# check handshake completed
self.check_handshake(client=client, server=server)
# check secrets were logged
client_log = client_log_file.getvalue()
server_log = server_log_file.getvalue()
self.assertEqual(client_log, server_log)
labels = []
for line in client_log.splitlines():
labels.append(line.split()[0])
self.assertEqual(
labels,
[
"QUIC_SERVER_HANDSHAKE_TRAFFIC_SECRET",
"QUIC_CLIENT_HANDSHAKE_TRAFFIC_SECRET",
"QUIC_SERVER_TRAFFIC_SECRET_0",
"QUIC_CLIENT_TRAFFIC_SECRET_0",
],
)
def test_connect_with_cert_chain(self):
with client_and_server(server_certfile=SERVER_CERTFILE_WITH_CHAIN) as (
client,
server,
):
# check handshake completed
self.check_handshake(client=client, server=server)
def test_connect_with_loss_1(self):
"""
        Check connection is established even if the client's INITIAL is lost.
"""
def datagram_sizes(items):
return [len(x[0]) for x in items]
client_configuration = QuicConfiguration(is_client=True)
client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE)
client = QuicConnection(configuration=client_configuration)
client._ack_delay = 0
server_configuration = QuicConfiguration(is_client=False)
server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE)
server = QuicConnection(configuration=server_configuration)
server._ack_delay = 0
# client sends INITIAL
now = 0.0
client.connect(SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [1280])
self.assertEqual(client.get_timer(), 1.0)
# INITIAL is lost
now = 1.0
client.handle_timer(now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [1280])
self.assertEqual(client.get_timer(), 3.0)
# server receives INITIAL, sends INITIAL + HANDSHAKE
now = 1.1
server.receive_datagram(items[0][0], CLIENT_ADDR, now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [1280, 1030])
self.assertEqual(server.get_timer(), 2.1)
self.assertEqual(len(server._loss.spaces[0].sent_packets), 1)
self.assertEqual(len(server._loss.spaces[1].sent_packets), 2)
self.assertEqual(type(server.next_event()), events.ProtocolNegotiated)
self.assertIsNone(server.next_event())
# handshake continues normally
now = 1.2
client.receive_datagram(items[0][0], SERVER_ADDR, now=now)
client.receive_datagram(items[1][0], SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [376])
self.assertAlmostEqual(client.get_timer(), 1.825)
self.assertEqual(type(client.next_event()), events.ProtocolNegotiated)
self.assertEqual(type(client.next_event()), events.HandshakeCompleted)
self.assertEqual(type(client.next_event()), events.ConnectionIdIssued)
now = 1.3
server.receive_datagram(items[0][0], CLIENT_ADDR, now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [229])
self.assertAlmostEqual(server.get_timer(), 1.825)
self.assertEqual(len(server._loss.spaces[0].sent_packets), 0)
self.assertEqual(len(server._loss.spaces[1].sent_packets), 0)
self.assertEqual(type(server.next_event()), events.HandshakeCompleted)
self.assertEqual(type(server.next_event()), events.ConnectionIdIssued)
now = 1.4
client.receive_datagram(items[0][0], SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [32])
self.assertAlmostEqual(client.get_timer(), 61.4) # idle timeout
def test_connect_with_loss_2(self):
def datagram_sizes(items):
return [len(x[0]) for x in items]
client_configuration = QuicConfiguration(is_client=True)
client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE)
client = QuicConnection(configuration=client_configuration)
client._ack_delay = 0
server_configuration = QuicConfiguration(is_client=False)
server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE)
server = QuicConnection(configuration=server_configuration)
server._ack_delay = 0
# client sends INITIAL
now = 0.0
client.connect(SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [1280])
self.assertEqual(client.get_timer(), 1.0)
# server receives INITIAL, sends INITIAL + HANDSHAKE but second datagram is lost
now = 0.1
server.receive_datagram(items[0][0], CLIENT_ADDR, now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [1280, 1030])
self.assertEqual(server.get_timer(), 1.1)
self.assertEqual(len(server._loss.spaces[0].sent_packets), 1)
self.assertEqual(len(server._loss.spaces[1].sent_packets), 2)
# client only receives first datagram and sends ACKS
now = 0.2
client.receive_datagram(items[0][0], SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [97])
self.assertAlmostEqual(client.get_timer(), 0.625)
self.assertEqual(type(client.next_event()), events.ProtocolNegotiated)
self.assertIsNone(client.next_event())
# client PTO - HANDSHAKE PING
now = client.get_timer() # ~0.625
client.handle_timer(now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [44])
self.assertAlmostEqual(client.get_timer(), 1.875)
# server receives PING, discards INITIAL and sends ACK
now = 0.725
server.receive_datagram(items[0][0], CLIENT_ADDR, now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [48])
self.assertAlmostEqual(server.get_timer(), 1.1)
self.assertEqual(len(server._loss.spaces[0].sent_packets), 0)
self.assertEqual(len(server._loss.spaces[1].sent_packets), 3)
self.assertEqual(type(server.next_event()), events.ProtocolNegotiated)
self.assertIsNone(server.next_event())
# ACKs are lost, server retransmits HANDSHAKE
now = server.get_timer()
server.handle_timer(now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [1280, 854])
self.assertAlmostEqual(server.get_timer(), 3.1)
self.assertEqual(len(server._loss.spaces[0].sent_packets), 0)
self.assertEqual(len(server._loss.spaces[1].sent_packets), 3)
self.assertIsNone(server.next_event())
# handshake continues normally
now = 1.2
client.receive_datagram(items[0][0], SERVER_ADDR, now=now)
client.receive_datagram(items[1][0], SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [329])
self.assertAlmostEqual(client.get_timer(), 2.45)
self.assertEqual(type(client.next_event()), events.HandshakeCompleted)
self.assertEqual(type(client.next_event()), events.ConnectionIdIssued)
now = 1.3
server.receive_datagram(items[0][0], CLIENT_ADDR, now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [229])
self.assertAlmostEqual(server.get_timer(), 1.925)
self.assertEqual(type(server.next_event()), events.HandshakeCompleted)
self.assertEqual(type(server.next_event()), events.ConnectionIdIssued)
now = 1.4
client.receive_datagram(items[0][0], SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [32])
self.assertAlmostEqual(client.get_timer(), 61.4) # idle timeout
def test_connect_with_loss_3(self):
def datagram_sizes(items):
return [len(x[0]) for x in items]
client_configuration = QuicConfiguration(is_client=True)
client_configuration.load_verify_locations(cafile=SERVER_CACERTFILE)
client = QuicConnection(configuration=client_configuration)
client._ack_delay = 0
server_configuration = QuicConfiguration(is_client=False)
server_configuration.load_cert_chain(SERVER_CERTFILE, SERVER_KEYFILE)
server = QuicConnection(configuration=server_configuration)
server._ack_delay = 0
# client sends INITIAL
now = 0.0
client.connect(SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [1280])
self.assertEqual(client.get_timer(), 1.0)
# server receives INITIAL, sends INITIAL + HANDSHAKE
now = 0.1
server.receive_datagram(items[0][0], CLIENT_ADDR, now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [1280, 1030])
self.assertEqual(server.get_timer(), 1.1)
self.assertEqual(len(server._loss.spaces[0].sent_packets), 1)
self.assertEqual(len(server._loss.spaces[1].sent_packets), 2)
# client receives INITIAL + HANDSHAKE
now = 0.2
client.receive_datagram(items[0][0], SERVER_ADDR, now=now)
client.receive_datagram(items[1][0], SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [376])
self.assertAlmostEqual(client.get_timer(), 0.825)
self.assertEqual(type(client.next_event()), events.ProtocolNegotiated)
self.assertEqual(type(client.next_event()), events.HandshakeCompleted)
self.assertEqual(type(client.next_event()), events.ConnectionIdIssued)
# server completes handshake
now = 0.3
server.receive_datagram(items[0][0], CLIENT_ADDR, now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [229])
self.assertAlmostEqual(server.get_timer(), 0.825)
self.assertEqual(len(server._loss.spaces[0].sent_packets), 0)
self.assertEqual(len(server._loss.spaces[1].sent_packets), 0)
self.assertEqual(type(server.next_event()), events.ProtocolNegotiated)
self.assertEqual(type(server.next_event()), events.HandshakeCompleted)
self.assertEqual(type(server.next_event()), events.ConnectionIdIssued)
# server PTO - 1-RTT PING
now = 0.825
server.handle_timer(now=now)
items = server.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [29])
self.assertAlmostEqual(server.get_timer(), 1.875)
# client receives PING, sends ACK
now = 0.9
client.receive_datagram(items[0][0], SERVER_ADDR, now=now)
items = client.datagrams_to_send(now=now)
self.assertEqual(datagram_sizes(items), [32])
self.assertAlmostEqual(client.get_timer(), 0.825)
# server receives ACK, retransmits HANDSHAKE_DONE
now = 1.0
self.assertFalse(server._handshake_done_pending)
server.receive_datagram(items[0][0], CLIENT_ADDR, now=now)
self.assertTrue(server._handshake_done_pending)
items = server.datagrams_to_send(now=now)
self.assertFalse(server._handshake_done_pending)
self.assertEqual(datagram_sizes(items), [224])
def test_connect_with_quantum_readiness(self):
with client_and_server(client_options={"quantum_readiness_test": True},) as (
client,
server,
):
stream_id = client.get_next_available_stream_id()
client.send_stream_data(stream_id, b"hello")
self.assertEqual(roundtrip(client, server), (1, 1))
received = None
while True:
event = server.next_event()
if isinstance(event, events.StreamDataReceived):
received = event.data
elif event is None:
break
self.assertEqual(received, b"hello")
def test_connect_with_0rtt(self):
client_ticket = None
ticket_store = SessionTicketStore()
def save_session_ticket(ticket):
nonlocal client_ticket
client_ticket = ticket
with client_and_server(
client_kwargs={"session_ticket_handler": save_session_ticket},
server_kwargs={"session_ticket_handler": ticket_store.add},
) as (client, server):
pass
with client_and_server(
client_options={"session_ticket": client_ticket},
server_kwargs={"session_ticket_fetcher": ticket_store.pop},
handshake=False,
) as (client, server):
client.connect(SERVER_ADDR, now=time.time())
stream_id = client.get_next_available_stream_id()
client.send_stream_data(stream_id, b"hello")
self.assertEqual(roundtrip(client, server), (2, 1))
event = server.next_event()
self.assertEqual(type(event), events.ProtocolNegotiated)
event = server.next_event()
self.assertEqual(type(event), events.StreamDataReceived)
self.assertEqual(event.data, b"hello")
def test_connect_with_0rtt_bad_max_early_data(self):
client_ticket = None
ticket_store = SessionTicketStore()
def patch(server):
"""
Patch server's TLS initialization to set an invalid
max_early_data value.
"""
real_initialize = server._initialize
def patched_initialize(peer_cid: bytes):
real_initialize(peer_cid)
server.tls._max_early_data = 12345
server._initialize = patched_initialize
def save_session_ticket(ticket):
nonlocal client_ticket
client_ticket = ticket
with client_and_server(
client_kwargs={"session_ticket_handler": save_session_ticket},
server_kwargs={"session_ticket_handler": ticket_store.add},
server_patch=patch,
) as (client, server):
# check handshake failed
event = client.next_event()
self.assertIsNone(event)
def test_change_connection_id(self):
with client_and_server() as (client, server):
self.assertEqual(
sequence_numbers(client._peer_cid_available), [1, 2, 3, 4, 5, 6, 7]
)
# the client changes connection ID
client.change_connection_id()
self.assertEqual(transfer(client, server), 1)
self.assertEqual(
sequence_numbers(client._peer_cid_available), [2, 3, 4, 5, 6, 7]
)
# the server provides a new connection ID
self.assertEqual(transfer(server, client), 1)
self.assertEqual(
sequence_numbers(client._peer_cid_available), [2, 3, 4, 5, 6, 7, 8]
)
def test_change_connection_id_retransmit_new_connection_id(self):
with client_and_server() as (client, server):
self.assertEqual(
sequence_numbers(client._peer_cid_available), [1, 2, 3, 4, 5, 6, 7]
)
# the client changes connection ID
client.change_connection_id()
self.assertEqual(transfer(client, server), 1)
self.assertEqual(
sequence_numbers(client._peer_cid_available), [2, 3, 4, 5, 6, 7]
)
# the server provides a new connection ID, NEW_CONNECTION_ID is lost
self.assertEqual(drop(server), 1)
self.assertEqual(
sequence_numbers(client._peer_cid_available), [2, 3, 4, 5, 6, 7]
)
# NEW_CONNECTION_ID is retransmitted
server._on_new_connection_id_delivery(
QuicDeliveryState.LOST, server._host_cids[-1]
)
self.assertEqual(transfer(server, client), 1)
self.assertEqual(
sequence_numbers(client._peer_cid_available), [2, 3, 4, 5, 6, 7, 8]
)
def test_change_connection_id_retransmit_retire_connection_id(self):
with client_and_server() as (client, server):
self.assertEqual(
sequence_numbers(client._peer_cid_available), [1, 2, 3, 4, 5, 6, 7]
)
# the client changes connection ID, RETIRE_CONNECTION_ID is lost
client.change_connection_id()
self.assertEqual(drop(client), 1)
self.assertEqual(
sequence_numbers(client._peer_cid_available), [2, 3, 4, 5, 6, 7]
)
# RETIRE_CONNECTION_ID is retransmitted
client._on_retire_connection_id_delivery(QuicDeliveryState.LOST, 0)
self.assertEqual(transfer(client, server), 1)
# the server provides a new connection ID
self.assertEqual(transfer(server, client), 1)
self.assertEqual(
sequence_numbers(client._peer_cid_available), [2, 3, 4, 5, 6, 7, 8]
)
def test_get_next_available_stream_id(self):
with client_and_server() as (client, server):
# client
stream_id = client.get_next_available_stream_id()
self.assertEqual(stream_id, 0)
client.send_stream_data(stream_id, b"hello")
stream_id = client.get_next_available_stream_id()
self.assertEqual(stream_id, 4)
client.send_stream_data(stream_id, b"hello")
stream_id = client.get_next_available_stream_id(is_unidirectional=True)
self.assertEqual(stream_id, 2)
client.send_stream_data(stream_id, b"hello")
stream_id = client.get_next_available_stream_id(is_unidirectional=True)
self.assertEqual(stream_id, 6)
client.send_stream_data(stream_id, b"hello")
# server
stream_id = server.get_next_available_stream_id()
self.assertEqual(stream_id, 1)
server.send_stream_data(stream_id, b"hello")
stream_id = server.get_next_available_stream_id()
self.assertEqual(stream_id, 5)
server.send_stream_data(stream_id, b"hello")
stream_id = server.get_next_available_stream_id(is_unidirectional=True)
self.assertEqual(stream_id, 3)
server.send_stream_data(stream_id, b"hello")
stream_id = server.get_next_available_stream_id(is_unidirectional=True)
self.assertEqual(stream_id, 7)
server.send_stream_data(stream_id, b"hello")
def test_datagram_frame(self):
with client_and_server(
client_options={"max_datagram_frame_size": 65536},
server_options={"max_datagram_frame_size": 65536},
) as (client, server):
# check handshake completed
self.check_handshake(client=client, server=server, alpn_protocol=None)
# send datagram
client.send_datagram_frame(b"hello")
self.assertEqual(transfer(client, server), 1)
event = server.next_event()
self.assertEqual(type(event), events.DatagramFrameReceived)
self.assertEqual(event.data, b"hello")
def test_datagram_frame_2(self):
# payload which exactly fills an entire packet
payload = b"Z" * 1250
with client_and_server(
client_options={"max_datagram_frame_size": 65536},
server_options={"max_datagram_frame_size": 65536},
) as (client, server):
# check handshake completed
self.check_handshake(client=client, server=server, alpn_protocol=None)
# queue 20 datagrams
for i in range(20):
client.send_datagram_frame(payload)
            # only 11 datagrams can be sent due to congestion control
self.assertEqual(transfer(client, server), 11)
for i in range(11):
event = server.next_event()
self.assertEqual(type(event), events.DatagramFrameReceived)
self.assertEqual(event.data, payload)
# server sends ACK
self.assertEqual(transfer(server, client), 1)
# client sends remaining datagrams
self.assertEqual(transfer(client, server), 9)
for i in range(9):
event = server.next_event()
self.assertEqual(type(event), events.DatagramFrameReceived)
self.assertEqual(event.data, payload)
def test_decryption_error(self):
with client_and_server() as (client, server):
# mess with encryption key
server._cryptos[tls.Epoch.ONE_RTT].send.setup(
cipher_suite=tls.CipherSuite.AES_128_GCM_SHA256,
secret=bytes(48),
version=server._version,
)
# server sends close
server.close(error_code=QuicErrorCode.NO_ERROR)
for data, addr in server.datagrams_to_send(now=time.time()):
client.receive_datagram(data, SERVER_ADDR, now=time.time())
def test_tls_error(self):
def patch(client):
real_initialize = client._initialize
def patched_initialize(peer_cid: bytes):
real_initialize(peer_cid)
client.tls._supported_versions = [tls.TLS_VERSION_1_3_DRAFT_28]
client._initialize = patched_initialize
# handshake fails
with client_and_server(client_patch=patch) as (client, server):
timer_at = server.get_timer()
server.handle_timer(timer_at)
event = server.next_event()
self.assertEqual(type(event), events.ConnectionTerminated)
self.assertEqual(event.error_code, 326)
self.assertEqual(event.frame_type, QuicFrameType.CRYPTO)
self.assertEqual(event.reason_phrase, "No supported protocol version")
def test_receive_datagram_garbage(self):
client = create_standalone_client(self)
datagram = binascii.unhexlify("c00000000080")
client.receive_datagram(datagram, SERVER_ADDR, now=time.time())
def test_receive_datagram_reserved_bits_non_zero(self):
client = create_standalone_client(self)
builder = QuicPacketBuilder(
host_cid=client._peer_cid,
is_client=False,
peer_cid=client.host_cid,
version=client._version,
)
crypto = CryptoPair()
crypto.setup_initial(client._peer_cid, is_client=False, version=client._version)
crypto.encrypt_packet_real = crypto.encrypt_packet
def encrypt_packet(plain_header, plain_payload, packet_number):
# mess with reserved bits
plain_header = bytes([plain_header[0] | 0x0C]) + plain_header[1:]
return crypto.encrypt_packet_real(
plain_header, plain_payload, packet_number
)
crypto.encrypt_packet = encrypt_packet
builder.start_packet(PACKET_TYPE_INITIAL, crypto)
buf = builder.start_frame(QuicFrameType.PADDING)
buf.push_bytes(bytes(builder.remaining_flight_space))
for datagram in builder.flush()[0]:
client.receive_datagram(datagram, SERVER_ADDR, now=time.time())
self.assertEqual(drop(client), 1)
self.assertEqual(
client._close_event,
events.ConnectionTerminated(
error_code=QuicErrorCode.PROTOCOL_VIOLATION,
frame_type=None,
reason_phrase="Reserved bits must be zero",
),
)
def test_receive_datagram_wrong_version(self):
client = create_standalone_client(self)
builder = QuicPacketBuilder(
host_cid=client._peer_cid,
is_client=False,
peer_cid=client.host_cid,
version=0xFF000011, # DRAFT_16
)
crypto = CryptoPair()
crypto.setup_initial(client._peer_cid, is_client=False, version=client._version)
builder.start_packet(PACKET_TYPE_INITIAL, crypto)
buf = builder.start_frame(QuicFrameType.PADDING)
buf.push_bytes(bytes(builder.remaining_flight_space))
for datagram in builder.flush()[0]:
client.receive_datagram(datagram, SERVER_ADDR, now=time.time())
self.assertEqual(drop(client), 0)
def test_receive_datagram_retry(self):
client = create_standalone_client(self)
client.receive_datagram(
encode_quic_retry(
version=client._version,
source_cid=binascii.unhexlify("85abb547bf28be97"),
destination_cid=client.host_cid,
original_destination_cid=client._peer_cid,
retry_token=bytes(16),
),
SERVER_ADDR,
now=time.time(),
)
self.assertEqual(drop(client), 1)
def test_receive_datagram_retry_wrong_destination_cid(self):
client = create_standalone_client(self)
client.receive_datagram(
encode_quic_retry(
version=client._version,
source_cid=binascii.unhexlify("85abb547bf28be97"),
destination_cid=binascii.unhexlify("c98343fe8f5f0ff4"),
original_destination_cid=client._peer_cid,
retry_token=bytes(16),
),
SERVER_ADDR,
now=time.time(),
)
self.assertEqual(drop(client), 0)
def test_handle_ack_frame_ecn(self):
client = create_standalone_client(self)
client._handle_ack_frame(
client_receive_context(client),
QuicFrameType.ACK_ECN,
Buffer(data=b"\x00\x02\x00\x00\x00\x00\x00"),
)
def test_handle_connection_close_frame(self):
with client_and_server() as (client, server):
server.close(
error_code=QuicErrorCode.PROTOCOL_VIOLATION,
frame_type=QuicFrameType.ACK,
reason_phrase="illegal ACK frame",
)
self.assertEqual(roundtrip(server, client), (1, 0))
self.assertEqual(
client._close_event,
events.ConnectionTerminated(
error_code=QuicErrorCode.PROTOCOL_VIOLATION,
frame_type=QuicFrameType.ACK,
reason_phrase="illegal ACK frame",
),
)
def test_handle_connection_close_frame_app(self):
with client_and_server() as (client, server):
server.close(error_code=QuicErrorCode.NO_ERROR, reason_phrase="goodbye")
self.assertEqual(roundtrip(server, client), (1, 0))
self.assertEqual(
client._close_event,
events.ConnectionTerminated(
error_code=QuicErrorCode.NO_ERROR,
frame_type=None,
reason_phrase="goodbye",
),
)
def test_handle_connection_close_frame_app_not_utf8(self):
client = create_standalone_client(self)
client._handle_connection_close_frame(
client_receive_context(client),
QuicFrameType.APPLICATION_CLOSE,
Buffer(data=binascii.unhexlify("0008676f6f6462798200")),
)
self.assertEqual(
client._close_event,
events.ConnectionTerminated(
error_code=QuicErrorCode.NO_ERROR, frame_type=None, reason_phrase="",
),
)
def test_handle_crypto_frame_over_largest_offset(self):
with client_and_server() as (client, server):
# client receives offset + length > 2^62 - 1
with self.assertRaises(QuicConnectionError) as cm:
client._handle_crypto_frame(
client_receive_context(client),
QuicFrameType.CRYPTO,
Buffer(data=encode_uint_var(UINT_VAR_MAX) + encode_uint_var(1)),
)
self.assertEqual(
cm.exception.error_code, QuicErrorCode.FRAME_ENCODING_ERROR
)
self.assertEqual(cm.exception.frame_type, QuicFrameType.CRYPTO)
self.assertEqual(
cm.exception.reason_phrase, "offset + length cannot exceed 2^62 - 1"
)
def test_handle_data_blocked_frame(self):
with client_and_server() as (client, server):
# client receives DATA_BLOCKED: 12345
client._handle_data_blocked_frame(
client_receive_context(client),
QuicFrameType.DATA_BLOCKED,
Buffer(data=encode_uint_var(12345)),
)
def test_handle_datagram_frame(self):
client = create_standalone_client(self, max_datagram_frame_size=6)
client._handle_datagram_frame(
client_receive_context(client),
QuicFrameType.DATAGRAM,
Buffer(data=b"hello"),
)
self.assertEqual(
client.next_event(), events.DatagramFrameReceived(data=b"hello")
)
def test_handle_datagram_frame_not_allowed(self):
client = create_standalone_client(self, max_datagram_frame_size=None)
with self.assertRaises(QuicConnectionError) as cm:
client._handle_datagram_frame(
client_receive_context(client),
QuicFrameType.DATAGRAM,
Buffer(data=b"hello"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, QuicFrameType.DATAGRAM)
self.assertEqual(cm.exception.reason_phrase, "Unexpected DATAGRAM frame")
def test_handle_datagram_frame_too_large(self):
client = create_standalone_client(self, max_datagram_frame_size=5)
with self.assertRaises(QuicConnectionError) as cm:
client._handle_datagram_frame(
client_receive_context(client),
QuicFrameType.DATAGRAM,
Buffer(data=b"hello"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, QuicFrameType.DATAGRAM)
self.assertEqual(cm.exception.reason_phrase, "Unexpected DATAGRAM frame")
def test_handle_datagram_frame_with_length(self):
client = create_standalone_client(self, max_datagram_frame_size=7)
client._handle_datagram_frame(
client_receive_context(client),
QuicFrameType.DATAGRAM_WITH_LENGTH,
Buffer(data=b"\x05hellojunk"),
)
self.assertEqual(
client.next_event(), events.DatagramFrameReceived(data=b"hello")
)
def test_handle_datagram_frame_with_length_not_allowed(self):
client = create_standalone_client(self, max_datagram_frame_size=None)
with self.assertRaises(QuicConnectionError) as cm:
client._handle_datagram_frame(
client_receive_context(client),
QuicFrameType.DATAGRAM_WITH_LENGTH,
Buffer(data=b"\x05hellojunk"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, QuicFrameType.DATAGRAM_WITH_LENGTH)
self.assertEqual(cm.exception.reason_phrase, "Unexpected DATAGRAM frame")
def test_handle_datagram_frame_with_length_too_large(self):
client = create_standalone_client(self, max_datagram_frame_size=6)
with self.assertRaises(QuicConnectionError) as cm:
client._handle_datagram_frame(
client_receive_context(client),
QuicFrameType.DATAGRAM_WITH_LENGTH,
Buffer(data=b"\x05hellojunk"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, QuicFrameType.DATAGRAM_WITH_LENGTH)
self.assertEqual(cm.exception.reason_phrase, "Unexpected DATAGRAM frame")
def test_handle_handshake_done_not_allowed(self):
with client_and_server() as (client, server):
# server receives HANDSHAKE_DONE frame
with self.assertRaises(QuicConnectionError) as cm:
server._handle_handshake_done_frame(
client_receive_context(server),
QuicFrameType.HANDSHAKE_DONE,
Buffer(data=b""),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, QuicFrameType.HANDSHAKE_DONE)
self.assertEqual(
cm.exception.reason_phrase,
"Clients must not send HANDSHAKE_DONE frames",
)
def test_handle_max_data_frame(self):
with client_and_server() as (client, server):
self.assertEqual(client._remote_max_data, 1048576)
# client receives MAX_DATA raising limit
client._handle_max_data_frame(
client_receive_context(client),
QuicFrameType.MAX_DATA,
Buffer(data=encode_uint_var(1048577)),
)
self.assertEqual(client._remote_max_data, 1048577)
def test_handle_max_stream_data_frame(self):
with client_and_server() as (client, server):
# client creates bidirectional stream 0
stream = client._create_stream(stream_id=0)
self.assertEqual(stream.max_stream_data_remote, 1048576)
# client receives MAX_STREAM_DATA raising limit
client._handle_max_stream_data_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAM_DATA,
Buffer(data=b"\x00" + encode_uint_var(1048577)),
)
self.assertEqual(stream.max_stream_data_remote, 1048577)
# client receives MAX_STREAM_DATA lowering limit
client._handle_max_stream_data_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAM_DATA,
Buffer(data=b"\x00" + encode_uint_var(1048575)),
)
self.assertEqual(stream.max_stream_data_remote, 1048577)
def test_handle_max_stream_data_frame_receive_only(self):
with client_and_server() as (client, server):
# server creates unidirectional stream 3
server.send_stream_data(stream_id=3, data=b"hello")
# client receives MAX_STREAM_DATA: 3, 1
with self.assertRaises(QuicConnectionError) as cm:
client._handle_max_stream_data_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAM_DATA,
Buffer(data=b"\x03\x01"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.STREAM_STATE_ERROR)
self.assertEqual(cm.exception.frame_type, QuicFrameType.MAX_STREAM_DATA)
self.assertEqual(cm.exception.reason_phrase, "Stream is receive-only")
def test_handle_max_streams_bidi_frame(self):
with client_and_server() as (client, server):
self.assertEqual(client._remote_max_streams_bidi, 128)
# client receives MAX_STREAMS_BIDI raising limit
client._handle_max_streams_bidi_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAMS_BIDI,
Buffer(data=encode_uint_var(129)),
)
self.assertEqual(client._remote_max_streams_bidi, 129)
# client receives MAX_STREAMS_BIDI lowering limit
client._handle_max_streams_bidi_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAMS_BIDI,
Buffer(data=encode_uint_var(127)),
)
self.assertEqual(client._remote_max_streams_bidi, 129)
def test_handle_max_streams_uni_frame(self):
with client_and_server() as (client, server):
self.assertEqual(client._remote_max_streams_uni, 128)
# client receives MAX_STREAMS_UNI raising limit
client._handle_max_streams_uni_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAMS_UNI,
Buffer(data=encode_uint_var(129)),
)
self.assertEqual(client._remote_max_streams_uni, 129)
            # client receives MAX_STREAMS_UNI lowering limit
client._handle_max_streams_uni_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAMS_UNI,
Buffer(data=encode_uint_var(127)),
)
self.assertEqual(client._remote_max_streams_uni, 129)
def test_handle_new_token_frame(self):
with client_and_server() as (client, server):
# client receives NEW_TOKEN
client._handle_new_token_frame(
client_receive_context(client),
QuicFrameType.NEW_TOKEN,
Buffer(data=binascii.unhexlify("080102030405060708")),
)
def test_handle_new_token_frame_from_client(self):
with client_and_server() as (client, server):
# server receives NEW_TOKEN
with self.assertRaises(QuicConnectionError) as cm:
server._handle_new_token_frame(
client_receive_context(client),
QuicFrameType.NEW_TOKEN,
Buffer(data=binascii.unhexlify("080102030405060708")),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, QuicFrameType.NEW_TOKEN)
self.assertEqual(
cm.exception.reason_phrase, "Clients must not send NEW_TOKEN frames"
)
def test_handle_path_challenge_frame(self):
with client_and_server() as (client, server):
# client changes address and sends some data
client.send_stream_data(0, b"01234567")
for data, addr in client.datagrams_to_send(now=time.time()):
server.receive_datagram(data, ("1.2.3.4", 2345), now=time.time())
# check paths
self.assertEqual(len(server._network_paths), 2)
self.assertEqual(server._network_paths[0].addr, ("1.2.3.4", 2345))
self.assertFalse(server._network_paths[0].is_validated)
self.assertEqual(server._network_paths[1].addr, ("1.2.3.4", 1234))
self.assertTrue(server._network_paths[1].is_validated)
# server sends PATH_CHALLENGE and receives PATH_RESPONSE
for data, addr in server.datagrams_to_send(now=time.time()):
client.receive_datagram(data, SERVER_ADDR, now=time.time())
for data, addr in client.datagrams_to_send(now=time.time()):
server.receive_datagram(data, ("1.2.3.4", 2345), now=time.time())
# check paths
self.assertEqual(server._network_paths[0].addr, ("1.2.3.4", 2345))
self.assertTrue(server._network_paths[0].is_validated)
self.assertEqual(server._network_paths[1].addr, ("1.2.3.4", 1234))
self.assertTrue(server._network_paths[1].is_validated)
def test_handle_path_response_frame_bad(self):
with client_and_server() as (client, server):
            # server receives unsolicited PATH_RESPONSE
with self.assertRaises(QuicConnectionError) as cm:
server._handle_path_response_frame(
client_receive_context(client),
QuicFrameType.PATH_RESPONSE,
Buffer(data=b"\x11\x22\x33\x44\x55\x66\x77\x88"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, QuicFrameType.PATH_RESPONSE)
def test_handle_padding_frame(self):
client = create_standalone_client(self)
# no more padding
buf = Buffer(data=b"")
client._handle_padding_frame(
client_receive_context(client), QuicFrameType.PADDING, buf
)
self.assertEqual(buf.tell(), 0)
# padding until end
buf = Buffer(data=bytes(10))
client._handle_padding_frame(
client_receive_context(client), QuicFrameType.PADDING, buf
)
self.assertEqual(buf.tell(), 10)
# padding then something else
buf = Buffer(data=bytes(10) + b"\x01")
client._handle_padding_frame(
client_receive_context(client), QuicFrameType.PADDING, buf
)
self.assertEqual(buf.tell(), 10)
def test_handle_reset_stream_frame(self):
with client_and_server() as (client, server):
# client creates bidirectional stream 0
client.send_stream_data(stream_id=0, data=b"hello")
consume_events(client)
# client receives RESET_STREAM
client._handle_reset_stream_frame(
client_receive_context(client),
QuicFrameType.RESET_STREAM,
Buffer(data=binascii.unhexlify("000100")),
)
event = client.next_event()
self.assertEqual(type(event), events.StreamReset)
self.assertEqual(event.error_code, QuicErrorCode.INTERNAL_ERROR)
self.assertEqual(event.stream_id, 0)
def test_handle_reset_stream_frame_send_only(self):
with client_and_server() as (client, server):
# client creates unidirectional stream 2
client.send_stream_data(stream_id=2, data=b"hello")
# client receives RESET_STREAM
with self.assertRaises(QuicConnectionError) as cm:
client._handle_reset_stream_frame(
client_receive_context(client),
QuicFrameType.RESET_STREAM,
Buffer(data=binascii.unhexlify("021100")),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.STREAM_STATE_ERROR)
self.assertEqual(cm.exception.frame_type, QuicFrameType.RESET_STREAM)
self.assertEqual(cm.exception.reason_phrase, "Stream is send-only")
def test_handle_retire_connection_id_frame(self):
with client_and_server() as (client, server):
self.assertEqual(
sequence_numbers(client._host_cids), [0, 1, 2, 3, 4, 5, 6, 7]
)
# client receives RETIRE_CONNECTION_ID
client._handle_retire_connection_id_frame(
client_receive_context(client),
QuicFrameType.RETIRE_CONNECTION_ID,
Buffer(data=b"\x02"),
)
self.assertEqual(
sequence_numbers(client._host_cids), [0, 1, 3, 4, 5, 6, 7, 8]
)
def test_handle_retire_connection_id_frame_current_cid(self):
with client_and_server() as (client, server):
self.assertEqual(
sequence_numbers(client._host_cids), [0, 1, 2, 3, 4, 5, 6, 7]
)
# client receives RETIRE_CONNECTION_ID for the current CID
with self.assertRaises(QuicConnectionError) as cm:
client._handle_retire_connection_id_frame(
client_receive_context(client),
QuicFrameType.RETIRE_CONNECTION_ID,
Buffer(data=b"\x00"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(
cm.exception.frame_type, QuicFrameType.RETIRE_CONNECTION_ID
)
self.assertEqual(
cm.exception.reason_phrase, "Cannot retire current connection ID"
)
self.assertEqual(
sequence_numbers(client._host_cids), [0, 1, 2, 3, 4, 5, 6, 7]
)
def test_handle_stop_sending_frame(self):
with client_and_server() as (client, server):
# client creates bidirectional stream 0
client.send_stream_data(stream_id=0, data=b"hello")
# client receives STOP_SENDING
client._handle_stop_sending_frame(
client_receive_context(client),
QuicFrameType.STOP_SENDING,
Buffer(data=b"\x00\x11"),
)
def test_handle_stop_sending_frame_receive_only(self):
with client_and_server() as (client, server):
# server creates unidirectional stream 3
server.send_stream_data(stream_id=3, data=b"hello")
# client receives STOP_SENDING
with self.assertRaises(QuicConnectionError) as cm:
client._handle_stop_sending_frame(
client_receive_context(client),
QuicFrameType.STOP_SENDING,
Buffer(data=b"\x03\x11"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.STREAM_STATE_ERROR)
self.assertEqual(cm.exception.frame_type, QuicFrameType.STOP_SENDING)
self.assertEqual(cm.exception.reason_phrase, "Stream is receive-only")
def test_handle_stream_frame_over_largest_offset(self):
with client_and_server() as (client, server):
# client receives offset + length > 2^62 - 1
frame_type = QuicFrameType.STREAM_BASE | 6
stream_id = 1
with self.assertRaises(QuicConnectionError) as cm:
client._handle_stream_frame(
client_receive_context(client),
frame_type,
Buffer(
data=encode_uint_var(stream_id)
+ encode_uint_var(UINT_VAR_MAX)
+ encode_uint_var(1)
),
)
self.assertEqual(
cm.exception.error_code, QuicErrorCode.FRAME_ENCODING_ERROR
)
self.assertEqual(cm.exception.frame_type, frame_type)
self.assertEqual(
cm.exception.reason_phrase, "offset + length cannot exceed 2^62 - 1"
)
def test_handle_stream_frame_over_max_data(self):
with client_and_server() as (client, server):
# artificially raise received data counter
client._local_max_data_used = client._local_max_data
# client receives STREAM frame
frame_type = QuicFrameType.STREAM_BASE | 4
stream_id = 1
with self.assertRaises(QuicConnectionError) as cm:
client._handle_stream_frame(
client_receive_context(client),
frame_type,
Buffer(data=encode_uint_var(stream_id) + encode_uint_var(1)),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.FLOW_CONTROL_ERROR)
self.assertEqual(cm.exception.frame_type, frame_type)
self.assertEqual(cm.exception.reason_phrase, "Over connection data limit")
def test_handle_stream_frame_over_max_stream_data(self):
with client_and_server() as (client, server):
# client receives STREAM frame
frame_type = QuicFrameType.STREAM_BASE | 4
stream_id = 1
with self.assertRaises(QuicConnectionError) as cm:
client._handle_stream_frame(
client_receive_context(client),
frame_type,
Buffer(
data=encode_uint_var(stream_id)
+ encode_uint_var(client._local_max_stream_data_bidi_remote + 1)
),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.FLOW_CONTROL_ERROR)
self.assertEqual(cm.exception.frame_type, frame_type)
self.assertEqual(cm.exception.reason_phrase, "Over stream data limit")
def test_handle_stream_frame_over_max_streams(self):
with client_and_server() as (client, server):
# client receives STREAM frame
with self.assertRaises(QuicConnectionError) as cm:
client._handle_stream_frame(
client_receive_context(client),
QuicFrameType.STREAM_BASE,
Buffer(
data=encode_uint_var(client._local_max_stream_data_uni * 4 + 3)
),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.STREAM_LIMIT_ERROR)
self.assertEqual(cm.exception.frame_type, QuicFrameType.STREAM_BASE)
self.assertEqual(cm.exception.reason_phrase, "Too many streams open")
def test_handle_stream_frame_send_only(self):
with client_and_server() as (client, server):
# client creates unidirectional stream 2
client.send_stream_data(stream_id=2, data=b"hello")
# client receives STREAM frame
with self.assertRaises(QuicConnectionError) as cm:
client._handle_stream_frame(
client_receive_context(client),
QuicFrameType.STREAM_BASE,
Buffer(data=b"\x02"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.STREAM_STATE_ERROR)
self.assertEqual(cm.exception.frame_type, QuicFrameType.STREAM_BASE)
self.assertEqual(cm.exception.reason_phrase, "Stream is send-only")
def test_handle_stream_frame_wrong_initiator(self):
with client_and_server() as (client, server):
# client receives STREAM frame
with self.assertRaises(QuicConnectionError) as cm:
client._handle_stream_frame(
client_receive_context(client),
QuicFrameType.STREAM_BASE,
Buffer(data=b"\x00"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.STREAM_STATE_ERROR)
self.assertEqual(cm.exception.frame_type, QuicFrameType.STREAM_BASE)
self.assertEqual(cm.exception.reason_phrase, "Wrong stream initiator")
def test_handle_stream_data_blocked_frame(self):
with client_and_server() as (client, server):
# client creates bidirectional stream 0
client.send_stream_data(stream_id=0, data=b"hello")
# client receives STREAM_DATA_BLOCKED
client._handle_stream_data_blocked_frame(
client_receive_context(client),
QuicFrameType.STREAM_DATA_BLOCKED,
Buffer(data=b"\x00\x01"),
)
def test_handle_stream_data_blocked_frame_send_only(self):
with client_and_server() as (client, server):
# client creates unidirectional stream 2
client.send_stream_data(stream_id=2, data=b"hello")
# client receives STREAM_DATA_BLOCKED
with self.assertRaises(QuicConnectionError) as cm:
client._handle_stream_data_blocked_frame(
client_receive_context(client),
QuicFrameType.STREAM_DATA_BLOCKED,
Buffer(data=b"\x02\x01"),
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.STREAM_STATE_ERROR)
self.assertEqual(cm.exception.frame_type, QuicFrameType.STREAM_DATA_BLOCKED)
self.assertEqual(cm.exception.reason_phrase, "Stream is send-only")
def test_handle_streams_blocked_uni_frame(self):
with client_and_server() as (client, server):
# client receives STREAMS_BLOCKED_UNI: 0
client._handle_streams_blocked_frame(
client_receive_context(client),
QuicFrameType.STREAMS_BLOCKED_UNI,
Buffer(data=b"\x00"),
)
def test_payload_received_padding_only(self):
with client_and_server() as (client, server):
# client receives padding only
is_ack_eliciting, is_probing = client._payload_received(
client_receive_context(client), b"\x00" * 1200
)
self.assertFalse(is_ack_eliciting)
self.assertTrue(is_probing)
def test_payload_received_unknown_frame(self):
with client_and_server() as (client, server):
# client receives unknown frame
with self.assertRaises(QuicConnectionError) as cm:
client._payload_received(client_receive_context(client), b"\x1f")
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, 0x1F)
self.assertEqual(cm.exception.reason_phrase, "Unknown frame type")
def test_payload_received_unexpected_frame(self):
with client_and_server() as (client, server):
# client receives CRYPTO frame in 0-RTT
with self.assertRaises(QuicConnectionError) as cm:
client._payload_received(
client_receive_context(client, epoch=tls.Epoch.ZERO_RTT), b"\x06"
)
self.assertEqual(cm.exception.error_code, QuicErrorCode.PROTOCOL_VIOLATION)
self.assertEqual(cm.exception.frame_type, QuicFrameType.CRYPTO)
self.assertEqual(cm.exception.reason_phrase, "Unexpected frame type")
def test_payload_received_malformed_frame(self):
with client_and_server() as (client, server):
# client receives malformed TRANSPORT_CLOSE frame
with self.assertRaises(QuicConnectionError) as cm:
client._payload_received(
client_receive_context(client), b"\x1c\x00\x01"
)
self.assertEqual(
cm.exception.error_code, QuicErrorCode.FRAME_ENCODING_ERROR
)
self.assertEqual(cm.exception.frame_type, 0x1C)
self.assertEqual(cm.exception.reason_phrase, "Failed to parse frame")
def test_send_max_data_blocked_by_cc(self):
with client_and_server() as (client, server):
# check congestion control
self.assertEqual(client._loss.bytes_in_flight, 0)
self.assertEqual(client._loss.congestion_window, 14303)
# artificially raise received data counter
client._local_max_data_used = client._local_max_data
self.assertEqual(server._remote_max_data, 1048576)
# artificially raise bytes in flight
client._loss._cc.bytes_in_flight = 14303
# MAX_DATA is not sent due to congestion control
self.assertEqual(drop(client), 0)
def test_send_max_data_retransmit(self):
with client_and_server() as (client, server):
# artificially raise received data counter
client._local_max_data_used = client._local_max_data
self.assertEqual(server._remote_max_data, 1048576)
# MAX_DATA is sent and lost
self.assertEqual(drop(client), 1)
self.assertEqual(client._local_max_data_sent, 2097152)
self.assertEqual(server._remote_max_data, 1048576)
# MAX_DATA is retransmitted and acked
client._on_max_data_delivery(QuicDeliveryState.LOST)
self.assertEqual(client._local_max_data_sent, 0)
self.assertEqual(roundtrip(client, server), (1, 1))
self.assertEqual(server._remote_max_data, 2097152)
def test_send_max_stream_data_retransmit(self):
with client_and_server() as (client, server):
# client creates bidirectional stream 0
stream = client._create_stream(stream_id=0)
client.send_stream_data(0, b"hello")
self.assertEqual(stream.max_stream_data_local, 1048576)
self.assertEqual(stream.max_stream_data_local_sent, 1048576)
self.assertEqual(roundtrip(client, server), (1, 1))
# server sends data, just before raising MAX_STREAM_DATA
server.send_stream_data(0, b"Z" * 524288) # 1048576 // 2
for i in range(10):
roundtrip(server, client)
self.assertEqual(stream.max_stream_data_local, 1048576)
self.assertEqual(stream.max_stream_data_local_sent, 1048576)
# server sends one more byte
server.send_stream_data(0, b"Z")
self.assertEqual(transfer(server, client), 1)
# MAX_STREAM_DATA is sent and lost
self.assertEqual(drop(client), 1)
self.assertEqual(stream.max_stream_data_local, 2097152)
self.assertEqual(stream.max_stream_data_local_sent, 2097152)
client._on_max_stream_data_delivery(QuicDeliveryState.LOST, stream)
self.assertEqual(stream.max_stream_data_local, 2097152)
self.assertEqual(stream.max_stream_data_local_sent, 0)
            # MAX_STREAM_DATA is retransmitted and acked
self.assertEqual(roundtrip(client, server), (1, 1))
self.assertEqual(stream.max_stream_data_local, 2097152)
self.assertEqual(stream.max_stream_data_local_sent, 2097152)
def test_send_ping(self):
with client_and_server() as (client, server):
consume_events(client)
# client sends ping, server ACKs it
client.send_ping(uid=12345)
self.assertEqual(roundtrip(client, server), (1, 1))
# check event
event = client.next_event()
self.assertEqual(type(event), events.PingAcknowledged)
self.assertEqual(event.uid, 12345)
def test_send_ping_retransmit(self):
with client_and_server() as (client, server):
consume_events(client)
            # client sends a ping, PING is lost
client.send_ping(uid=12345)
self.assertEqual(drop(client), 1)
# PING is retransmitted and acked
client._on_ping_delivery(QuicDeliveryState.LOST, (12345,))
self.assertEqual(roundtrip(client, server), (1, 1))
# check event
event = client.next_event()
self.assertEqual(type(event), events.PingAcknowledged)
self.assertEqual(event.uid, 12345)
def test_send_stream_data_over_max_streams_bidi(self):
with client_and_server() as (client, server):
# create streams
for i in range(128):
stream_id = i * 4
client.send_stream_data(stream_id, b"")
self.assertFalse(client._streams[stream_id].is_blocked)
self.assertEqual(len(client._streams_blocked_bidi), 0)
self.assertEqual(len(client._streams_blocked_uni), 0)
self.assertEqual(roundtrip(client, server), (0, 0))
# create one too many -> STREAMS_BLOCKED
stream_id = 128 * 4
client.send_stream_data(stream_id, b"")
self.assertTrue(client._streams[stream_id].is_blocked)
self.assertEqual(len(client._streams_blocked_bidi), 1)
self.assertEqual(len(client._streams_blocked_uni), 0)
self.assertEqual(roundtrip(client, server), (1, 1))
# peer raises max streams
client._handle_max_streams_bidi_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAMS_BIDI,
Buffer(data=encode_uint_var(129)),
)
self.assertFalse(client._streams[stream_id].is_blocked)
def test_send_stream_data_over_max_streams_uni(self):
with client_and_server() as (client, server):
# create streams
for i in range(128):
stream_id = i * 4 + 2
client.send_stream_data(stream_id, b"")
self.assertFalse(client._streams[stream_id].is_blocked)
self.assertEqual(len(client._streams_blocked_bidi), 0)
self.assertEqual(len(client._streams_blocked_uni), 0)
self.assertEqual(roundtrip(client, server), (0, 0))
# create one too many -> STREAMS_BLOCKED
stream_id = 128 * 4 + 2
client.send_stream_data(stream_id, b"")
self.assertTrue(client._streams[stream_id].is_blocked)
self.assertEqual(len(client._streams_blocked_bidi), 0)
self.assertEqual(len(client._streams_blocked_uni), 1)
self.assertEqual(roundtrip(client, server), (1, 1))
# peer raises max streams
client._handle_max_streams_uni_frame(
client_receive_context(client),
QuicFrameType.MAX_STREAMS_UNI,
Buffer(data=encode_uint_var(129)),
)
self.assertFalse(client._streams[stream_id].is_blocked)
def test_send_stream_data_peer_initiated(self):
with client_and_server() as (client, server):
# server creates bidirectional stream
server.send_stream_data(1, b"hello")
self.assertEqual(roundtrip(server, client), (1, 1))
# server creates unidirectional stream
server.send_stream_data(3, b"hello")
self.assertEqual(roundtrip(server, client), (1, 1))
# client creates bidirectional stream
client.send_stream_data(0, b"hello")
self.assertEqual(roundtrip(client, server), (1, 1))
# client sends data on server-initiated bidirectional stream
client.send_stream_data(1, b"hello")
self.assertEqual(roundtrip(client, server), (1, 1))
            # client creates unidirectional stream
client.send_stream_data(2, b"hello")
self.assertEqual(roundtrip(client, server), (1, 1))
            # client tries to send data on server-initiated unidirectional stream
with self.assertRaises(ValueError) as cm:
client.send_stream_data(3, b"hello")
self.assertEqual(
str(cm.exception),
"Cannot send data on peer-initiated unidirectional stream",
)
# client tries to send data on unknown server-initiated bidirectional stream
with self.assertRaises(ValueError) as cm:
client.send_stream_data(5, b"hello")
self.assertEqual(
str(cm.exception), "Cannot send data on unknown peer-initiated stream"
)
def test_stream_direction(self):
with client_and_server() as (client, server):
for off in [0, 4, 8]:
# Client-Initiated, Bidirectional
self.assertTrue(client._stream_can_receive(off))
self.assertTrue(client._stream_can_send(off))
self.assertTrue(server._stream_can_receive(off))
self.assertTrue(server._stream_can_send(off))
# Server-Initiated, Bidirectional
self.assertTrue(client._stream_can_receive(off + 1))
self.assertTrue(client._stream_can_send(off + 1))
self.assertTrue(server._stream_can_receive(off + 1))
self.assertTrue(server._stream_can_send(off + 1))
# Client-Initiated, Unidirectional
self.assertFalse(client._stream_can_receive(off + 2))
self.assertTrue(client._stream_can_send(off + 2))
self.assertTrue(server._stream_can_receive(off + 2))
self.assertFalse(server._stream_can_send(off + 2))
# Server-Initiated, Unidirectional
self.assertTrue(client._stream_can_receive(off + 3))
self.assertFalse(client._stream_can_send(off + 3))
self.assertFalse(server._stream_can_receive(off + 3))
self.assertTrue(server._stream_can_send(off + 3))
def test_version_negotiation_fail(self):
client = create_standalone_client(self)
# no common version, no retry
client.receive_datagram(
encode_quic_version_negotiation(
source_cid=client._peer_cid,
destination_cid=client.host_cid,
supported_versions=[0xFF000011], # DRAFT_16
),
SERVER_ADDR,
now=time.time(),
)
self.assertEqual(drop(client), 0)
event = client.next_event()
self.assertEqual(type(event), events.ConnectionTerminated)
self.assertEqual(event.error_code, QuicErrorCode.INTERNAL_ERROR)
self.assertEqual(event.frame_type, None)
self.assertEqual(
event.reason_phrase, "Could not find a common protocol version"
)
def test_version_negotiation_ok(self):
client = create_standalone_client(self)
# found a common version, retry
client.receive_datagram(
encode_quic_version_negotiation(
source_cid=client._peer_cid,
destination_cid=client.host_cid,
supported_versions=[client._version],
),
SERVER_ADDR,
now=time.time(),
)
self.assertEqual(drop(client), 1)
class QuicNetworkPathTest(TestCase):
def test_can_send(self):
path = QuicNetworkPath(("1.2.3.4", 1234))
self.assertFalse(path.is_validated)
        # initially, can only send zero bytes
self.assertTrue(path.can_send(0))
self.assertFalse(path.can_send(1))
# receive some data
path.bytes_received += 1
self.assertTrue(path.can_send(0))
self.assertTrue(path.can_send(1))
self.assertTrue(path.can_send(2))
self.assertTrue(path.can_send(3))
self.assertFalse(path.can_send(4))
# send some data
path.bytes_sent += 3
self.assertTrue(path.can_send(0))
self.assertFalse(path.can_send(1))
| mpl-2.0 | -6,968,755,915,311,784,000 | 40.440556 | 88 | 0.605901 | false |
Em-Pan/swift | swift/common/middleware/versioned_writes.py | 9 | 21692 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Object versioning in swift is implemented by setting a flag on the container
to tell swift to version all objects in the container. The flag is the
``X-Versions-Location`` header on the container, and its value is the
container where the versions are stored. It is recommended to use a different
``X-Versions-Location`` container for each container that is being versioned.
When data is ``PUT`` into a versioned container (a container with the
versioning flag turned on), the existing data in the file is redirected to a
new object and the data in the ``PUT`` request is saved as the data for the
versioned object. The new object name (for the previous version) is
``<versions_container>/<length><object_name>/<timestamp>``, where ``length``
is the 3-character zero-padded hexadecimal length of the ``<object_name>`` and
``<timestamp>`` is the timestamp of when the previous version was created.
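For example, assuming an object named ``myobject`` was overwritten at Unix time
1440497413, its previous version would be stored as
``008myobject/1440497413.00000`` in the versions container: ``008`` is the
zero-padded hexadecimal length of ``myobject`` and the suffix is the internal
timestamp format.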
A ``GET`` to a versioned object will return the current version of the object
without having to do any request redirects or metadata lookups.
A ``POST`` to a versioned object will update the object metadata as normal,
but will not create a new version of the object. In other words, new versions
are only created when the content of the object changes.
A ``DELETE`` to a versioned object will only remove the current version of the
object. If you have 5 total versions of the object, you must delete the
object 5 times to completely remove the object.
--------------------------------------------------
How to Enable Object Versioning in a Swift Cluster
--------------------------------------------------
This middleware was written as an effort to refactor parts of the proxy server,
so this functionality was already available in previous releases and every
attempt was made to maintain backwards compatibility. To allow operators to
perform a seamless upgrade, it is not required to add the middleware to the
proxy pipeline and the flag ``allow_versions`` in the container server
configuration files is still valid. In future releases, ``allow_versions``
will be deprecated in favor of adding this middleware to the pipeline to enable
or disable the feature.
In case the middleware is added to the proxy pipeline, you must also
set ``allow_versioned_writes`` to ``True`` in the middleware options
to enable the information about this middleware to be returned in a /info
request.
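A minimal proxy-server.conf filter section might look like the following
(the entry point name below is assumed; check the swift release you run)::
    [filter:versioned_writes]
    use = egg:swift#versioned_writes
    allow_versioned_writes = true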
Upgrade considerations: If ``allow_versioned_writes`` is set in the filter
configuration, you can leave the ``allow_versions`` flag in the container
server configuration files untouched. If you decide to disable or remove the
``allow_versions`` flag, you must re-set any existing containers that had
the 'X-Versions-Location' flag configured so that it can now be tracked by the
versioned_writes middleware.
-----------------------
Examples Using ``curl``
-----------------------
First, create a container with the ``X-Versions-Location`` header or add the
header to an existing container. Also make sure the container referenced by
the ``X-Versions-Location`` exists. In this example, the name of that
container is "versions"::
curl -i -XPUT -H "X-Auth-Token: <token>" \
-H "X-Versions-Location: versions" http://<storage_url>/container
curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions
Create an object (the first version)::
curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
Now create a new version of that object::
curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
See a listing of the older versions of the object::
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
Now delete the current version of the object and see that the older version is
gone::
curl -i -XDELETE -H "X-Auth-Token: <token>" \
http://<storage_url>/container/myobject
curl -i -H "X-Auth-Token: <token>" \
http://<storage_url>/versions?prefix=008myobject/
---------------------------------------------------
How to Disable Object Versioning in a Swift Cluster
---------------------------------------------------
If you want to disable all functionality, set ``allow_versioned_writes`` to
``False`` in the middleware options.
Disable versioning from a container (x is any value except empty)::
curl -i -XPOST -H "X-Auth-Token: <token>" \
-H "X-Remove-Versions-Location: x" http://<storage_url>/container
"""
import time
from urllib import quote, unquote
from swift.common.utils import get_logger, Timestamp, json, \
register_swift_info, config_true_value
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.wsgi import WSGIContext, make_pre_authed_request
from swift.common.swob import Request, HTTPException
from swift.common.constraints import (
check_account_format, check_container_format, check_destination_header)
from swift.proxy.controllers.base import get_container_info
from swift.common.http import (
is_success, is_client_error, HTTP_NOT_FOUND)
from swift.common.swob import HTTPPreconditionFailed, HTTPServiceUnavailable, \
HTTPServerError
from swift.common.exceptions import (
ListingIterNotFound, ListingIterError)
class VersionedWritesContext(WSGIContext):
def __init__(self, wsgi_app, logger):
WSGIContext.__init__(self, wsgi_app)
self.logger = logger
def _listing_iter(self, account_name, lcontainer, lprefix, env):
for page in self._listing_pages_iter(account_name,
lcontainer, lprefix, env):
for item in page:
yield item
def _listing_pages_iter(self, account_name, lcontainer, lprefix, env):
marker = ''
while True:
lreq = make_pre_authed_request(
env, method='GET', swift_source='VW',
path='/v1/%s/%s' % (account_name, lcontainer))
lreq.environ['QUERY_STRING'] = \
'format=json&prefix=%s&marker=%s' % (quote(lprefix),
quote(marker))
lresp = lreq.get_response(self.app)
if not is_success(lresp.status_int):
if lresp.status_int == HTTP_NOT_FOUND:
raise ListingIterNotFound()
elif is_client_error(lresp.status_int):
raise HTTPPreconditionFailed()
else:
raise ListingIterError()
if not lresp.body:
break
sublisting = json.loads(lresp.body)
if not sublisting:
break
marker = sublisting[-1]['name'].encode('utf-8')
yield sublisting
def handle_obj_versions_put(self, req, object_versions,
object_name, policy_index):
ret = None
# do a HEAD request to check object versions
_headers = {'X-Newest': 'True',
'X-Backend-Storage-Policy-Index': policy_index,
'x-auth-token': req.headers.get('x-auth-token')}
# make a pre_auth request in case the user has write access
        # to the container, but not READ. This was allowed in previous
        # versions (i.e., before this middleware), so keep the same behavior here
head_req = make_pre_authed_request(
req.environ, path=req.path_info,
headers=_headers, method='HEAD', swift_source='VW')
hresp = head_req.get_response(self.app)
is_dlo_manifest = 'X-Object-Manifest' in req.headers or \
'X-Object-Manifest' in hresp.headers
# if there's an existing object, then copy it to
# X-Versions-Location
if is_success(hresp.status_int) and not is_dlo_manifest:
lcontainer = object_versions.split('/')[0]
prefix_len = '%03x' % len(object_name)
lprefix = prefix_len + object_name + '/'
ts_source = hresp.environ.get('swift_x_timestamp')
if ts_source is None:
ts_source = time.mktime(time.strptime(
hresp.headers['last-modified'],
'%a, %d %b %Y %H:%M:%S GMT'))
new_ts = Timestamp(ts_source).internal
vers_obj_name = lprefix + new_ts
copy_headers = {
'Destination': '%s/%s' % (lcontainer, vers_obj_name),
'x-auth-token': req.headers.get('x-auth-token')}
# COPY implementation sets X-Newest to True when it internally
# does a GET on source object. So, we don't have to explicity
# set it in request headers here.
copy_req = make_pre_authed_request(
req.environ, path=req.path_info,
headers=copy_headers, method='COPY', swift_source='VW')
copy_resp = copy_req.get_response(self.app)
if is_success(copy_resp.status_int):
# success versioning previous existing object
# return None and handle original request
ret = None
else:
if is_client_error(copy_resp.status_int):
# missing container or bad permissions
ret = HTTPPreconditionFailed(request=req)
else:
# could not copy the data, bail
ret = HTTPServiceUnavailable(request=req)
else:
if hresp.status_int == HTTP_NOT_FOUND or is_dlo_manifest:
# nothing to version
# return None and handle original request
ret = None
else:
# if not HTTP_NOT_FOUND, return error immediately
ret = hresp
return ret
def handle_obj_versions_delete(self, req, object_versions,
account_name, container_name, object_name):
lcontainer = object_versions.split('/')[0]
prefix_len = '%03x' % len(object_name)
lprefix = prefix_len + object_name + '/'
item_list = []
try:
for _item in self._listing_iter(account_name, lcontainer, lprefix,
req.environ):
item_list.append(_item)
except ListingIterNotFound:
pass
except HTTPPreconditionFailed:
return HTTPPreconditionFailed(request=req)
except ListingIterError:
return HTTPServerError(request=req)
if item_list:
# we're about to start making COPY requests - need to validate the
# write access to the versioned container
if 'swift.authorize' in req.environ:
container_info = get_container_info(
req.environ, self.app)
req.acl = container_info.get('write_acl')
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
while len(item_list) > 0:
previous_version = item_list.pop()
# there are older versions so copy the previous version to the
# current object and delete the previous version
prev_obj_name = previous_version['name'].encode('utf-8')
copy_path = '/v1/' + account_name + '/' + \
lcontainer + '/' + prev_obj_name
copy_headers = {'X-Newest': 'True',
'Destination': container_name + '/' + object_name,
'x-auth-token': req.headers.get('x-auth-token')}
copy_req = make_pre_authed_request(
req.environ, path=copy_path,
headers=copy_headers, method='COPY', swift_source='VW')
copy_resp = copy_req.get_response(self.app)
# if the version isn't there, keep trying with previous version
if copy_resp.status_int == HTTP_NOT_FOUND:
continue
if not is_success(copy_resp.status_int):
if is_client_error(copy_resp.status_int):
# some user error, maybe permissions
return HTTPPreconditionFailed(request=req)
else:
# could not copy the data, bail
return HTTPServiceUnavailable(request=req)
# reset these because the COPY changed them
new_del_req = make_pre_authed_request(
req.environ, path=copy_path, method='DELETE',
swift_source='VW')
req = new_del_req
# remove 'X-If-Delete-At', since it is not for the older copy
if 'X-If-Delete-At' in req.headers:
del req.headers['X-If-Delete-At']
break
# handle DELETE request here in case it was modified
return req.get_response(self.app)
def handle_container_request(self, env, start_response):
app_resp = self._app_call(env)
if self._response_headers is None:
self._response_headers = []
sysmeta_version_hdr = get_sys_meta_prefix('container') + \
'versions-location'
location = ''
for key, val in self._response_headers:
if key.lower() == sysmeta_version_hdr:
location = val
if location:
self._response_headers.extend([('X-Versions-Location', location)])
start_response(self._response_status,
self._response_headers,
self._response_exc_info)
return app_resp
class VersionedWritesMiddleware(object):
def __init__(self, app, conf):
self.app = app
self.conf = conf
self.logger = get_logger(conf, log_route='versioned_writes')
def container_request(self, req, start_response, enabled):
sysmeta_version_hdr = get_sys_meta_prefix('container') + \
'versions-location'
# set version location header as sysmeta
if 'X-Versions-Location' in req.headers:
val = req.headers.get('X-Versions-Location')
if val:
                # differently from previous versions, we are actually
# returning an error if user tries to set versions location
# while feature is explicitly disabled.
if not config_true_value(enabled) and \
req.method in ('PUT', 'POST'):
raise HTTPPreconditionFailed(
request=req, content_type='text/plain',
body='Versioned Writes is disabled')
location = check_container_format(req, val)
req.headers[sysmeta_version_hdr] = location
# reset original header to maintain sanity
# now only sysmeta is source of Versions Location
req.headers['X-Versions-Location'] = ''
# if both headers are in the same request
                # adding a location takes precedence over removing it
if 'X-Remove-Versions-Location' in req.headers:
del req.headers['X-Remove-Versions-Location']
else:
# empty value is the same as X-Remove-Versions-Location
req.headers['X-Remove-Versions-Location'] = 'x'
# handle removing versions container
val = req.headers.get('X-Remove-Versions-Location')
if val:
req.headers.update({sysmeta_version_hdr: ''})
req.headers.update({'X-Versions-Location': ''})
del req.headers['X-Remove-Versions-Location']
# send request and translate sysmeta headers from response
vw_ctx = VersionedWritesContext(self.app, self.logger)
return vw_ctx.handle_container_request(req.environ, start_response)
def object_request(self, req, version, account, container, obj,
allow_versioned_writes):
account_name = unquote(account)
container_name = unquote(container)
object_name = unquote(obj)
container_info = None
resp = None
is_enabled = config_true_value(allow_versioned_writes)
if req.method in ('PUT', 'DELETE'):
container_info = get_container_info(
req.environ, self.app)
elif req.method == 'COPY' and 'Destination' in req.headers:
if 'Destination-Account' in req.headers:
account_name = req.headers.get('Destination-Account')
account_name = check_account_format(req, account_name)
container_name, object_name = check_destination_header(req)
req.environ['PATH_INFO'] = "/%s/%s/%s/%s" % (
version, account_name, container_name, object_name)
container_info = get_container_info(
req.environ, self.app)
if not container_info:
return self.app
# To maintain backwards compatibility, container version
# location could be stored as sysmeta or not, need to check both.
# If stored as sysmeta, check if middleware is enabled. If sysmeta
# is not set, but versions property is set in container_info, then
# for backwards compatibility feature is enabled.
object_versions = container_info.get(
'sysmeta', {}).get('versions-location')
if object_versions and isinstance(object_versions, unicode):
object_versions = object_versions.encode('utf-8')
elif not object_versions:
object_versions = container_info.get('versions')
# if allow_versioned_writes is not set in the configuration files
# but 'versions' is configured, enable feature to maintain
# backwards compatibility
if not allow_versioned_writes and object_versions:
is_enabled = True
if is_enabled and object_versions:
object_versions = unquote(object_versions)
vw_ctx = VersionedWritesContext(self.app, self.logger)
if req.method in ('PUT', 'COPY'):
policy_idx = req.headers.get(
'X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
resp = vw_ctx.handle_obj_versions_put(
req, object_versions, object_name, policy_idx)
else: # handle DELETE
resp = vw_ctx.handle_obj_versions_delete(
req, object_versions, account_name,
container_name, object_name)
if resp:
return resp
else:
return self.app
def __call__(self, env, start_response):
# making a duplicate, because if this is a COPY request, we will
# modify the PATH_INFO to find out if the 'Destination' is in a
# versioned container
req = Request(env.copy())
try:
(version, account, container, obj) = req.split_path(3, 4, True)
except ValueError:
return self.app(env, start_response)
# In case allow_versioned_writes is set in the filter configuration,
# the middleware becomes the authority on whether object
# versioning is enabled or not. In case it is not set, then
# the option in the container configuration is still checked
# for backwards compatibility
# For a container request, first just check if option is set,
# can be either true or false.
# If set, check if enabled when actually trying to set container
# header. If not set, let request be handled by container server
# for backwards compatibility.
# For an object request, also check if option is set (either T or F).
# If set, check if enabled when checking versions container in
# sysmeta property. If it is not set check 'versions' property in
# container_info
allow_versioned_writes = self.conf.get('allow_versioned_writes')
if allow_versioned_writes and container and not obj:
try:
return self.container_request(req, start_response,
allow_versioned_writes)
except HTTPException as error_response:
return error_response(env, start_response)
elif obj and req.method in ('PUT', 'COPY', 'DELETE'):
try:
return self.object_request(
req, version, account, container, obj,
allow_versioned_writes)(env, start_response)
except HTTPException as error_response:
return error_response(env, start_response)
else:
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
if config_true_value(conf.get('allow_versioned_writes')):
register_swift_info('versioned_writes')
def obj_versions_filter(app):
return VersionedWritesMiddleware(app, conf)
return obj_versions_filter
| apache-2.0 | -2,982,273,137,846,533,600 | 42.733871 | 79 | 0.607044 | false |
cs01/pygdbmi | pygdbmi/IoManager.py | 1 | 12792 | """This module defines the `IoManager` class
which manages I/O for file objects connected to an existing gdb process
or pty.
"""
import io
import select
import time
from pprint import pformat
from typing import Union, List, Optional, Dict, Any, Tuple
from pygdbmi import gdbmiparser
import os
import logging
from pygdbmi.constants import (
DEFAULT_GDB_TIMEOUT_SEC,
DEFAULT_TIME_TO_CHECK_FOR_ADDITIONAL_OUTPUT_SEC,
USING_WINDOWS,
GdbTimeoutError,
)
if USING_WINDOWS:
import msvcrt
from ctypes import windll, byref, wintypes, WinError, POINTER # type: ignore
from ctypes.wintypes import HANDLE, DWORD, BOOL
else:
import fcntl
logger = logging.getLogger(__name__)
class IoManager:
def __init__(
self,
stdin: io.BufferedWriter,
stdout: io.BufferedReader,
stderr: Optional[io.BufferedReader],
time_to_check_for_additional_output_sec=DEFAULT_TIME_TO_CHECK_FOR_ADDITIONAL_OUTPUT_SEC,
):
"""
        Manage I/O for file objects created before calling this class.
This can be useful if the gdb process is managed elsewhere, or if a
pty is used.
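        Example (illustrative; assumes gdb was started elsewhere as a
        subprocess with pipes and an MI interpreter):
            import subprocess
            gdb = subprocess.Popen(
                ["gdb", "--interpreter=mi2"],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            io_manager = IoManager(gdb.stdin, gdb.stdout, gdb.stderr)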
"""
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.stdin_fileno = self.stdin.fileno()
self.stdout_fileno = self.stdout.fileno()
self.stderr_fileno = self.stderr.fileno() if self.stderr else -1
self.read_list: List[int] = []
if self.stdout:
self.read_list.append(self.stdout_fileno)
self.write_list = [self.stdin_fileno]
self._incomplete_output: Dict[str, Any] = {"stdout": None, "stderr": None}
self.time_to_check_for_additional_output_sec = (
time_to_check_for_additional_output_sec
)
self._allow_overwrite_timeout_times = (
self.time_to_check_for_additional_output_sec > 0
)
make_non_blocking(self.stdout)
if self.stderr:
make_non_blocking(self.stderr)
def get_gdb_response(
self, timeout_sec: float = DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True
):
"""Get response from GDB, and block while doing so. If GDB does not have any response ready to be read
by timeout_sec, an exception is raised.
Args:
            timeout_sec: Maximum time to wait for a response. Must be >= 0. Will stop waiting after timeout_sec seconds.
raise_error_on_timeout: Whether an exception should be raised if no response was found after timeout_sec
Returns:
List of parsed GDB responses, returned from gdbmiparser.parse_response, with the
additional key 'stream' which is either 'stdout' or 'stderr'
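            Each element is a dict in the form produced by
            gdbmiparser.parse_response, for example (illustrative only):
            {'type': 'result', 'message': 'done', 'payload': None, 'stream': 'stdout'}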
Raises:
GdbTimeoutError: if response is not received within timeout_sec
ValueError: if select returned unexpected file number
"""
if timeout_sec < 0:
logger.warning("timeout_sec was negative, replacing with 0")
timeout_sec = 0
if USING_WINDOWS:
retval = self._get_responses_windows(timeout_sec)
else:
retval = self._get_responses_unix(timeout_sec)
if not retval and raise_error_on_timeout:
raise GdbTimeoutError(
"Did not get response from gdb after %s seconds" % timeout_sec
)
else:
return retval
def _get_responses_windows(self, timeout_sec):
"""Get responses on windows. Assume no support for select and use a while loop."""
timeout_time_sec = time.time() + timeout_sec
responses = []
while True:
responses_list = []
try:
self.stdout.flush()
raw_output = self.stdout.readline().replace(b"\r", b"\n")
responses_list = self._get_responses_list(raw_output, "stdout")
except IOError:
pass
try:
self.stderr.flush()
raw_output = self.stderr.readline().replace(b"\r", b"\n")
responses_list += self._get_responses_list(raw_output, "stderr")
except IOError:
pass
responses += responses_list
if timeout_sec == 0:
break
elif responses_list and self._allow_overwrite_timeout_times:
timeout_time_sec = min(
time.time() + self.time_to_check_for_additional_output_sec,
timeout_time_sec,
)
elif time.time() > timeout_time_sec:
break
return responses
def _get_responses_unix(self, timeout_sec):
"""Get responses on unix-like system. Use select to wait for output."""
timeout_time_sec = time.time() + timeout_sec
responses = []
while True:
select_timeout = timeout_time_sec - time.time()
if select_timeout <= 0:
select_timeout = 0
events, _, _ = select.select(self.read_list, [], [], select_timeout)
responses_list = None # to avoid infinite loop if using Python 2
for fileno in events:
# new data is ready to read
if fileno == self.stdout_fileno:
self.stdout.flush()
raw_output = self.stdout.read()
stream = "stdout"
elif fileno == self.stderr_fileno:
self.stderr.flush()
raw_output = self.stderr.read()
stream = "stderr"
else:
raise ValueError(
"Developer error. Got unexpected file number %d" % fileno
)
responses_list = self._get_responses_list(raw_output, stream)
responses += responses_list
if timeout_sec == 0: # just exit immediately
break
elif responses_list and self._allow_overwrite_timeout_times:
# update timeout time to potentially be closer to now to avoid lengthy wait times when nothing is being output by gdb
timeout_time_sec = min(
time.time() + self.time_to_check_for_additional_output_sec,
timeout_time_sec,
)
elif time.time() > timeout_time_sec:
break
return responses
def _get_responses_list(
self, raw_output: bytes, stream: str
) -> List[Dict[Any, Any]]:
"""Get parsed response list from string output
Args:
            raw_output (bytes): gdb output to parse
stream (str): either stdout or stderr
"""
responses: List[Dict[Any, Any]] = []
(_new_output, self._incomplete_output[stream],) = _buffer_incomplete_responses(
raw_output, self._incomplete_output.get(stream)
)
if not _new_output:
return responses
response_list = list(
filter(lambda x: x, _new_output.decode(errors="replace").split("\n"))
) # remove blank lines
# parse each response from gdb into a dict, and store in a list
for response in response_list:
if gdbmiparser.response_is_finished(response):
pass
else:
parsed_response = gdbmiparser.parse_response(response)
parsed_response["stream"] = stream
logger.debug("%s", pformat(parsed_response))
responses.append(parsed_response)
return responses
def write(
self,
mi_cmd_to_write: Union[str, List[str]],
timeout_sec=DEFAULT_GDB_TIMEOUT_SEC,
raise_error_on_timeout: bool = True,
read_response: bool = True,
):
"""Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec.
Args:
mi_cmd_to_write: String to write to gdb. If list, it is joined by newlines.
timeout_sec: Maximum number of seconds to wait for response before exiting. Must be >= 0.
raise_error_on_timeout: If read_response is True, raise error if no response is received
            read_response: Block and read response. If there is a separate thread running, this can be False, and the reading thread can read the output.
Returns:
List of parsed gdb responses if read_response is True, otherwise []
Raises:
TypeError: if mi_cmd_to_write is not valid
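        Example (illustrative; io_manager is an existing IoManager instance):
            responses = io_manager.write("-break-insert main", timeout_sec=1)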
"""
# self.verify_valid_gdb_subprocess()
if timeout_sec < 0:
logger.warning("timeout_sec was negative, replacing with 0")
timeout_sec = 0
# Ensure proper type of the mi command
if isinstance(mi_cmd_to_write, str):
mi_cmd_to_write_str = mi_cmd_to_write
elif isinstance(mi_cmd_to_write, list):
mi_cmd_to_write_str = "\n".join(mi_cmd_to_write)
else:
raise TypeError(
"The gdb mi command must a be str or list. Got "
+ str(type(mi_cmd_to_write))
)
logger.debug("writing: %s", mi_cmd_to_write)
if not mi_cmd_to_write_str.endswith("\n"):
mi_cmd_to_write_nl = mi_cmd_to_write_str + "\n"
else:
mi_cmd_to_write_nl = mi_cmd_to_write_str
if USING_WINDOWS:
# select not implemented in windows for pipes
# assume it's always ready
outputready = [self.stdin_fileno]
else:
_, outputready, _ = select.select([], self.write_list, [], timeout_sec)
for fileno in outputready:
if fileno == self.stdin_fileno:
# ready to write
self.stdin.write(mi_cmd_to_write_nl.encode()) # type: ignore
# must flush, otherwise gdb won't realize there is data
# to evaluate, and we won't get a response
self.stdin.flush() # type: ignore
else:
logger.error("got unexpected fileno %d" % fileno)
if read_response is True:
return self.get_gdb_response(
timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout
)
else:
return []
def _buffer_incomplete_responses(
raw_output: Optional[bytes], buf: Optional[bytes]
) -> Tuple[Optional[bytes], Optional[bytes]]:
"""It is possible for some of gdb's output to be read before it completely finished its response.
In that case, a partial mi response was read, which cannot be parsed into structured data.
We want to ALWAYS parse complete mi records. To do this, we store a buffer of gdb's
output if the output did not end in a newline.
Args:
raw_output: Contents of the gdb mi output
        buf (bytes): Buffered gdb response from the past. This is incomplete and needs to be prepended to
gdb's next output.
Returns:
(raw_output, buf)
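    Example (illustrative only):
        _buffer_incomplete_responses(b"^done\n*stop", None) returns
        (b"^done\n", b"*stop"): the complete first line is returned for parsing
        and the trailing partial line is buffered for the next read.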
"""
if raw_output:
if buf:
# concatenate buffer and new output
raw_output = b"".join([buf, raw_output])
buf = None
if b"\n" not in raw_output:
# newline was not found, so assume output is incomplete and store in buffer
buf = raw_output
raw_output = None
elif not raw_output.endswith(b"\n"):
# raw output doesn't end in a newline, so store everything after the last newline (if anything)
# in the buffer, and parse everything before it
remainder_offset = raw_output.rindex(b"\n") + 1
buf = raw_output[remainder_offset:]
raw_output = raw_output[:remainder_offset]
return (raw_output, buf)
def make_non_blocking(file_obj: io.IOBase):
"""make file object non-blocking
Windows doesn't have the fcntl module, but someone on
stack overflow supplied this code as an answer, and it works
http://stackoverflow.com/a/34504971/2893090"""
if USING_WINDOWS:
LPDWORD = POINTER(DWORD)
PIPE_NOWAIT = wintypes.DWORD(0x00000001)
SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState
SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD]
SetNamedPipeHandleState.restype = BOOL
h = msvcrt.get_osfhandle(file_obj.fileno()) # type: ignore
res = windll.kernel32.SetNamedPipeHandleState(h, byref(PIPE_NOWAIT), None, None)
if res == 0:
raise ValueError(WinError())
else:
# Set the file status flag (F_SETFL) on the pipes to be non-blocking
# so we can attempt to read from a pipe with no new data without locking
# the program up
fcntl.fcntl(file_obj, fcntl.F_SETFL, os.O_NONBLOCK)
| mit | -5,533,462,441,223,731,000 | 36.078261 | 149 | 0.586304 | false |
tobiasgehring/qudi | hardware/CTC100_temperature.py | 1 | 5454 | # -*- coding: utf-8 -*-
"""
This module controls the Stanford Instruments CTC100 temperature
controller (also rebranded as CryoVac TIC500, etc).
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
from core.base import Base
import visa
class CTC100(Base):
"""
This module implements communication with CTC100 temperature controllers or clones/licensed devices.
This module is untested and very likely broken.
"""
_modclass = 'ctc100'
_modtype = 'hardware'
def on_activate(self):
""" Activate modeule
"""
config = self.getConfiguration()
self.connect(config['interface'])
def on_deactivate(self):
""" Deactivate modeule
"""
self.disconnect()
def connect(self, interface):
""" Connect to Instrument.
@param str interface: visa interface identifier
@return bool: connection success
"""
try:
self.rm = visa.ResourceManager()
self.inst = self.rm.open_resource(interface, baud_rate=9600, term_chars='\n', send_end=True)
except visa.VisaIOError as e:
self.log.exception("")
return False
else:
return True
def disconnect(self):
""" Close the connection to the instrument.
"""
self.inst.close()
self.rm.close()
def get_channel_names(self):
""" Get a list of channel names.
@return list(str): list of channel names
"""
return self.inst.ask('getOutputNames?').split(', ')
def is_channel_selected(self, channel):
""" Check if a channel is selectes
@param str channel: channel name
@return bool: whether channel is selected
"""
return self.inst.ask(channel.replace(" ", "") + '.selected?' ).split(' = ')[-1] == 'On'
def is_output_on(self):
""" Check if device outputs are enabled.
        @return bool: whether device outputs are enabled
"""
result = self.inst.ask('OutputEnable?').split()[2]
return result == 'On'
def get_temp_by_name(self, name):
""" Get temperature by name.
@return float: temperature value
"""
return self.inst.ask_for_values('{}.value?'.format(name))[0]
def get_all_outputs(self):
""" Get a list of all output names
@return list(str): output names
"""
names = self.get_channel_names()
raw = self.inst.ask('getOutputs?').split(', ')
values = []
for substr in raw:
values.append(float(substr))
return dict(zip(names, values))
def get_selected_channels(self):
""" Get all selected channels.
@return dict: dict of channel_name: bool indicating selected channels
"""
names = self.get_channel_names()
values = []
for channel in names:
values.append(self.is_channel_selected(channel))
return dict(zip(names, values))
def channel_off(self, channel):
""" Turn off channel.
@param channel str: name of channel to turn off
"""
return self.inst.ask('{}.Off'.format(channel)).split(' = ')[1]
def enable_output(self):
""" Turn on all outputs.
@return bool: whether turning on was successful
"""
if self.is_output_on():
return True
else:
result = self.inst.ask('OutputEnable = On').split()[2]
return result == 'On'
def disable_output(self):
""" Turn off all outputs.
@return bool: whether turning off was successful
"""
if self.is_output_on():
result = self.inst.ask('OutputEnable = Off').split()[2]
return result == 'Off'
else:
return True
#
# All the functions below need to be refactored with multichannel PID in mind
#
# def get_setpoint(self, channel):
# return self.inst.ask_for_values('{}.PID.setpoint?'.format(channel))[0]
#
# def set_setpoint(self, channel, setpoint):
# return self.inst.ask_for_values('{}.PID.setpoint = {}'.format(channel, setpoint))[0]
#
# def get_pid_mode(self, channel):
# return self.inst.ask('{}.PID.Mode?'.format(channel)).split(' = ')[1]
#
# def set_pid_mode(self, channel, mode):
# return self.inst.ask('{}.PID.Mode = {}'.format(channel, mode)).split(' = ')[1]
#
#
# def get_value(self, channel):
# try:
# return self.inst.ask_for_values('{}.Value?'.format(channel))[0]
# except:
    # return None
#
# def set_value(self, channel, value):
# return self.inst.ask_for_values('{}.Value = {}'.format(channel, value))[0]
| gpl-3.0 | 3,432,810,270,602,299,000 | 29.988636 | 104 | 0.59901 | false |
m0ppers/arangodb | 3rdParty/V8/V8-5.0.71.39/build/gyp/test/win/gyptest-link-base-address.py | 102 | 1817 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure the base address setting is extracted properly.
"""
import TestGyp
import re
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('base-address.gyp', chdir=CHDIR)
test.build('base-address.gyp', test.ALL, chdir=CHDIR)
def GetHeaders(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
return test.run_dumpbin('/headers', full_path)
# Extract the image base address from the headers output.
image_base_reg_ex = re.compile(r'.*\s+([0-9]+) image base.*', re.DOTALL)
exe_headers = GetHeaders('test_base_specified_exe.exe')
exe_match = image_base_reg_ex.match(exe_headers)
if not exe_match or not exe_match.group(1):
test.fail_test()
if exe_match.group(1) != '420000':
test.fail_test()
dll_headers = GetHeaders('test_base_specified_dll.dll')
dll_match = image_base_reg_ex.match(dll_headers)
if not dll_match or not dll_match.group(1):
test.fail_test()
if dll_match.group(1) != '10420000':
test.fail_test()
default_exe_headers = GetHeaders('test_base_default_exe.exe')
default_exe_match = image_base_reg_ex.match(default_exe_headers)
if not default_exe_match or not default_exe_match.group(1):
test.fail_test()
if default_exe_match.group(1) != '400000':
test.fail_test()
default_dll_headers = GetHeaders('test_base_default_dll.dll')
default_dll_match = image_base_reg_ex.match(default_dll_headers)
if not default_dll_match or not default_dll_match.group(1):
test.fail_test()
if default_dll_match.group(1) != '10000000':
test.fail_test()
test.pass_test()
| apache-2.0 | -6,967,986,929,713,247,000 | 28.306452 | 74 | 0.69235 | false |
Shao-Feng/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_default-src_cross-origin_font_allowed-manual.py | 25 | 2631 | def main(request, response):
_URL = request.url
_CSP = "default-src " + \
_URL[:_URL.index(
'/csp') + 1] + " self; script-src * 'unsafe-inline'; style-src 'unsafe-inline'"
_CSSURL = _URL[:_URL.index('/csp') + 1] + "csp/support/w3c/CanvasTest.ttf"
print _CSSURL
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_default-src_cross-origin_font_allowed</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#default-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<script src="resources/server.js?pipe=sub"></script>
<style>@font-face {font-family: Canvas;src: url(""" + _CSSURL + """);} #test {font-family: Canvas;}</style>
</head>
<body>
<p>Test passes if the two lines are different in font</p>
<div id="test">1234 ABCD</div>
<div>1234 ABCD</div>
</body>
</html> """
| bsd-3-clause | 4,863,146,365,784,561,000 | 43.59322 | 112 | 0.717598 | false |
pranavtbhat/EE219 | project5/part2.py | 1 | 2030 | import json
import numpy as np
from os.path import join
from tqdm import tqdm
import statsmodels.api as stats_api
from datetime import datetime
import pandas as pd
hashtags = {
'gohawks' : 188136,
'nfl' : 259024,
'sb49' : 826951,
'gopatriots' : 26232,
'patriots' : 489713,
'superbowl' : 1348767
}
print "Extracting features from tweets"
for (htag,lcount) in hashtags.iteritems():
print "###"
print "#", htag + ":"
print "###"
with open(join('tweet_data', 'tweets_#' + htag + '.txt'), 'r') as f:
df = pd.DataFrame(index=range(lcount), columns=['dateTime', 'tweetCount', 'retweetCount', 'followerSum', 'maxFollowers'])
for i, line in tqdm(enumerate(f), total=lcount):
tweet_data = json.loads(line)
date = datetime.fromtimestamp(tweet_data['firstpost_date'])
df.set_value(i, 'dateTime', date)
df.set_value(i, 'tweetCount', 1)
df.set_value(i, 'retweetCount', tweet_data['metrics']['citations']['total'])
df.set_value(i, 'followerSum', tweet_data['author']['followers'])
df.set_value(i, 'maxFollowers', tweet_data['author']['followers'])
df = df.set_index('dateTime')
hourlySeries = df.groupby(pd.TimeGrouper(freq='60Min'))
X = np.zeros((len(hourlySeries), 5))
Y = np.zeros((len(hourlySeries)))
for i,(interval,group) in enumerate(hourlySeries):
X[i, 0] = group.tweetCount.sum()
X[i, 1] = group.retweetCount.sum()
X[i, 2] = group.followerSum.sum()
X[i, 3] = group.maxFollowers.max()
X[i, 4] = interval.hour
Y[i] = group.tweetCount.sum()
    # Shift X and Y forward by one to reflect the next hour's predictions
X = np.nan_to_num(X[:-1])
Y = Y[1:]
# Train the regression model
result = stats_api.OLS(Y, X).fit()
print result.summary()
print "--------------------------------------------------------------------------------"
| unlicense | 5,099,998,671,002,346,000 | 32.278689 | 129 | 0.559113 | false |
vincent-noel/SigNetSim | signetsim/views/edit/ModelAnnotationsView.py | 2 | 3986 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel ([email protected])
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" ModelAnnotationsView.py
This file defines the view used to edit a model's annotations (name, notes and publication).
"""
from django.views.generic import TemplateView
from django import __version__
if int(__version__.split('.')[0]) < 2:
from django.core.urlresolvers import reverse
else:
from django.urls import reverse
from libsignetsim.uris.URI import URI
from signetsim.views.HasWorkingModel import HasWorkingModel
from signetsim.views.HasErrorMessages import HasErrorMessages
from .ModelAnnotationsForm import ModelAnnotationsForm
class ModelAnnotationsView(TemplateView, HasWorkingModel, HasErrorMessages):
template_name = 'edit/annotations.html'
def __init__(self, **kwargs):
TemplateView.__init__(self, **kwargs)
HasErrorMessages.__init__(self)
HasWorkingModel.__init__(self)
self.modelHistory = None
self.modelPublication = None
self.form = ModelAnnotationsForm(self)
def get_context_data(self, **kwargs):
kwargs = HasWorkingModel.get_context_data(self, **kwargs)
kwargs = HasErrorMessages.get_context_data(self, **kwargs)
kwargs['page_address'] = reverse('edit_annotations')
kwargs['model_history'] = self.modelHistory
kwargs['model_publication'] = self.modelPublication
kwargs['form'] = self.form
return kwargs
def get(self, request, *args, **kwargs):
self.load(request, *args, **kwargs)
return TemplateView.get(self, request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.load(request, *args, **kwargs)
# HasWorkingModel.load(self, request, *args, **kwargs)
if "action" in request.POST:
if HasWorkingModel.isChooseModel(self, request):
self.load(request, *args, **kwargs)
elif request.POST['action'] == "edit_model_name":
self.saveName(request)
elif request.POST['action'] == "edit_model_notes":
self.saveNotes(request)
elif request.POST['action'] == "set_model_publication":
self.setModelPublication(request)
# self.savePickledModel(request)
return TemplateView.get(self, request, *args, **kwargs)
def load(self, request, *args, **kwargs):
HasErrorMessages.clearErrors(self)
HasWorkingModel.load(self, request, *args, **kwargs)
if len(args) > 0:
self.setModel(request, int(args[0]))
if self.isModelLoaded():
self.form.load()
self.modelHistory = self.getModel().modelHistory
if len(self.getModel().getAnnotation().getIsDescribedBy()) > 0:
self.modelPublication = self.getModel().getAnnotation().getIsDescribedBy()[0]
def saveNotes(self, request):
self.form.readNotes(request)
self.form.saveNotes()
self.saveModel(request)
def saveName(self, request):
self.form.readName(request)
self.form.saveName()
self.saveModel(request)
self.saveModelName(request, self.form.name)
def setModelPublication(self, request):
if str(request.POST['model_publication_pubmed_id']) != "":
t_uri = URI()
t_uri.setPubmed(request.POST['model_publication_pubmed_id'])
self.getModel().getAnnotation().addIsDesribedBy(t_uri)
else:
self.getModel().getAnnotation().clearIsDescribedBy()
self.saveModel(request)
if len(self.getModel().getAnnotation().getIsDescribedBy()) > 0:
self.modelPublication = self.getModel().getAnnotation().getIsDescribedBy()[0]
else:
self.modelPublication = None
| agpl-3.0 | -1,533,106,353,423,201,000 | 28.094891 | 81 | 0.730055 | false |
yksalun/hugula | Client/tools/site-packages/pyExcelerator/ExcelFormula.py | 15 | 2800 | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Roman V. Kiseliov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <[email protected]>."
#
# 4. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by
# Roman V. Kiseliov <[email protected]>."
#
# THIS SOFTWARE IS PROVIDED BY Roman V. Kiseliov ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Roman V. Kiseliov OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
__rev_id__ = """$Id: ExcelFormula.py,v 1.3 2005/08/11 08:53:48 rvk Exp $"""
import ExcelFormulaParser, ExcelFormulaLexer
import struct
from antlr import ANTLRException
class Formula(object):
__slots__ = ["__init__", "text", "rpn", "__s", "__parser"]
def __init__(self, s):
try:
self.__s = s
lexer = ExcelFormulaLexer.Lexer(s)
self.__parser = ExcelFormulaParser.Parser(lexer)
self.__parser.formula()
except ANTLRException:
raise Exception, "can't parse formula " + s
def text(self):
return self.__s
def rpn(self):
'''
Offset Size Contents
0 2 Size of the following formula data (sz)
2 sz Formula data (RPN token array)
[2+sz] var. (optional) Additional data for specific tokens
'''
return struct.pack("<H", len(self.__parser.rpn)) + self.__parser.rpn
| mit | -7,245,997,634,747,897,000 | 35.842105 | 76 | 0.6775 | false |
apagac/cfme_tests | scripts/apishow.py | 2 | 8432 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script browses through the REST API and shows all collections,
subcollections and their actions.
Optionally it can add coverage info taken from cfme log file to each action.
"""
import argparse
import os
import random
import re
import warnings
from collections import namedtuple
from manageiq_client.api import ManageIQClient as MiqApi
from cfme.utils import conf
from cfme.utils.appliance import get_or_create_current_appliance
from cfme.utils.path import log_path
Coverage = namedtuple('Coverage', 'method, action, collection, entity, subcollection, subentity')
method_re = re.compile(r'\[RESTAPI\] ([A-Z]*) http')
action_re = re.compile(r'\'action\': u?\'([a-z_]*)')
searches = [
# collection, e.g. /api/vms
r'/api/([a-z_]*) ',
# entity, e.g. /api/vms/1
r'/api/([a-z_]*)/([0-9]*) ',
# subcollection, e.g. /api/vms/1/tags
r'/api/([a-z_]*)/([0-9]*)/([a-z_]*) ',
# subcollection entity, e.g. /api/vms/1/tags/10
r'/api/([a-z_]*)/([0-9]*)/([a-z_]*)/([0-9]*) '
]
searches_re = [re.compile(search) for search in searches]
def _init_store(key, store):
"""Create key with empty dictionary if key is not already present."""
if key not in store:
store[key] = {}
def parse_coverage_line(line):
"""Parse line with RESTAPI coverage log record."""
method = action = collection = entity = subcollection = subentity = None
try:
method = method_re.search(line).group(1)
except AttributeError:
# line not in expected format
return
if method not in ('POST', 'DELETE'):
return
try:
action = action_re.search(line).group(1)
except AttributeError:
# line not in expected format
return
for expr in searches_re:
search = expr.search(line)
try:
collection = search.group(1)
entity = search.group(2)
subcollection = search.group(3)
subentity = search.group(4)
except (AttributeError, IndexError):
pass
if collection:
# found matching expression
break
else:
return
return Coverage(
method=method,
action=action,
collection=collection,
entity=entity,
subcollection=subcollection,
subentity=subentity)
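# Illustrative sketch added for clarity; it is not part of the original script.
# It shows what parse_coverage_line() extracts from a hypothetical log record.
def _parse_coverage_line_example():
    line = "[RESTAPI] POST https://appliance/api/vms/42 {'action': u'start'}"
    record = parse_coverage_line(line)
    # record == Coverage(method='POST', action='start', collection='vms',
    #                    entity='42', subcollection=None, subentity=None)
    return record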
def save_coverage_record(record, store):
"""Save parsed RESTAPI coverage log record into dictionary."""
_init_store(record.collection, store)
current = store[record.collection]
if record.subcollection:
_init_store(record.subcollection, current)
current = current[record.subcollection]
_init_store('entity', current)
if record.subentity:
target = current['entity']
elif record.subcollection:
target = current
elif record.entity:
target = current['entity']
else:
target = current
_init_store('actions', target)
if record.action in target['actions']:
if record.method not in target['actions'][record.action]:
target['actions'][record.action].append(record.method)
else:
target['actions'][record.action] = [record.method]
def get_coverage(logfile, store):
"""Read pytest log file and look for RESTAPI coverage log records."""
with open(logfile, 'r') as infile:
for line in infile:
if '[RESTAPI]' not in line or 'http' not in line:
continue
record = parse_coverage_line(line)
if not record:
continue
save_coverage_record(record, store)
def get_collections_info(api, store):
"""Get info about collections, subcollections and their actions."""
def _get_actions(entity, store):
try:
entity.reload_if_needed()
except KeyError:
return
try:
actions = entity._actions
except AttributeError:
return
for record in actions:
if record['name'] in store:
store[record['name']].append(record['method'].upper())
else:
store[record['name']] = [record['method'].upper()]
def _process_collection(collection, store, is_subcol=False):
_init_store(collection.name, store)
_init_store('actions_avail', store[collection.name])
_get_actions(collection, store[collection.name]['actions_avail'])
try:
collection_len = len(collection)
except AttributeError:
return
if collection_len <= 0:
return
_init_store('entity', store[collection.name])
_init_store('actions_avail', store[collection.name]['entity'])
entity = random.choice(collection)
_get_actions(entity, store[collection.name]['entity']['actions_avail'])
# don't try to process subcollections if we are already in subcollection
if not is_subcol:
subcollections = collection.options().get('subcollections', [])
for subcol_name in subcollections:
try:
subcol = getattr(entity, subcol_name)
except AttributeError:
continue
_process_collection(subcol, store[collection.name], is_subcol=True)
for collection in api.collections.all:
_process_collection(collection, store)
def print_info(store):
"""Print info about collections together with coverage info when available."""
for name, collection in sorted(store.items()):
print('=' * (2 + len(name)))
print('* {}'.format(name))
def _print_resource(res_title, res_dict):
if 'actions_avail' in res_dict and res_dict['actions_avail']:
print(' {} actions:'.format(res_title))
covered = True if 'actions' in res_dict else False
for action, methods in res_dict['actions_avail'].items():
methods_num = len(methods)
only_post = True if methods_num == 1 and methods[0] == 'POST' else False
if (covered and only_post and
action in res_dict['actions'] and
'POST' in res_dict['actions'][action]):
cov_str = ' OK'
else:
cov_str = ''
print(' * {}{}'.format(action, cov_str))
# not only POST method exists for this action, list them all
if not only_post:
for method in methods:
print(' {}{}'.format(
method,
' OK' if covered and method in res_dict['actions'][action] else ''))
if 'entity' in res_dict:
_print_resource('{} entity'.format(res_title), res_dict['entity'])
for key, subcollection in sorted(res_dict.items()):
if key in ('actions', 'actions_avail', 'entity'):
continue
_print_resource('Subcollection "{}"'.format(key), subcollection)
_print_resource('Collection', collection)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--url',
default=None,
help="URL of the target appliance, default pulled from local environment conf")
parser.add_argument(
'--logfile',
metavar='FILE',
default=os.path.join(log_path.strpath, 'cfme.log'),
help="path to cfme log file, default: %(default)s")
args = parser.parse_args()
appliance_url = args.url or get_or_create_current_appliance().url
# we are really not interested in any warnings and "warnings.simplefilter('ignore')"
# doesn't work when it's redefined later in the REST API client
warnings.showwarning = lambda *args, **kwargs: None
api = MiqApi(
'{}/api'.format(appliance_url.rstrip('/')),
(conf.credentials['default']['username'], conf.credentials['default']['password']),
verify_ssl=False)
print("Appliance URL: {}".format(appliance_url))
store = {}
get_collections_info(api, store)
if os.path.isfile(args.logfile):
get_coverage(args.logfile, store)
print_info(store)
| gpl-2.0 | 5,759,533,631,186,225,000 | 32.066667 | 100 | 0.584559 | false |
boegel/easybuild-framework | easybuild/tools/module_naming_scheme/mns.py | 2 | 7667 | ##
# Copyright 2011-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Module naming scheme API.
:author: Jens Timmerman (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import re
from easybuild.base import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import Singleton
from easybuild.tools.py2vs3 import create_base_metaclass
DEVEL_MODULE_SUFFIX = '-easybuild-devel'
# singleton metaclass: only one instance is created
BaseModuleNamingScheme = create_base_metaclass('BaseModuleNamingScheme', Singleton, object)
class ModuleNamingScheme(BaseModuleNamingScheme):
"""Abstract class for a module naming scheme implementation."""
REQUIRED_KEYS = None
def __init__(self, *args, **kwargs):
"""Initialize logger."""
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
def is_sufficient(self, keys):
"""Determine whether specified list of easyconfig parameters is sufficient for this module naming scheme."""
if self.REQUIRED_KEYS is not None:
return set(keys).issuperset(set(self.REQUIRED_KEYS))
else:
raise EasyBuildError("Constant REQUIRED_KEYS is not defined, "
"should specify required easyconfig parameters.")
def requires_toolchain_details(self):
"""
Determine whether toolchain details are required by this module naming scheme,
e.g. whether one of det_toolchain_* functions are relied upon.
"""
return False
def det_full_module_name(self, ec):
"""
Determine full module name, relative to the top of the module path.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with full module name, e.g.: '<compiler>/<mpi_lib>/<name>/<version>'
"""
raise NotImplementedError
def det_short_module_name(self, ec):
"""
Determine short module name, i.e. the name under which modules will be exposed to users.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with module name, e.g. '<name>/<version>'
"""
# by default: full module name doesn't include a $MODULEPATH subdir
return self.det_full_module_name(ec)
def det_install_subdir(self, ec):
"""
Determine name of software installation subdirectory of install path.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with name of subdirectory, e.g.: '<compiler>/<mpi_lib>/<name>/<version>'
"""
# by default: use full module name as name for install subdir
return self.det_full_module_name(ec)
def det_module_subdir(self, ec):
"""
Determine subdirectory for module file in $MODULEPATH.
This determines the separation between module names exposed to users, and what's part of the $MODULEPATH.
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: string with subdir path (relative to $MODULEPATH), e.g. '<compiler>/<mpi_lib>'
"""
# by default: no subdirectory
return ''
def det_module_symlink_paths(self, ec):
"""
Determine list of paths in which symlinks to module files must be created.
"""
# by default: make a symlink from moduleclass subdirectory of $MODULEPATH
return [ec['moduleclass']]
def det_modpath_extensions(self, ec):
"""
        Determine list of subdirectories with which to extend $MODULEPATH when this module is loaded (if any).
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: A list of $MODULEPATH subdirectories.
"""
# by default: an empty list of subdirectories to extend $MODULEPATH with
return []
def det_user_modpath_extensions(self, ec):
"""
        Determine list of subdirectories relative to the user-specific modules directory with which to extend
        $MODULEPATH when this module is loaded (if any).
:param ec: dict-like object with easyconfig parameter values; for now only the 'name',
'version', 'versionsuffix' and 'toolchain' parameters are guaranteed to be available
:return: A list of $MODULEPATH subdirectories.
"""
# by default: use "system" module path extensions of naming scheme
return self.det_modpath_extensions(ec)
def det_init_modulepaths(self, ec):
"""
Determine initial module paths, where the modules that are top of the hierarchy (if any) live.
"""
return []
def expand_toolchain_load(self, ec=None):
"""
Determine whether load statements for a toolchain should be expanded to load statements for its dependencies.
This is useful when toolchains are not exposed to users.
"""
# by default: just include a load statement for the toolchain
return False
def is_short_modname_for(self, short_modname, name):
"""
Determine whether the specified (short) module name is a module for software with the specified name.
Default implementation checks via a strict regex pattern, and assumes short module names are of the form:
<name>/<version>[-<toolchain>]
"""
modname_regex = re.compile(r'^%s(/\S+)?$' % re.escape(name))
res = bool(modname_regex.match(short_modname))
self.log.debug("Checking whether '%s' is a module name for software with name '%s' via regex %s: %s",
short_modname, name, modname_regex.pattern, res)
return res
def det_make_devel_module(self):
"""
Determine if a devel module should be generated.
Can be used to create a separate set of modules with a different naming scheme.
Software is already installed beforehand with one naming scheme, including development module.
"""
return True
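# Illustrative sketch added for clarity; it is not part of EasyBuild itself.
# A minimal custom scheme only needs REQUIRED_KEYS plus det_full_module_name();
# the class name and flat <name>/<version><versionsuffix> format are made up.
class FlatModuleNamingScheme(ModuleNamingScheme):
    """Example naming scheme that flattens every module name (example only)."""
    REQUIRED_KEYS = ['name', 'version', 'versionsuffix']
    def det_full_module_name(self, ec):
        """Return <name>/<version><versionsuffix>, e.g. 'zlib/1.2.11'."""
        return '%s/%s%s' % (ec['name'], ec['version'], ec['versionsuffix'])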
| gpl-2.0 | -1,843,140,398,095,341,800 | 41.832402 | 117 | 0.670145 | false |
dshlai/oyprojectmanager | tests/utils/test_backup.py | 3 | 17924 | # -*- coding: utf-8 -*-
# Copyright (c) 2009-2012, Erkan Ozgur Yilmaz
#
# This module is part of oyProjectManager and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
#
#import os
#import shutil
#import tempfile
#import subprocess
#import unittest
#from xml.dom import minidom
#import jinja2
#import pyseq
#from oyProjectManager import config
#from oyProjectManager import Asset, Repository, Sequence, Shot, Project
#from oyProjectManager.utils.backup import BackUp
#
#conf = config.Config()
#
#class BackUpCreationTester(unittest.TestCase):
# """tests :mod:`~oyProjectManager.utils.backup` module
# """
#
# @classmethod
# def setUpClass(cls):
# """set up the test at class level
# """
# os.environ[conf.repository_env_key] = tempfile.mkdtemp()
#
# # create a new project called BACKUP_TEST_PROJECT
# cls.test_project = Project(name="BACKUP_TEST_PROJECT")
# cls.test_project.create()
#
# @classmethod
# def tearDownClass(cls):
# """tear down the test at class level
# """
# # remove the project dir
# try:
# shutil.rmtree(cls.test_project.full_path)
# except IOError:
# pass
#
# def setUp(self):
# """sets up the test
# """
# # create a BackUp node
# self.kwargs = {
# "project": "BACKUP_TEST_PROJECT",
# "output": "/tmp/oyProjectManager_Backup/BackUp",
# "number_of_versions": 1,
# "extra_filter_rules":
# "/tmp/oyProjectManager_BackUp/extra_filter_rules",
# }
#
# self.test_backUp_obj = BackUp(**self.kwargs)
#
# def test_project_argument_skipped(self):
# """testing if a TypeError will be raised when the project argument is
# skipped
# """
# self.kwargs.pop("project")
# self.assertRaises(TypeError, BackUp, **self.kwargs)
#
# def test_project_argument_is_empty_string(self):
# """testing if a ValueError will be raised when the project argument
# is an empty string
# """
# self.kwargs["project"] = ""
# self.assertRaises(ValueError, BackUp, **self.kwargs)
#
# def test_project_attribute_is_empty_string(self):
# """testing if a ValueError will be raised when the project attribute
# is empty string
# """
# self.assertRaises(ValueError, setattr, self.test_backUp_obj,
# "project", "")
#
# def test_project_argument_is_not_a_Project_instance_or_string(self):
# """testing if a TypeError will be raised when the project argument is
# not a :class:`~oyProjectManager.models.project.Project` instance or
# string
# """
# self.kwargs["project"] = 123123
# self.assertRaises(TypeError, BackUp, **self.kwargs)
#
# def test_project_attribute_is_not_a_Project_instance_or_string(self):
# """testing if a TypeError will be raised when the project attribute is
# not a :class:`~oyProjectManager.models.project.Project` instance or a
# valid project name
# """
# test_value = 123123
# self.assertRaises(TypeError, setattr, self.test_backUp_obj,
# "project", test_value)
#
# def test_project_argument_works_properly(self):
# """testing if the project argument is working properly, means it fills
# the project attribute with the appropriate value
# """
#
# self.kwargs["project"] = "BACKUP_TEST_PROJECT"
# new_backup = BackUp(**self.kwargs)
# self.assertEqual(new_backup.project,
# Project(name=self.kwargs["project"]))
#
#
# def test_project_attribute_works_properly(self):
# """testing if the project attribute is working properly
# """
# repo = Repository()
# project_name = repo.project_names[0]
#
# self.assertNotEqual(project_name, "")
# project = Project(name=project_name)
#
# self.test_backUp_obj.project = project
#
# self.assertEqual(self.test_backUp_obj.project, project)
#
# def test_project_argument_is_not_an_existing_Project(self):
# """testing if a RuntimeError will be raised when the given Project
# with the project argument is not an existing project instance
# """
# self.kwargs["project"] = "there is no project with this name"
# self.assertRaises(RuntimeError, BackUp, **self.kwargs)
#
# def test_project_attribute_is_not_an_existing_Project(self):
# """testing if a RuntimeError will be raised when the given Project with
# the project attribute is not an existing project
# """
# self.assertRaises(RuntimeError, setattr, self.test_backUp_obj,
# "project", "there is no project with this name")
#
# def test_extra_filter_rules_argument_is_skipped(self):
# """testing if extra_filter_rules attribute will be an empty string if
# the extra_filter_rules argument is an empty string
# """
# self.kwargs.pop("extra_filter_rules")
# new_BackUp_obj = BackUp(**self.kwargs)
# self.assertEqual(new_BackUp_obj.extra_filter_rules, "")
#
# def test_extra_filter_rules_argument_is_empty_string(self):
# """testing if extra_filter_rules attribute will be an empty string when
# the extra_filter_rules argument is an empty string
# """
# self.kwargs["extra_filter_rules"] = ""
# new_BackUp_obj = BackUp(**self.kwargs)
# self.assertEqual(new_BackUp_obj.extra_filter_rules, "")
#
# def test_extra_filter_rules_argument_is_not_a_string(self):
# """testing if a TypeError will be raised when the extra_filter_rules
# argument is not a string instance
# """
# self.kwargs["extra_filter_rules"] = 213132
# self.assertRaises(TypeError, BackUp, **self.kwargs)
#
# def test_extra_filter_rules_attribute_is_not_a_string(self):
# """testing if a TypeError will be raised when the extra_filter_rules
# attribute is not a string instance
# """
# self.assertRaises(TypeError, setattr, self.test_backUp_obj,
# "extra_filter_rules", 1234)
#
# def test_extra_filter_rules_argument_is_working_properly(self):
# """testing if extra_filter_rules attribute is set according to the
# value of extra_filter_rules argument
# """
# test_value = "test_value"
# self.kwargs["extra_filter_rules"] = test_value
# new_BackUp_obj = BackUp(**self.kwargs)
# self.assertEqual(new_BackUp_obj.extra_filter_rules, test_value)
#
# def test_extra_filter_rules_attribute_is_working_properly(self):
# """testing if the extra_filter_rules attribute is working properly
# """
# test_value = "test_value"
# self.test_backUp_obj.extra_filter_rules = test_value
# self.assertEqual(self.test_backUp_obj.extra_filter_rules, test_value)
#
# def test_output_argument_is_skipped(self):
# """testing if a TypeError will be raised when the output argument is
# skipped
# """
# self.kwargs.pop("output")
# self.assertRaises(TypeError, BackUp, **self.kwargs)
#
# def test_output_argument_is_empty_string(self):
# """testing if a ValueError will be raised when the output argument is
# an empty string
# """
# self.kwargs["output"] = ""
# self.assertRaises(ValueError, BackUp, **self.kwargs)
#
#
# def test_output_attribute_is_empty_string(self):
# """testing if a ValueError will be raised when the output attribute
# is set to an empty string
# """
# self.assertRaises(ValueError, setattr, self.test_backUp_obj,
# "output", "")
#
# def test_output_argument_is_not_a_string(self):
# """testing if a TypeError will be raised when the output argument is
# not a string instance
# """
# self.kwargs["output"] = 2134234
# self.assertRaises(TypeError, BackUp, **self.kwargs)
#
# def test_output_attribute_is_not_a_string(self):
# """testing if a TypeError will be raised when the output attribute is
# not a string instance
# """
# self.assertRaises(TypeError, setattr, self.test_backUp_obj, "output",
# 1231)
#
# def test_number_of_versions_argument_is_skipped(self):
# """testing if the value of the number_of_versions attribute will be
# the default value when the number_of_versions argument is skipped
# """
# self.kwargs.pop("number_of_versions")
# new_backup = BackUp(**self.kwargs)
# self.assertEqual(new_backup.num_of_versions, 1)
#
# def test_number_of_versions_argument_is_None(self):
# """testing if the value of the number_of_versions attribute will be
# the default value when the number_of_versions argument is None
# """
# self.kwargs["number_of_versions"] = None
# new_backup = BackUp(**self.kwargs)
# self.assertEqual(new_backup.num_of_versions, 1)
#
# def test_number_of_versions_attribute_is_None(self):
# """testing if the number_of_versions attribute will be set to the
# default value when it is set to None
# """
# self.test_backUp_obj.num_of_versions = None
# self.assertEqual(self.test_backUp_obj.num_of_versions, 1)
#
# def test_number_of_versions_argument_is_not_integer(self):
# """testing if a TypeError will be raised when the number_of_versions
# argument is not an integer
# """
# self.kwargs["number_of_versions"] = "not integer"
# self.assertRaises(TypeError, BackUp, **self.kwargs)
#
# def test_number_of_versions_attribute_is_not_integer(self):
# """testing if a TypeError will be raised when the number_of_versions
# attribute is set to a value which is not an integer
# """
# self.assertRaises(TypeError, setattr, self.test_backUp_obj,
#                           "num_of_versions", "not an integer")
#
# def test_number_of_versions_argument_accepts_negative_values(self):
# """testing if the number_of_version argument accepts negative values
# """
# test_value = -1
# self.kwargs["number_of_versions"] = test_value
# new_backup = BackUp(**self.kwargs)
# self.assertEqual(new_backup.num_of_versions, test_value)
#
#
##class BackUp_DoBackup_Tester(unittest.TestCase):
## """tests the backup process
## """
##
## def setUp(self):
## """setup the test
## """
##
## # -----------------------------------------------------------------
## # start of the setUp
## # create the environment variable and point it to a temp directory
## self.temp_config_folder = tempfile.mkdtemp()
## self.temp_projects_folder = tempfile.mkdtemp()
##
## os.environ["OYPROJECTMANAGER_PATH"] = self.temp_config_folder
## os.environ["REPO"] = self.temp_projects_folder
##
## # create a project
## self.test_project = Project(name="BACKUP_TEST_PROJECT")
## self.test_project.create()
##
## # create a couple of sequences
## self.test_seq1 = Sequence(self.test_project, "BACKUP_TEST_SEQ1")
## self.test_seq1.shots.append(Shot(self.test_seq1, 1))
## self.test_seq1.create()
##
## self.test_seq2 = Sequence(self.test_project, "BACKUP_TEST_SEQ2")
## self.test_seq2.shots.append(Shot(self.test_seq2, 1))
## self.test_seq2.create()
##
## # create an FX asset
## self.fx_asset = Asset(
## self.test_project,
## self.test_seq1,
## "SH001_MAIN_FX_r00_v001_oy.ma"
## )
##
## self.lighting_asset = Asset(
## self.test_project,
## self.test_seq1,
## "SH001_MAIN_LIGHTING_r00_v002_oy.ma"
## )
##
## self.compositing_asset1 = Asset(
## self.test_project,
## self.test_seq1,
## "SH001_MAIN_COMP_r00_v001_oy.nk"
## )
##
## self.compositing_asset2 = Asset(
## self.test_project,
## self.test_seq1,
## "SH001_MAIN_COMP_r00_v002_oy.nk"
## )
##
## # create the render image sequence
## self.imageSeq1 = pyseq.uncompress(
## os.path.join(
## self.test_seq1.full_path,
## self.fx_asset.output_path,
## "test_image_seq1.%03d.jpg 1-100",
## ),
## format="%h%p%t %r"
## )
##
## try:
## os.makedirs(
## os.path.dirname(
## self.imageSeq1.path()
## )
## )
## except OSError:
## pass
##
##
## self.imageSeq2 = pyseq.uncompress(
## os.path.join(
## self.test_seq1.full_path,
## self.fx_asset.output_path,
## "test_image_seq2.%03d.jpg 1-100",
## ),
## format="%h%p%t %r"
## )
##
## try:
## os.makedirs(
## os.path.dirname(
## self.imageSeq2.path()
## )
## )
## except OSError:
## pass
##
##
##
## self.imageSeq3 = pyseq.uncompress(
## os.path.join(
## self.test_seq1.full_path,
## self.fx_asset.output_path,
## "test_image_seq3.%03d.jpg 1-100",
## ),
## format="%h%p%t %r"
## )
##
## try:
## os.makedirs(
## os.path.dirname(
## self.imageSeq3.path()
## )
## )
## except OSError:
## pass
##
##
## self.imageSeq4 = pyseq.uncompress(
## os.path.join(
## self.test_seq1.full_path,
## self.compositing_asset2.output_path,
## os.path.splitext(self.compositing_asset2.fileName)[0] + \
## ".%03d.jpg 1-100",
## ),
## format="%h%p%t %r"
## )
##
## try:
## os.makedirs(
## os.path.dirname(
## self.imageSeq4.path()
## )
## )
## except OSError:
## pass
##
##
## for image in self.imageSeq1:
## subprocess.call(["touch", image.path], shell=False)
##
## for image in self.imageSeq2:
## subprocess.call(["touch", image.path], shell=False)
##
## for image in self.imageSeq3:
## subprocess.call(["touch", image.path], shell=False)
##
## for image in self.imageSeq4:
## subprocess.call(["touch", image.path], shell=False)
##
## # create a nuke file with several read and write nodes
## # open the nuke file
## self.nuke_file = open(
## os.path.join(
## self._test_files_folder,
## "nuke_file_template.nk"
## ),
## "r"
## )
##
## # render it as a jinja2 template
## nuke_template = jinja2.Template(self.nuke_file.read())
## self.nuke_file.close()
##
##
## # write it to the new path
## nuke_output_file = open(
## self.compositing_asset2.full_path,
## "w"
## )
##
## print self.compositing_asset2.full_path
##
##
## nuke_output_file.write(
## nuke_template.render(
## project_dir=self.test_seq1.full_path,
## comp_file_path=self.compositing_asset2.full_path
## )
## )
##
## nuke_output_file.close()
## # test the backup process
##
## # create a place to backup the files
## self.backup_path = tempfile.mkdtemp()
##
##
## def tearDown(self):
## """tear down the test
## """
## shutil.rmtree(self.temp_config_folder)
## shutil.rmtree(self.temp_projects_folder)
##
##
## def test_doBackUp_(self):
## """
## """
##
## # now back up the project
## backup_obj = BackUp(self.test_project.name, self.backup_path)
## backup_obj.doBackup()
##
## # now test if the project is created with all the paths in the backup
## # path
##
## # there should be three sequences in the backup path
## # read1 --> self.imageSeq2
## # read2 --> self.imageSeq3
## # write --> self.imageSeq4
##
##
## self.assertTrue(
## all(
## [os.path.exists(
## item.path.replace(
## self.test_project.full_path, self.backup_path
## )
## ) for item in self.imageSeq2]
## )
## )
##
## self.assertTrue(
## all(
## [os.path.exists(
## item.path.replace(
## self.test_project.full_path, self.backup_path
## )
## ) for item in self.imageSeq3]
## )
## )
##
## self.assertTrue(
## all(
## [os.path.exists(
## item.path.replace(
## self.test_project.full_path, self.backup_path
## )
## ) for item in self.imageSeq4]
## )
## )
#
#
#
| bsd-2-clause | -5,068,876,282,032,170,000 | 34.705179 | 80 | 0.538496 | false |
coxmediagroup/nodemeister | enc/api.py | 2 | 2198 | """
Classes, serializers, router registration for NodeMeister
ENC django-rest-framework API
"""
from rest_framework import viewsets, routers, serializers
from models import *
class NodeSerializer(serializers.ModelSerializer):
class Meta:
model = Node
fields = ('hostname',
'description',
'groups',
'excluded_groups',
'parameters',
'classes',
'excluded_params',
'excluded_classes',
'id'
)
class NodeViewSet(viewsets.ModelViewSet):
serializer_class = NodeSerializer
model = Node
class GroupSerializer(serializers.ModelSerializer):
class Meta:
model = Group
fields = ('name', 'description', 'groups', 'parents',
'parameters', 'classes', 'parameters', 'classes', 'id')
class GroupViewSet(viewsets.ModelViewSet):
serializer_class = GroupSerializer
model = Group
class NodeClassViewSet(viewsets.ModelViewSet):
model = NodeClass
class GroupClassSerializer(serializers.ModelSerializer):
class Meta:
model = GroupClass
fields = ('group', 'classname', 'classparams', 'id')
class GroupClassViewSet(viewsets.ModelViewSet):
serializer_class = GroupClassSerializer
model = GroupClass
class NodeParamViewSet(viewsets.ModelViewSet):
model = NodeParameter
class GroupParamViewSet(viewsets.ModelViewSet):
model = GroupParameter
class ParamExclusionViewSet(viewsets.ModelViewSet):
model = ParamExclusion
class ClassExclusionViewSet(viewsets.ModelViewSet):
model = ClassExclusion
# Routers provide an easy way of automatically determining the URL conf
router = routers.DefaultRouter()
router.register(r'nodes', NodeViewSet)
router.register(r'groups', GroupViewSet)
router.register(r'classes/nodes', NodeClassViewSet)
router.register(r'classes/groups', GroupClassViewSet)
router.register(r'parameters/nodes', NodeParamViewSet)
router.register(r'parameters/groups', GroupParamViewSet)
router.register(r'exclusions/parameters', ParamExclusionViewSet)
router.register(r'exclusions/classes', ClassExclusionViewSet)
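# Illustrative sketch added for clarity; it is not part of the original module.
# The registered router is normally exposed from the project's URLconf; the
# URL prefix and import style below are assumptions for demonstration only.
def example_urlpatterns():
    from django.conf.urls import include, url
    return [url(r'^enc/api/', include(router.urls))]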
| apache-2.0 | 8,018,006,906,134,616,000 | 26.135802 | 73 | 0.694722 | false |
mugizico/scikit-learn | sklearn/metrics/tests/test_common.py | 43 | 44042 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics wit common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": r2_score,
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input arguments y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metrics undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
# Those metrics don't support multiclass outputs
"average_precision_score", "weighted_average_precision_score",
"micro_average_precision_score", "macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_average_precision_score",
"roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
"macro_roc_auc", "samples_roc_auc",
"coverage_error",
"brier_score_loss",
"label_ranking_loss",
]
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" and
# "multilabel-sequence" format support
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Metrics symmetric with respect to their input arguments y_true and y_pred:
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error"
]
# Metrics not symmetric with respect to their input arguments y_true and y_pred:
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# Metrics without sample_weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"confusion_matrix",
"hamming_loss",
"matthews_corrcoef_score",
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
    # No metric should be left out of the symmetric/not-symmetric classification
assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
THRESHOLDED_METRICS,
METRIC_UNDEFINED_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
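    # Shuffling y_true and y_pred jointly must not change any metric value.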
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
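    # Same invariance check for multilabel indicator and multioutput continuous data.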
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
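    # Lists, 1d arrays and column vectors must all give the same score; ambiguous
    # mixes involving row vectors must raise.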
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
        # Mixed format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
        # These mixed representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics give the same results with string
    # labels as with numeric labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
        # Ugly, but handles metrics that take a pos_label or labels argument
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
"brier_score_loss"):
            # Ugly, but handles metrics that take a pos_label or labels argument
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO: these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
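    # Multioutput/multilabel metrics must also accept a single two-output sample.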
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS:
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
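    # A mismatch in the number of outputs between y_true and y_pred must raise.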
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
# using sequence of sequences is deprecated, but still tested
make_ml = ignore_warnings(make_multilabel_classification)
_, y1 = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y2 = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
# Be sure to have at least one empty label
y1 += ([], )
y2 += ([], )
# NOTE: The "sorted" trick is necessary to shuffle labels, because it
# allows to return the shuffled tuple.
rng = check_random_state(42)
shuffled = lambda x: sorted(x, key=lambda *args: rng.rand())
y1_shuffle = [shuffled(x) for x in y1]
y2_shuffle = [shuffled(x) for x in y2]
# Let's have redundant labels
y2_redundant = [x * rng.randint(1, 4) for x in y2]
# Binary indicator matrix format
lb = MultiLabelBinarizer().fit([range(n_classes)])
y1_binary_indicator = lb.transform(y1)
y2_binary_indicator = lb.transform(y2)
y1_sparse_indicator = sp.coo_matrix(y1_binary_indicator)
y2_sparse_indicator = sp.coo_matrix(y2_binary_indicator)
y1_shuffle_binary_indicator = lb.transform(y1_shuffle)
y2_shuffle_binary_indicator = lb.transform(y2_shuffle)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1_binary_indicator, y2_binary_indicator)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
# Check shuffling invariance with dense binary indicator matrix
assert_almost_equal(metric(y1_shuffle_binary_indicator,
y2_shuffle_binary_indicator), measure,
err_msg="%s failed shuffling invariance "
" with dense binary indicator format."
% name)
# Check deprecation warnings related to sequence of sequences
deprecated_metric = partial(assert_warns, DeprecationWarning, metric)
# Check representation invariance
assert_almost_equal(deprecated_metric(y1, y2),
measure,
err_msg="%s failed representation invariance "
"between list of list of labels "
"format and dense binary indicator "
"format." % name)
# Check invariance with redundant labels with list of labels
assert_almost_equal(deprecated_metric(y1, y2_redundant), measure,
err_msg="%s failed rendundant label invariance"
% name)
# Check shuffling invariance with list of labels
assert_almost_equal(deprecated_metric(y1_shuffle, y2_shuffle), measure,
err_msg="%s failed shuffling invariance "
"with list of list of labels format."
% name)
# Check raises error with mix input representation
assert_raises(ValueError, deprecated_metric, y1, y2_binary_indicator)
assert_raises(ValueError, deprecated_metric, y1_binary_indicator, y2)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# using sequence of sequences is deprecated, but still tested
make_ml = ignore_warnings(make_multilabel_classification)
_, y_true = make_ml(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples)
_, y_pred = make_ml(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples)
# Be sure to have at least one empty label
y_true += ([], )
y_pred += ([], )
n_samples += 1
lb = MultiLabelBinarizer().fit([range(n_classes)])
y_true_binary_indicator = lb.transform(y_true)
y_pred_binary_indicator = lb.transform(y_pred)
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
# List of list of labels
measure = assert_warns(DeprecationWarning, metrics, y_true, y_pred,
normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(ignore_warnings(metrics)(y_true, y_pred,
normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
# Indicator matrix format
measure = metrics(y_true_binary_indicator,
y_pred_binary_indicator, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true_binary_indicator,
y_pred_binary_indicator, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
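    # Check average=None/micro/macro/weighted (and samples for multilabel inputs)
    # against scores recomputed from the binarized labels.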
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
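    # Thresholded metrics are averaged over scores, the others over predictions.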
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
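    # Multiclass case: binarize the labels and check every averaging mode.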
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
return_indicator=True,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
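    # Degenerate case without any positive label: the weighted average must be 0.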
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
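    # Unit weights must match no weights, integer weights must match repeating
    # samples, and zero weights must match dropping the corresponding samples.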
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg="Weighted scores for array and list sample_weight input are "
"not equal (%f != %f) for %s" % (
weighted_score, weighted_score_list, name))
    # check that integer weights give the same result as repeating the samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that an error is raised if sample_weight.shape[0] != y_true.shape[0]
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary output
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel sequence
y_true = 2 * [(1, 2, ), (1, ), (0, ), (0, 1), (1, 2)]
y_pred = 2 * [(0, 2, ), (2, ), (0, ), (2, ), (1,)]
y_score = random_state.randn(10, 3)
for name in MULTILABELS_METRICS:
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
# multilabel indicator
_, ya = make_multilabel_classification(
n_features=1, n_classes=20,
random_state=0, n_samples=100,
return_indicator=True, allow_unlabeled=False)
_, yb = make_multilabel_classification(
n_features=1, n_classes=20,
random_state=1, n_samples=100,
return_indicator=True, allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
            if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause | -917,621,816,276,035,200 | 38.929284 | 79 | 0.612302 | false |
paolodedios/tensorflow | tensorflow/python/kernel_tests/partitioned_variables_test.py | 6 | 26698 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for partitioned_variables.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
class PartitionerCreatorsTest(test.TestCase):
def testFixedSizePartitioner(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
def testFixedSizePartitionerInt64(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(4, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable("v0", dtype=dtypes.int64, shape=[20])
v0_list = v0._get_variable_list()
self.assertEqual(len(v0_list), 4)
def testResourceFixedSizePartitioner(self):
with self.cached_session():
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope(
"root", partitioner=partitioner, use_resource=True):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
def _testVariableAxisSizePartitioner(self,
name,
axis,
max_shard_bytes,
expected_axis_shards,
expected_partitions,
max_shards=None):
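    # Build a (4, 8, 16, 32) float32 variable under variable_axis_size_partitioner
    # and check the resulting shard count and partition layout.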
partitioner = partitioned_variables.variable_axis_size_partitioner(
axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
name, dtype=dtypes.float32, shape=(4, 8, 16, 32))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
self.assertAllEqual(v0_part, expected_partitions)
def testVariableAxisSizePartitioner(self):
with self.cached_session():
# Create a partitioned variable of shape (4, 8, 16, 32) type float32
# Bytes per slice along the given axes:
# 8 * 16 * 32 * sizeof(float32) = 16384 / slice on axis 0
# 4 * 16 * 32 * sizeof(float32) = 8192 / slice on axis 1
# 4 * 8 * 32 * sizeof(float32) = 4096 / slice on axis 2
# 4 * 8 * 16 * sizeof(float32) = 2048 / slice on axis 3
# Now partition it in different ways...
# No need to slice: bytes_per_slice * dim0 = 65536 < max_shard_bytes
self._testVariableAxisSizePartitioner(
"v0",
axis=0,
max_shard_bytes=131072,
expected_axis_shards=1,
expected_partitions=(1, 1, 1, 1))
# Slice exactly once: bytes_per_slice * dim1 = 65536 = max_shard_bytes
self._testVariableAxisSizePartitioner(
"v1",
axis=1,
max_shard_bytes=65536,
expected_axis_shards=1,
expected_partitions=(1, 1, 1, 1))
# Slice into 2 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 16 / 8 = 2
self._testVariableAxisSizePartitioner(
"v2",
axis=2,
max_shard_bytes=32768,
expected_axis_shards=2,
expected_partitions=(1, 1, 2, 1))
# This partitioner makes sure we maximize the number of shards along
# axis 3. Slice it into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = 2048 / 2048 = 1
# axis_shards = 32 / 1 = 32
self._testVariableAxisSizePartitioner(
"v3a",
axis=3,
max_shard_bytes=2048,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32))
# This partitioner makes sure we do not go past the bound of allowable
# number of shards along axis 3.
# Slice into 32 parts:
# bytes_per_slice = 2048
# slices_per_shard = max(1, 1024 / 2048) = 1
# axis_shards = 32 / 1 = 32
# Slice into max of 32 parts because: max_shard_bytes < bytes_per_slice
self._testVariableAxisSizePartitioner(
"v3b",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32))
# Specify max_shards so that it won't affect sharding.
self._testVariableAxisSizePartitioner(
"v3c",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=32,
expected_partitions=(1, 1, 1, 32),
max_shards=33)
# Specify max_shards so that it will affect sharding.
self._testVariableAxisSizePartitioner(
"v3d",
axis=3,
max_shard_bytes=1024,
expected_axis_shards=2,
expected_partitions=(1, 1, 1, 2),
max_shards=2)
# Use the partitioner with strings
partitioner_axis3_str = partitioned_variables.variable_axis_size_partitioner( # pylint: disable=line-too-long
axis=3,
max_shard_bytes=32768,
bytes_per_string_element=8)
with variable_scope.variable_scope(
"root", partitioner=partitioner_axis3_str):
v3str = variable_scope.get_variable(
"v3str",
initializer=np.array([""] * 4 * 8 * 16 * 32).reshape(4, 8, 16, 32), # pylint: disable=too-many-function-args
dtype=dtypes.string,
shape=(4, 8, 16, 32))
v3str_list = v3str._get_variable_list()
v3str_part = v3str._get_partitions()
# Now the estimated bytes_per_slice = 4*8*16*bytes_per_string_element
# which is equal to 4096. Setting a max_shard_bytes of 32768
# and we should get a split of 4.
# Slice into 4 parts:
# bytes_per_slice = 4096
# slices_per_shard = 32768 / 4096 = 8
# axis_shards = 32 / 8 = 4
self.assertEqual(len(v3str_list), 4)
self.assertAllEqual(v3str_part, (1, 1, 1, 4))
def _testMinMaxVariablePartitioner(self, max_partitions, axis, min_slice_size,
var_name, var_shape, expected_axis_shards,
expected_partitions):
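    # Build a variable under min_max_variable_partitioner and check the resulting
    # shard count and partition layout.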
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=max_partitions, axis=axis, min_slice_size=min_slice_size)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
var_name, dtype=dtypes.float32, shape=var_shape)
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), expected_axis_shards)
self.assertAllEqual(v0_part, expected_partitions)
def testMinMaxVariablePartitioner(self):
with self.cached_session():
# Partitioning a variable of shape=[2048] with a minimum of 2K per slice.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=2 << 10,
var_name="v0_0",
var_shape=[2048],
expected_axis_shards=4,
expected_partitions=[4])
# Partitioning a variable of shape=[2048, 1024] with a minimum of 256K per
# slice.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v0",
var_shape=[2048, 1024],
expected_axis_shards=32,
expected_partitions=[32, 1])
# max_partitions restricts partitioning of the variable.
self._testMinMaxVariablePartitioner(
max_partitions=16,
axis=0,
min_slice_size=256 << 10,
var_name="v1_max",
var_shape=[2048, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1])
self._testMinMaxVariablePartitioner(
max_partitions=1,
axis=0,
min_slice_size=256 << 10,
var_name="v2_max",
var_shape=[2048, 1024],
expected_axis_shards=1,
expected_partitions=[1, 1])
# Reducing/Increasing min_slice_size proportionately increases/reduces the
# number of partitions.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=128 << 10,
var_name="v3_slice",
var_shape=[2048, 1024],
expected_axis_shards=64,
expected_partitions=[64, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=512 << 10,
var_name="v4_slice",
var_shape=[2048, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1])
# Partitioning the variable along a different axis.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=1,
min_slice_size=256 << 10,
var_name="v5_axis",
var_shape=[64, 1024, 1, 3],
expected_axis_shards=3,
expected_partitions=[1, 3, 1, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=3,
min_slice_size=256 << 10,
var_name="v6_axis",
var_shape=[64, 1024, 1, 3],
expected_axis_shards=3,
expected_partitions=[1, 1, 1, 3])
      # Cannot partition the variable into more parts than its shape allows.
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v7_shape",
var_shape=[16, 128, 1024],
expected_axis_shards=16,
expected_partitions=[16, 1, 1])
self._testMinMaxVariablePartitioner(
max_partitions=100,
axis=0,
min_slice_size=256 << 10,
var_name="v8_shape",
var_shape=[4, 512, 1024],
expected_axis_shards=4,
expected_partitions=[4, 1, 1])
def _IotaInitializer(shape, dtype=dtypes.float32, partition_info=None):
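  # Deterministic initializer: values along the leading axis are scaled by 10**i,
  # e.g. _IotaInitializer([4, 2]) == [[0, 1], [0, 10], [0, 100], [0, 1000]].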
assert dtype == dtypes.float32
if len(shape) == 1:
return range(shape[0])
else:
val = _IotaInitializer(shape[1:], dtype)
return [[(10**i) * v for v in val] for i in range(shape[0])]
class PartitionedVariablesTestCase(test.TestCase):
def _TestSaveSpec(self, slices, expected_specs):
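    # Each partition's save-slice spec ("full_shape offset,length:...") must match.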
self.assertEqual(len(expected_specs), len(slices))
for i in xrange(len(expected_specs)):
self.assertEqual(expected_specs[i], slices[i]._save_slice_info.spec)
def testVecConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([1, 2, 3, 4])
vs = partitioned_variables.create_partitioned_variables([4], [4], rnd_par)
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 4, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["4 0,1", "4 1,1", "4 2,1", "4 3,1"])
def testConstantInit(self):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
vs = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 1)
rnd = self.evaluate(rnd_par)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.int32] * 2, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, ["2 4 0,2:0,2", "2 4 0,2:2,2"])
def _testNameHelper(self, use_resource=False):
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope("hi", use_resource=use_resource):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hi/PartitionedVariable", var1_name)
self.assertEqual("hi/PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test same variable.
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with variable_scope.variable_scope(
"hola", use_resource=use_resource) as vs:
vs1 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
with variable_scope.variable_scope(
vs, reuse=True, use_resource=use_resource):
vs2 = partitioned_variables.create_partitioned_variables(
[2, 4], [1, 2], rnd_par, dtype=dtypes.int32)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
self.assertEqual("hola/PartitionedVariable", var1_name)
self.assertEqual("hola/PartitionedVariable", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
# Test name_scope
with self.cached_session():
rnd_par = constant_op.constant([[1, 2, 3, 4], [5, 6, 7, 8]])
with ops.name_scope("ola"):
vs1 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
vs2 = partitioned_variables.create_partitioned_variables([2, 4], [1, 2],
rnd_par)
self.evaluate(variables.global_variables_initializer())
var1_name = vs1[0]._save_slice_info.full_name
var2_name = vs2[0]._save_slice_info.full_name
# Currently, the name scope 'ola' has no effect.
self.assertEqual("PartitionedVariable", var1_name)
self.assertEqual("PartitionedVariable_1", var2_name)
self.assertEqual(var1_name + "/part_0:0", vs1[0].name)
self.assertEqual(var1_name + "/part_1:0", vs1[1].name)
self.assertEqual(var2_name + "/part_0:0", vs2[0].name)
self.assertEqual(var2_name + "/part_1:0", vs2[1].name)
@test_util.run_deprecated_v1
def testName(self):
self._testNameHelper(use_resource=False)
def testResourceName(self):
self._testNameHelper(use_resource=True)
def testRandomInitValue(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([200, 40]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 10], rnd.initialized_value())
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 1)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self.assertEqual([dtypes.float32] * 10, [v.dtype.base_dtype for v in vs])
self._TestSaveSpec(vs, [
"200 40 0,200:0,4", "200 40 0,200:4,4", "200 40 0,200:8,4",
"200 40 0,200:12,4", "200 40 0,200:16,4", "200 40 0,200:20,4",
"200 40 0,200:24,4", "200 40 0,200:28,4", "200 40 0,200:32,4",
"200 40 0,200:36,4"
])
def testRandomInitUnevenPartitions(self):
with self.cached_session():
rnd = variables.Variable(
random_ops.random_uniform([20, 43], dtype=dtypes.float64))
var_lists = [
partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, i], rnd.initialized_value())
for i in xrange(1, 10)
]
self.evaluate(variables.global_variables_initializer())
rnd_val = self.evaluate(rnd)
      # Only check the slice save specs for the first 5 partitionings.
save_specs = [
# One slice
["20 43 0,20:0,43"],
# Two slices
["20 43 0,20:0,22", "20 43 0,20:22,21"],
# Three slices
["20 43 0,20:0,15", "20 43 0,20:15,14", "20 43 0,20:29,14"],
# Four slices
[
"20 43 0,20:0,11", "20 43 0,20:11,11", "20 43 0,20:22,11",
"20 43 0,20:33,10"
],
# Five slices
[
"20 43 0,20:0,9", "20 43 0,20:9,9", "20 43 0,20:18,9",
"20 43 0,20:27,8", "20 43 0,20:35,8"
]
]
for i, vs in enumerate(var_lists):
var_val = array_ops.concat(vs, 1)
self.assertAllClose(rnd_val, var_val)
self.assertEqual([dtypes.float64] * len(vs),
[v.dtype.base_dtype for v in vs])
if i < len(save_specs):
self._TestSaveSpec(vs, save_specs[i])
def testDegenerate(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [1, 1], rnd.initialized_value())
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, ["10 43 0,10:0,43"])
def testSliceSizeOne(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
vs = partitioned_variables.create_partitioned_variables(
rnd.get_shape(), [10, 1], rnd.initialized_value())
self.evaluate(variables.global_variables_initializer())
val = array_ops.concat(vs, 0)
rnd = self.evaluate(rnd)
self.assertAllClose(rnd, val)
self._TestSaveSpec(vs, [
"10 43 0,1:0,43", "10 43 1,1:0,43", "10 43 2,1:0,43",
"10 43 3,1:0,43", "10 43 4,1:0,43", "10 43 5,1:0,43",
"10 43 6,1:0,43", "10 43 7,1:0,43", "10 43 8,1:0,43", "10 43 9,1:0,43"
])
def testIotaInitializer(self):
self.assertAllClose([0., 1., 2., 3.], _IotaInitializer([4]))
self.assertAllClose([[0., 1.], [0., 10.], [0., 100.], [0., 1000.]],
_IotaInitializer([4, 2]))
with self.cached_session():
vs = partitioned_variables.create_partitioned_variables([13, 5], [3, 1],
_IotaInitializer)
self.evaluate(variables.global_variables_initializer())
slice0 = _IotaInitializer([5, 5])
slice1 = _IotaInitializer([4, 5])
slice2 = _IotaInitializer([4, 5])
val = array_ops.concat(vs, 0)
self.assertAllClose(slice0 + slice1 + slice2, val)
self._TestSaveSpec(vs, ["13 5 0,5:0,5", "13 5 5,4:0,5", "13 5 9,4:0,5"])
@test_util.run_deprecated_v1
def testRandomInitializer(self):
    # Sanity check that the slices use different seeds when using a random
    # initializer function.
with self.cached_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer())
self.evaluate(variables.global_variables_initializer())
val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
# Negative test that proves that slices have the same values if
# the random initializer uses a seed.
with self.cached_session():
var0, var1 = partitioned_variables.create_partitioned_variables(
[20, 12], [1, 2], init_ops.random_uniform_initializer(seed=201))
self.evaluate(variables.global_variables_initializer())
val0, val1 = self.evaluate(var0).flatten(), self.evaluate(var1).flatten()
self.assertAllClose(val0, val1)
def testSomeErrors(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([10, 43]))
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10], [1, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 20], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 2, 3], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [11, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [20, 1], rnd.initialized_value())
with self.assertRaises(ValueError):
partitioned_variables.create_partitioned_variables(
[10, 43], [1, 50], rnd.initialized_value())
@test_util.run_deprecated_v1
def testControlDepsNone(self):
with self.cached_session() as session:
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
        # d gets the control dependency.
d = constant_op.constant(2.0)
# Partitioned variables do not.
var_x = variable_scope.get_variable(
"x",
shape=[2],
initializer=init_ops.ones_initializer(),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
ops_before_read = session.graph.get_operations()
var_x.as_tensor() # Caches the ops for subsequent reads.
reading_ops = [
op for op in session.graph.get_operations()
if op not in ops_before_read
]
self.assertEqual([c.op], d.op.control_inputs)
      # Tests that no control dependencies are added when reading a partitioned
      # variable, which matches the behaviour of reading a regular variable.
for op in reading_ops:
self.assertEqual([], op.control_inputs)
@test_util.run_deprecated_v1
def testConcat(self):
with self.cached_session() as session:
var_x = variable_scope.get_variable(
"x",
initializer=constant_op.constant([1., 2.]),
partitioner=partitioned_variables.variable_axis_size_partitioner(4))
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
ops_before_concat = session.graph.get_operations()
value = var_x._concat() # pylint: disable=protected-access
concat_ops = [
op for op in session.graph.get_operations()
if op not in ops_before_concat
]
concat_control_inputs = [
ci for op in concat_ops for ci in op.control_inputs
]
self.assertTrue(
c.op in concat_control_inputs,
"var_x._concat() should get control dependencies from its scope.")
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(value, var_x.as_tensor())
def testMetaGraphSaveLoad(self):
save_prefix = os.path.join(self.get_temp_dir(), "ckpt")
save_graph = ops.Graph()
with save_graph.as_default(), self.session(
graph=save_graph) as session:
partitioner = partitioned_variables.fixed_size_partitioner(5, axis=0)
with variable_scope.variable_scope("root", partitioner=partitioner):
v0 = variable_scope.get_variable(
"v0", dtype=dtypes.float32, shape=(10, 10))
v0_list = v0._get_variable_list()
v0_part = v0._get_partitions()
self.assertEqual(len(v0_list), 5)
self.assertAllEqual(v0_part, (5, 1))
self.evaluate(variables.global_variables_initializer())
save_graph.get_collection_ref("partvar").append(v0)
saver = saver_lib.Saver()
save_graph.finalize()
save_path = saver.save(sess=session, save_path=save_prefix)
previous_value = session.run(
save_graph.get_tensor_by_name(v0.name + ":0"))
restore_graph = ops.Graph()
with restore_graph.as_default(), self.session(
graph=restore_graph) as session:
saver = saver_lib.import_meta_graph(save_path + ".meta")
saver.restore(sess=session, save_path=save_path)
v0, = save_graph.get_collection_ref("partvar")
self.assertIsInstance(v0, variables.PartitionedVariable)
self.assertAllEqual(
previous_value,
session.run(restore_graph.get_tensor_by_name(v0.name + ":0")))
if __name__ == "__main__":
test.main()
| apache-2.0 | -3,853,548,554,753,405,400 | 40.780908 | 121 | 0.607424 | false |
diofant/diofant | diofant/vector/__init__.py | 2 | 1125 | """
Package for symbolic vector algebra in 3D.
"""
from .coordsysrect import CoordSysCartesian
from .deloperator import Del
from .dyadic import BaseDyadic, Dyadic, DyadicAdd, DyadicMul, DyadicZero
from .functions import (curl, divergence, express, gradient, is_conservative,
is_solenoidal, matrix_to_vector, scalar_potential,
scalar_potential_difference)
from .orienters import (AxisOrienter, BodyOrienter, QuaternionOrienter,
SpaceOrienter)
from .point import Point
from .scalar import BaseScalar
from .vector import BaseVector, Vector, VectorAdd, VectorMul, VectorZero
__all__ = ('CoordSysCartesian', 'Del', 'BaseDyadic', 'Dyadic', 'DyadicAdd',
'DyadicMul', 'DyadicZero', 'curl', 'divergence', 'express',
'gradient', 'is_conservative', 'is_solenoidal', 'matrix_to_vector',
'scalar_potential', 'scalar_potential_difference', 'AxisOrienter',
'BodyOrienter', 'QuaternionOrienter', 'SpaceOrienter', 'Point',
'BaseScalar', 'BaseVector', 'Vector', 'VectorAdd',
'VectorMul', 'VectorZero')
| bsd-3-clause | 1,192,021,053,387,874,800 | 45.875 | 78 | 0.672 | false |
dongweiming/code | vilya/models/activity.py | 3 | 3786 | # -*- coding: utf-8 -*-
from vilya.config import DOMAIN as CODE_URL
class Activity(object):
def __init__(self, data):
self.data = data
def to_line(self):
return format_activity(self.data)
def format_activity(data):
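    # Render a raw activity dict as a one-line HTML snippet, dispatching on type.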
type = data.get('type')
if type in ('team_created', 'team_joined'):
_data = _format_team_data(data)
elif type in ('recommend', 'commit_comment', 'push'):
_data = _format_people_data(data)
    elif type in ('issue',):
        _data = _format_issue_data(data)
    elif type in ('issue_comment',):
        _data = _format_issue_comment_data(data)
    elif type in ('pull_request', 'code_review'):
        _data = _format_pullrequest_data(data)
    else:
        # Fall back to an empty line for unknown activity types instead of
        # hitting an UnboundLocalError on the return below.
        _data = ''
    return _data
def _format_repository_data(data):
return ''
def _format_team_data(data):
_author = data.get('author')
_team = data.get('name')
if data['type'] == 'team_created':
type = 'created'
elif data['type'] == 'team_joined':
type = 'joined'
return "%s %s team:<a href=\"%s\">%s</a>" % (
_author, type, CODE_URL + data.get('url'), _team)
def _format_issue_data(data):
if data['state'] == 'closed':
type = 'closed'
elif data['state'] == 'open':
type = 'opened'
_author = data.get('author')
tmpl = "%s %s issue:<a href=\"%s\">%s</a> on <a href=\"%s\">%s</a>"
return tmpl % (_author, type, CODE_URL + data.get('url'),
data.get('title'), CODE_URL + data.get('target_url'),
data.get('target_name'))
def _format_issue_comment_data(data):
_author = data.get('author')
tmpl = "%s commented issue:<a href=\"%s\">%s</a> on <a href=\"%s\">%s</a>"
return tmpl % (_author, CODE_URL + data.get('url'), data.get('title'),
CODE_URL + data.get('target_url'), data.get('target_name'))
def _format_pullrequest_data(data):
if data['type'] == 'pull_request':
if data['status'] == 'merged':
type = 'merged'
_author = data.get('owner')
elif data['status'] == 'unmerge':
type = 'opened'
_author = data.get('commiter')
elif data['status'] == 'closed':
type = 'closed'
_author = data.get('commiter')
else:
type = '?'
_author = '?'
_title = data.get('title')
_url = data.get('url')
_project = data.get('to_proj', '')
elif data['type'] == 'code_review':
type = 'commented'
_author = data.get('author')
_title = data.get('ticket')
_url = data.get('url')
_project = data.get('proj', '')
_project_url = "/%s" % _project.split(':')[0]
tmpl = "%s %s pr:<a href=\"%s\">%s</a> on <a href=\"%s\">%s</a>"
return tmpl % (_author, type, CODE_URL + _url, _title,
CODE_URL + _project_url, _project)
def _format_people_data(data):
if data['type'] == 'recommend':
return ''
elif data['type'] == 'commit_comment':
type = 'commented'
_author = data.get('sender')
_project = data.get('proj')
_project_url = "/%s" % _project
return "%s %s <a href=\"%s\">%s</a> on <a href=\"%s\">%s</a>" % (
_author, type, CODE_URL + data.get('url'), data.get('ref'),
CODE_URL + _project_url, _project)
elif data['type'] == 'push':
type = 'pushed'
_project = data.get('repo_name')
_project_url = "/%s" % _project
_project = _project + ":" + data.get('branch')
return "%s %s <a href=\"%s\">%s</a>" % ("someone",
type,
CODE_URL + _project_url,
_project)
return ''
| bsd-3-clause | 1,793,018,788,363,888,000 | 32.504425 | 78 | 0.496038 | false |
jjanssen/django-cms-patches | cms/tests/site.py | 2 | 1530 | from django.conf import settings
from cms.models import Page
from cms.tests.base import CMSTestCase
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
class SiteTestCase(CMSTestCase):
"""Site framework specific test cases.
All stuff which is changing settings.SITE_ID for tests should come here.
"""
def setUp(self):
settings.SITE_ID = 1
u = User(username="test", is_staff = True, is_active = True, is_superuser = True)
u.set_password("test")
u.save()
# setup sites
Site(domain="sample2.com", name="sample2.com").save() # pk 2
Site(domain="sample3.com", name="sample3.com").save() # pk 3
self.login_user(u)
def test_01_site_framework(self):
        # Test the site framework, and test if it's possible to disable it
settings.SITE_ID = 2
page_2a = self.create_page(site=2)
page_3b = self.create_page(site=3)
settings.SITE_ID = 3
page_3a = self.create_page(site=3)
# with param
self.assertEqual(Page.objects.on_site(2).count(), 1)
self.assertEqual(Page.objects.on_site(3).count(), 2)
# without param
settings.SITE_ID = 3
self.assertEqual(Page.objects.drafts().on_site().count(), 2)
settings.SITE_ID = 2
self.assertEqual(Page.objects.drafts().on_site().count(), 1)
settings.SITE_ID = 1
| bsd-3-clause | -7,804,570,583,980,815,000 | 30.244898 | 89 | 0.587582 | false |
zouyapeng/horizon_change | openstack_dashboard/dashboards/project/access_and_security/floating_ips/urls.py | 7 | 1109 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.access_and_security.\
floating_ips import views
urlpatterns = patterns('',
url(r'^associate/$', views.AssociateView.as_view(), name='associate'),
url(r'^allocate/$', views.AllocateView.as_view(), name='allocate')
)
| apache-2.0 | -7,004,164,407,005,683,000 | 37.241379 | 78 | 0.739405 | false |
marxin/youtube-dl | youtube_dl/extractor/turbo.py | 37 | 2432 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
qualities,
xpath_text,
)
class TurboIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P<id>[0-9]+)-'
_API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}'
_TEST = {
'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html',
'md5': '33f4b91099b36b5d5a91f84b5bcba600',
'info_dict': {
'id': '454443',
'ext': 'mp4',
'duration': 3715,
'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
'description': 'Retrouvez dans cette rubrique toutes les vidéos de l\'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
'thumbnail': 're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
playlist = self._download_xml(self._API_URL.format(video_id), video_id)
item = playlist.find('./channel/item')
if item is None:
raise ExtractorError('Playlist item was not found', expected=True)
title = xpath_text(item, './title', 'title')
duration = int_or_none(xpath_text(item, './durate', 'duration'))
thumbnail = xpath_text(item, './visuel_clip', 'thumbnail')
description = self._og_search_description(webpage)
formats = []
get_quality = qualities(['3g', 'sd', 'hq'])
for child in item:
m = re.search(r'url_video_(?P<quality>.+)', child.tag)
if m:
quality = m.group('quality')
formats.append({
'format_id': quality,
'url': child.text,
'quality': get_quality(quality),
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'duration': duration,
'thumbnail': thumbnail,
'description': description,
'formats': formats,
}
| unlicense | 7,689,451,455,776,048,000 | 35.283582 | 180 | 0.558618 | false |
genome-vendor/cython | build/lib.linux-x86_64-2.6/Cython/Compiler/Symtab.py | 7 | 94602 | #
# Symbol Table
#
import copy
import re
from Errors import warning, error, InternalError
from StringEncoding import EncodedString
import Options, Naming
import PyrexTypes
from PyrexTypes import py_object_type, unspecified_type
from TypeSlots import \
pyfunction_signature, pymethod_signature, \
get_special_method_signature, get_property_accessor_signature
import Code
import __builtin__ as builtins
iso_c99_keywords = set(
['auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if',
'int', 'long', 'register', 'return', 'short', 'signed', 'sizeof',
'static', 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void',
'volatile', 'while',
    '_Bool', '_Complex', '_Imaginary', 'inline', 'restrict'])
def c_safe_identifier(cname):
# There are some C limitations on struct entry names.
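    # Illustrative examples: c_safe_identifier("switch") and
    # c_safe_identifier("__dict") both get Naming.pyrex_prefix prepended so they
    # cannot collide with C keywords or reserved double-underscore names, while
    # c_safe_identifier("__weakref__") is passed through unchanged.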
if ((cname[:2] == '__'
and not (cname.startswith(Naming.pyrex_prefix)
or cname == '__weakref__'))
or cname in iso_c99_keywords):
cname = Naming.pyrex_prefix + cname
return cname
class BufferAux(object):
writable_needed = False
def __init__(self, buflocal_nd_var, rcbuf_var):
self.buflocal_nd_var = buflocal_nd_var
self.rcbuf_var = rcbuf_var
def __repr__(self):
return "<BufferAux %r>" % self.__dict__
class Entry(object):
# A symbol table entry in a Scope or ModuleNamespace.
#
# name string Python name of entity
# cname string C name of entity
# type PyrexType Type of entity
# doc string Doc string
# init string Initial value
# visibility 'private' or 'public' or 'extern'
# is_builtin boolean Is an entry in the Python builtins dict
# is_cglobal boolean Is a C global variable
# is_pyglobal boolean Is a Python module-level variable
# or class attribute during
# class construction
# is_member boolean Is an assigned class member
# is_pyclass_attr boolean Is a name in a Python class namespace
# is_variable boolean Is a variable
# is_cfunction boolean Is a C function
# is_cmethod boolean Is a C method of an extension type
# is_builtin_cmethod boolean Is a C method of a builtin type (implies is_cmethod)
# is_unbound_cmethod boolean Is an unbound C method of an extension type
# is_final_cmethod boolean Is non-overridable C method
# is_inline_cmethod boolean Is inlined C method
# is_anonymous boolean Is a anonymous pyfunction entry
# is_type boolean Is a type definition
# is_cclass boolean Is an extension class
# is_cpp_class boolean Is a C++ class
# is_const boolean Is a constant
# is_property boolean Is a property of an extension type:
# doc_cname string or None C const holding the docstring
# getter_cname string C func for getting property
# setter_cname string C func for setting or deleting property
# is_self_arg boolean Is the "self" arg of an exttype method
# is_arg boolean Is the arg of a method
# is_local boolean Is a local variable
# in_closure boolean Is referenced in an inner scope
# is_readonly boolean Can't be assigned to
# func_cname string C func implementing Python func
# func_modifiers [string] C function modifiers ('inline')
# pos position Source position where declared
# namespace_cname string If is_pyglobal, the C variable
# holding its home namespace
# pymethdef_cname string PyMethodDef structure
# signature Signature Arg & return types for Python func
# as_variable Entry Alternative interpretation of extension
# type name or builtin C function as a variable
# xdecref_cleanup boolean Use Py_XDECREF for error cleanup
# in_cinclude boolean Suppress C declaration code
# enum_values [Entry] For enum types, list of values
# qualified_name string "modname.funcname" or "modname.classname"
# or "modname.classname.funcname"
# is_declared_generic boolean Is declared as PyObject * even though its
# type is an extension type
# as_module None Module scope, if a cimported module
# is_inherited boolean Is an inherited attribute of an extension type
# pystring_cname string C name of Python version of string literal
# is_interned boolean For string const entries, value is interned
# is_identifier boolean For string const entries, value is an identifier
# used boolean
# is_special boolean Is a special method or property accessor
# of an extension type
# defined_in_pxd boolean Is defined in a .pxd file (not just declared)
# api boolean Generate C API for C class or function
# utility_code string Utility code needed when this entry is used
#
# buffer_aux BufferAux or None Extra information needed for buffer variables
# inline_func_in_pxd boolean Hacky special case for inline function in pxd file.
    #                              Ideally this should not be necessary.
# might_overflow boolean In an arithmetic expression that could cause
# overflow (used for type inference).
# utility_code_definition For some Cython builtins, the utility code
# which contains the definition of the entry.
# Currently only supported for CythonScope entries.
# error_on_uninitialized Have Control Flow issue an error when this entry is
# used uninitialized
# cf_used boolean Entry is used
# is_fused_specialized boolean Whether this entry of a cdef or def function
# is a specialization
# TODO: utility_code and utility_code_definition serves the same purpose...
inline_func_in_pxd = False
borrowed = 0
init = ""
visibility = 'private'
is_builtin = 0
is_cglobal = 0
is_pyglobal = 0
is_member = 0
is_pyclass_attr = 0
is_variable = 0
is_cfunction = 0
is_cmethod = 0
is_builtin_cmethod = False
is_unbound_cmethod = 0
is_final_cmethod = 0
is_inline_cmethod = 0
is_anonymous = 0
is_type = 0
is_cclass = 0
is_cpp_class = 0
is_const = 0
is_property = 0
doc_cname = None
getter_cname = None
setter_cname = None
is_self_arg = 0
is_arg = 0
is_local = 0
in_closure = 0
from_closure = 0
is_declared_generic = 0
is_readonly = 0
pyfunc_cname = None
func_cname = None
func_modifiers = []
final_func_cname = None
doc = None
as_variable = None
xdecref_cleanup = 0
in_cinclude = 0
as_module = None
is_inherited = 0
pystring_cname = None
is_identifier = 0
is_interned = 0
used = 0
is_special = 0
defined_in_pxd = 0
is_implemented = 0
api = 0
utility_code = None
is_overridable = 0
buffer_aux = None
prev_entry = None
might_overflow = 0
fused_cfunction = None
is_fused_specialized = False
utility_code_definition = None
needs_property = False
in_with_gil_block = 0
from_cython_utility_code = None
error_on_uninitialized = False
cf_used = True
outer_entry = None
def __init__(self, name, cname, type, pos = None, init = None):
self.name = name
self.cname = cname
self.type = type
self.pos = pos
self.init = init
self.overloaded_alternatives = []
self.cf_assignments = []
self.cf_references = []
self.inner_entries = []
self.defining_entry = self
def __repr__(self):
return "%s(<%x>, name=%s, type=%s)" % (type(self).__name__, id(self), self.name, self.type)
def redeclared(self, pos):
error(pos, "'%s' does not match previous declaration" % self.name)
error(self.pos, "Previous declaration is here")
def all_alternatives(self):
return [self] + self.overloaded_alternatives
def all_entries(self):
return [self] + self.inner_entries
class InnerEntry(Entry):
"""
An entry in a closure scope that represents the real outer Entry.
"""
from_closure = True
def __init__(self, outer_entry, scope):
Entry.__init__(self, outer_entry.name,
outer_entry.cname,
outer_entry.type,
outer_entry.pos)
self.outer_entry = outer_entry
self.scope = scope
# share state with (outermost) defining entry
outermost_entry = outer_entry
while outermost_entry.outer_entry:
outermost_entry = outermost_entry.outer_entry
self.defining_entry = outermost_entry
self.inner_entries = outermost_entry.inner_entries
self.cf_assignments = outermost_entry.cf_assignments
self.cf_references = outermost_entry.cf_references
self.overloaded_alternatives = outermost_entry.overloaded_alternatives
self.inner_entries.append(self)
def __getattr__(self, name):
return getattr(self.defining_entry, name)
def all_entries(self):
return self.defining_entry.all_entries()
class Scope(object):
# name string Unqualified name
# outer_scope Scope or None Enclosing scope
# entries {string : Entry} Python name to entry, non-types
# const_entries [Entry] Constant entries
# type_entries [Entry] Struct/union/enum/typedef/exttype entries
# sue_entries [Entry] Struct/union/enum entries
# arg_entries [Entry] Function argument entries
# var_entries [Entry] User-defined variable entries
# pyfunc_entries [Entry] Python function entries
# cfunc_entries [Entry] C function entries
# c_class_entries [Entry] All extension type entries
# cname_to_entry {string : Entry} Temp cname to entry mapping
# return_type PyrexType or None Return type of function owning scope
# is_builtin_scope boolean Is the builtin scope of Python/Cython
# is_py_class_scope boolean Is a Python class scope
# is_c_class_scope boolean Is an extension type scope
# is_closure_scope boolean Is a closure scope
# is_passthrough boolean Outer scope is passed directly
# is_cpp_class_scope boolean Is a C++ class scope
# is_property_scope boolean Is a extension type property scope
# scope_prefix string Disambiguator for C names
# in_cinclude boolean Suppress C declaration code
    # qualified_name    string            "modname" or "modname.classname"
    # pystring_entries  [Entry]           String const entries newly used as
    #                                         Python strings in this scope
# nogil boolean In a nogil section
# directives dict Helper variable for the recursive
# analysis, contains directive values.
# is_internal boolean Is only used internally (simpler setup)
is_builtin_scope = 0
is_py_class_scope = 0
is_c_class_scope = 0
is_closure_scope = 0
is_passthrough = 0
is_cpp_class_scope = 0
is_property_scope = 0
is_module_scope = 0
is_internal = 0
scope_prefix = ""
in_cinclude = 0
nogil = 0
fused_to_specific = None
def __init__(self, name, outer_scope, parent_scope):
# The outer_scope is the next scope in the lookup chain.
# The parent_scope is used to derive the qualified name of this scope.
self.name = name
self.outer_scope = outer_scope
self.parent_scope = parent_scope
mangled_name = "%d%s_" % (len(name), name)
qual_scope = self.qualifying_scope()
if qual_scope:
self.qualified_name = qual_scope.qualify_name(name)
self.scope_prefix = qual_scope.scope_prefix + mangled_name
else:
self.qualified_name = EncodedString(name)
self.scope_prefix = mangled_name
self.entries = {}
self.const_entries = []
self.type_entries = []
self.sue_entries = []
self.arg_entries = []
self.var_entries = []
self.pyfunc_entries = []
self.cfunc_entries = []
self.c_class_entries = []
self.defined_c_classes = []
self.imported_c_classes = {}
self.cname_to_entry = {}
self.string_to_entry = {}
self.identifier_to_entry = {}
self.num_to_entry = {}
self.obj_to_entry = {}
self.buffer_entries = []
self.lambda_defs = []
self.return_type = None
self.id_counters = {}
def __deepcopy__(self, memo):
return self
def merge_in(self, other, merge_unused=True, whitelist=None):
# Use with care...
entries = []
for name, entry in other.entries.iteritems():
if not whitelist or name in whitelist:
if entry.used or merge_unused:
entries.append((name, entry))
self.entries.update(entries)
for attr in ('const_entries',
'type_entries',
'sue_entries',
'arg_entries',
'var_entries',
'pyfunc_entries',
'cfunc_entries',
'c_class_entries'):
self_entries = getattr(self, attr)
names = set([e.name for e in self_entries])
for entry in getattr(other, attr):
if (entry.used or merge_unused) and entry.name not in names:
self_entries.append(entry)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.qualified_name)
def qualifying_scope(self):
return self.parent_scope
def mangle(self, prefix, name = None):
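        # Illustrative example (assuming Naming.var_prefix == "__pyx_v_"):
        # for a class 'Foo' in module 'spam', mangle("__pyx_v_", "x") yields
        # "__pyx_v_4spam_3Foo_x" via the accumulated scope_prefix.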
if name:
return "%s%s%s" % (prefix, self.scope_prefix, name)
else:
return self.parent_scope.mangle(prefix, self.name)
def mangle_internal(self, name):
# Mangle an internal name so as not to clash with any
# user-defined name in this scope.
prefix = "%s%s_" % (Naming.pyrex_prefix, name)
return self.mangle(prefix)
#return self.parent_scope.mangle(prefix, self.name)
def mangle_class_private_name(self, name):
if self.parent_scope:
return self.parent_scope.mangle_class_private_name(name)
return name
def next_id(self, name=None):
# Return a cname fragment that is unique for this module
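        # e.g. repeated calls with name='genexpr' yield 'genexpr', 'genexpr1',
        # 'genexpr2', ...; the first occurrence gets no numeric suffix.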
counters = self.global_scope().id_counters
try:
count = counters[name] + 1
except KeyError:
count = 0
counters[name] = count
if name:
if not count:
# unique names don't need a suffix, reoccurrences will get one
return name
return '%s%d' % (name, count)
else:
return '%d' % count
def global_scope(self):
""" Return the module-level scope containing this scope. """
return self.outer_scope.global_scope()
def builtin_scope(self):
""" Return the module-level scope containing this scope. """
return self.outer_scope.builtin_scope()
def declare(self, name, cname, type, pos, visibility, shadow = 0, is_type = 0):
# Create new entry, and add to dictionary if
# name is not None. Reports a warning if already
# declared.
if type.is_buffer and not isinstance(self, LocalScope): # and not is_type:
error(pos, 'Buffer types only allowed as function local variables')
if not self.in_cinclude and cname and re.match("^_[_A-Z]+$", cname):
# See http://www.gnu.org/software/libc/manual/html_node/Reserved-Names.html#Reserved-Names
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries and not shadow:
if visibility == 'extern':
warning(pos, "'%s' redeclared " % name, 0)
elif visibility != 'ignore':
error(pos, "'%s' redeclared " % name)
entry = Entry(name, cname, type, pos = pos)
entry.in_cinclude = self.in_cinclude
if name:
entry.qualified_name = self.qualify_name(name)
# if name in entries and self.is_cpp():
# entries[name].overloaded_alternatives.append(entry)
# else:
# entries[name] = entry
if not shadow:
entries[name] = entry
if type.is_memoryviewslice:
import MemoryView
entry.init = MemoryView.memslice_entry_init
entry.scope = self
entry.visibility = visibility
return entry
def qualify_name(self, name):
return EncodedString("%s.%s" % (self.qualified_name, name))
def declare_const(self, name, type, value, pos, cname = None, visibility = 'private', api = 0):
# Add an entry for a named constant.
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.enum_prefix, name)
entry = self.declare(name, cname, type, pos, visibility)
entry.is_const = 1
entry.value_node = value
return entry
def declare_type(self, name, type, pos,
cname = None, visibility = 'private', api = 0, defining = 1,
shadow = 0, template = 0):
# Add an entry for a type definition.
if not cname:
cname = name
entry = self.declare(name, cname, type, pos, visibility, shadow,
is_type=True)
entry.is_type = 1
entry.api = api
if defining:
self.type_entries.append(entry)
if not template:
type.entry = entry
# here we would set as_variable to an object representing this type
return entry
def declare_typedef(self, name, base_type, pos, cname = None,
visibility = 'private', api = 0):
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
try:
type = PyrexTypes.create_typedef_type(name, base_type, cname,
(visibility == 'extern'))
except ValueError, e:
error(pos, e.args[0])
type = PyrexTypes.error_type
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, api = api)
type.qualified_name = entry.qualified_name
return entry
def declare_struct_or_union(self, name, kind, scope,
typedef_flag, pos, cname = None,
visibility = 'private', api = 0,
packed = False):
# Add an entry for a struct or union definition.
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
entry = self.lookup_here(name)
if not entry:
type = PyrexTypes.CStructOrUnionType(
name, kind, scope, typedef_flag, cname, packed)
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, api = api,
defining = scope is not None)
self.sue_entries.append(entry)
type.entry = entry
else:
if not (entry.is_type and entry.type.is_struct_or_union
and entry.type.kind == kind):
warning(pos, "'%s' redeclared " % name, 0)
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
else:
self.check_previous_typedef_flag(entry, typedef_flag, pos)
self.check_previous_visibility(entry, visibility, pos)
if scope:
entry.type.scope = scope
self.type_entries.append(entry)
return entry
def declare_cpp_class(self, name, scope,
pos, cname = None, base_classes = (),
visibility = 'extern', templates = None):
if cname is None:
if self.in_cinclude or (visibility != 'private'):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
base_classes = list(base_classes)
entry = self.lookup_here(name)
if not entry:
type = PyrexTypes.CppClassType(
name, scope, cname, base_classes, templates = templates)
entry = self.declare_type(name, type, pos, cname,
visibility = visibility, defining = scope is not None)
self.sue_entries.append(entry)
else:
if not (entry.is_type and entry.type.is_cpp_class):
error(pos, "'%s' redeclared " % name)
return None
elif scope and entry.type.scope:
warning(pos, "'%s' already defined (ignoring second definition)" % name, 0)
else:
if scope:
entry.type.scope = scope
self.type_entries.append(entry)
if base_classes:
if entry.type.base_classes and entry.type.base_classes != base_classes:
error(pos, "Base type does not match previous declaration")
else:
entry.type.base_classes = base_classes
if templates or entry.type.templates:
if templates != entry.type.templates:
error(pos, "Template parameters do not match previous declaration")
def declare_inherited_attributes(entry, base_classes):
for base_class in base_classes:
if base_class is PyrexTypes.error_type:
continue
if base_class.scope is None:
error(pos, "Cannot inherit from incomplete type")
else:
declare_inherited_attributes(entry, base_class.base_classes)
entry.type.scope.declare_inherited_cpp_attributes(base_class.scope)
if entry.type.scope:
declare_inherited_attributes(entry, base_classes)
entry.type.scope.declare_var(name="this", cname="this", type=PyrexTypes.CPtrType(entry.type), pos=entry.pos)
if self.is_cpp_class_scope:
entry.type.namespace = self.outer_scope.lookup(self.name).type
return entry
def check_previous_typedef_flag(self, entry, typedef_flag, pos):
if typedef_flag != entry.type.typedef_flag:
error(pos, "'%s' previously declared using '%s'" % (
entry.name, ("cdef", "ctypedef")[entry.type.typedef_flag]))
def check_previous_visibility(self, entry, visibility, pos):
if entry.visibility != visibility:
error(pos, "'%s' previously declared as '%s'" % (
entry.name, entry.visibility))
def declare_enum(self, name, pos, cname, typedef_flag,
visibility = 'private', api = 0):
if name:
if not cname:
if self.in_cinclude or (visibility == 'public' or api):
cname = name
else:
cname = self.mangle(Naming.type_prefix, name)
type = PyrexTypes.CEnumType(name, cname, typedef_flag)
else:
type = PyrexTypes.c_anon_enum_type
entry = self.declare_type(name, type, pos, cname = cname,
visibility = visibility, api = api)
entry.enum_values = []
self.sue_entries.append(entry)
return entry
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
# Add an entry for a variable.
if not cname:
if visibility != 'private' or api:
cname = name
else:
cname = self.mangle(Naming.var_prefix, name)
if type.is_cpp_class and visibility != 'extern':
type.check_nullary_constructor(pos)
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
if in_pxd and visibility != 'extern':
entry.defined_in_pxd = 1
entry.used = 1
if api:
entry.api = 1
entry.used = 1
return entry
def declare_builtin(self, name, pos):
return self.outer_scope.declare_builtin(name, pos)
def _declare_pyfunction(self, name, pos, visibility='extern', entry=None):
if entry and not entry.type.is_cfunction:
error(pos, "'%s' already declared" % name)
error(entry.pos, "Previous declaration is here")
entry = self.declare_var(name, py_object_type, pos, visibility=visibility)
entry.signature = pyfunction_signature
self.pyfunc_entries.append(entry)
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False, visibility='extern'):
# Add an entry for a Python function.
entry = self.lookup_here(name)
if not allow_redefine:
return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
if entry:
if entry.type.is_unspecified:
entry.type = py_object_type
elif entry.type is not py_object_type:
return self._declare_pyfunction(name, pos, visibility=visibility, entry=entry)
else: # declare entry stub
self.declare_var(name, py_object_type, pos, visibility=visibility)
entry = self.declare_var(None, py_object_type, pos,
cname=name, visibility='private')
entry.name = EncodedString(name)
entry.qualified_name = self.qualify_name(name)
entry.signature = pyfunction_signature
entry.is_anonymous = True
return entry
def declare_lambda_function(self, lambda_name, pos):
# Add an entry for an anonymous Python function.
func_cname = self.mangle(Naming.lambda_func_prefix + u'funcdef_', lambda_name)
pymethdef_cname = self.mangle(Naming.lambda_func_prefix + u'methdef_', lambda_name)
qualified_name = self.qualify_name(lambda_name)
entry = self.declare(None, func_cname, py_object_type, pos, 'private')
entry.name = lambda_name
entry.qualified_name = qualified_name
entry.pymethdef_cname = pymethdef_cname
entry.func_cname = func_cname
entry.signature = pyfunction_signature
entry.is_anonymous = True
return entry
def add_lambda_def(self, def_node):
self.lambda_defs.append(def_node)
def register_pyfunction(self, entry):
self.pyfunc_entries.append(entry)
def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private', api = 0, in_pxd = 0,
defining = 0, modifiers = (), utility_code = None):
# Add an entry for a C function.
if not cname:
if visibility != 'private' or api:
cname = name
else:
cname = self.mangle(Naming.func_prefix, name)
entry = self.lookup_here(name)
if entry:
if visibility != 'private' and visibility != entry.visibility:
warning(pos, "Function '%s' previously declared as '%s'" % (name, entry.visibility), 1)
if not entry.type.same_as(type):
if visibility == 'extern' and entry.visibility == 'extern':
can_override = False
if self.is_cpp():
can_override = True
elif cname:
# if all alternatives have different cnames,
# it's safe to allow signature overrides
for alt_entry in entry.all_alternatives():
if not alt_entry.cname or cname == alt_entry.cname:
break # cname not unique!
else:
can_override = True
if can_override:
temp = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
temp.overloaded_alternatives = entry.all_alternatives()
entry = temp
else:
warning(pos, "Function signature does not match previous declaration", 1)
entry.type = type
else:
error(pos, "Function signature does not match previous declaration")
else:
entry = self.add_cfunction(name, type, pos, cname, visibility, modifiers)
entry.func_cname = cname
if in_pxd and visibility != 'extern':
entry.defined_in_pxd = 1
if api:
entry.api = 1
if not defining and not in_pxd and visibility != 'extern':
error(pos, "Non-extern C function '%s' declared but not defined" % name)
if defining:
entry.is_implemented = True
if modifiers:
entry.func_modifiers = modifiers
if utility_code:
assert not entry.utility_code, "duplicate utility code definition in entry %s (%s)" % (name, cname)
entry.utility_code = utility_code
type.entry = entry
return entry
def add_cfunction(self, name, type, pos, cname, visibility, modifiers):
# Add a C function entry without giving it a func_cname.
entry = self.declare(name, cname, type, pos, visibility)
entry.is_cfunction = 1
if modifiers:
entry.func_modifiers = modifiers
self.cfunc_entries.append(entry)
return entry
def find(self, name, pos):
# Look up name, report error if not found.
entry = self.lookup(name)
if entry:
return entry
else:
error(pos, "'%s' is not declared" % name)
def find_imported_module(self, path, pos):
# Look up qualified name, must be a module, report error if not found.
# Path is a list of names.
scope = self
for name in path:
entry = scope.find(name, pos)
if not entry:
return None
if entry.as_module:
scope = entry.as_module
else:
error(pos, "'%s' is not a cimported module" % '.'.join(path))
return None
return scope
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
return (self.lookup_here(name)
or (self.outer_scope and self.outer_scope.lookup(name))
or None)
def lookup_here(self, name):
# Look up in this scope only, return None if not found.
return self.entries.get(name, None)
def lookup_target(self, name):
# Look up name in this scope only. Declare as Python
# variable if not found.
entry = self.lookup_here(name)
if not entry:
entry = self.declare_var(name, py_object_type, None)
return entry
def lookup_type(self, name):
entry = self.lookup(name)
if entry and entry.is_type:
if entry.type.is_fused and self.fused_to_specific:
return entry.type.specialize(self.fused_to_specific)
return entry.type
def lookup_operator(self, operator, operands):
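        # e.g. resolving 'a + b' where 'a' is a C++ class: first try a member
        # 'operator+' declared on a's class scope, then fall back to a free
        # 'operator+' visible from this scope (illustrative summary).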
if operands[0].type.is_cpp_class:
obj_type = operands[0].type
method = obj_type.scope.lookup("operator%s" % operator)
if method is not None:
res = PyrexTypes.best_match(operands[1:], method.all_alternatives())
if res is not None:
return res
function = self.lookup("operator%s" % operator)
if function is None:
return None
return PyrexTypes.best_match(operands, function.all_alternatives())
def lookup_operator_for_types(self, pos, operator, types):
from Nodes import Node
class FakeOperand(Node):
pass
operands = [FakeOperand(pos, type=type) for type in types]
return self.lookup_operator(operator, operands)
def use_utility_code(self, new_code):
self.global_scope().use_utility_code(new_code)
def generate_library_function_declarations(self, code):
# Generate extern decls for C library funcs used.
pass
def defines_any(self, names):
# Test whether any of the given names are
# defined in this scope.
for name in names:
if name in self.entries:
return 1
return 0
def infer_types(self):
from TypeInference import get_type_inferer
get_type_inferer().infer_types(self)
def is_cpp(self):
outer = self.outer_scope
if outer is None:
return False
else:
return outer.is_cpp()
def add_include_file(self, filename):
self.outer_scope.add_include_file(filename)
def get_refcounted_entries(self, include_weakref=False):
py_attrs = []
py_buffers = []
memoryview_slices = []
for entry in self.var_entries:
if entry.type.is_pyobject:
if include_weakref or entry.name != "__weakref__":
py_attrs.append(entry)
elif entry.type == PyrexTypes.c_py_buffer_type:
py_buffers.append(entry)
elif entry.type.is_memoryviewslice:
memoryview_slices.append(entry)
have_entries = py_attrs or py_buffers or memoryview_slices
return have_entries, (py_attrs, py_buffers, memoryview_slices)
class PreImportScope(Scope):
namespace_cname = Naming.preimport_cname
def __init__(self):
Scope.__init__(self, Options.pre_import, None, None)
def declare_builtin(self, name, pos):
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_variable = True
entry.is_pyglobal = True
return entry
class BuiltinScope(Scope):
# The builtin namespace.
is_builtin_scope = True
def __init__(self):
if Options.pre_import is None:
Scope.__init__(self, "__builtin__", None, None)
else:
Scope.__init__(self, "__builtin__", PreImportScope(), None)
self.type_names = {}
for name, definition in self.builtin_entries.iteritems():
cname, type = definition
self.declare_var(name, type, None, cname)
def lookup(self, name, language_level=None):
# 'language_level' is passed by ModuleScope
if language_level == 3:
if name == 'str':
name = 'unicode'
return Scope.lookup(self, name)
def declare_builtin(self, name, pos):
if not hasattr(builtins, name):
if self.outer_scope is not None:
return self.outer_scope.declare_builtin(name, pos)
else:
if Options.error_on_unknown_names:
error(pos, "undeclared name not builtin: %s" % name)
else:
warning(pos, "undeclared name not builtin: %s" % name, 2)
def declare_builtin_cfunction(self, name, type, cname, python_equiv = None,
utility_code = None):
# If python_equiv == "*", the Python equivalent has the same name
# as the entry, otherwise it has the name specified by python_equiv.
name = EncodedString(name)
entry = self.declare_cfunction(name, type, None, cname, visibility='extern',
utility_code = utility_code)
if python_equiv:
if python_equiv == "*":
python_equiv = name
else:
python_equiv = EncodedString(python_equiv)
var_entry = Entry(python_equiv, python_equiv, py_object_type)
var_entry.is_variable = 1
var_entry.is_builtin = 1
var_entry.utility_code = utility_code
entry.as_variable = var_entry
return entry
def declare_builtin_type(self, name, cname, utility_code = None, objstruct_cname = None):
name = EncodedString(name)
type = PyrexTypes.BuiltinObjectType(name, cname, objstruct_cname)
scope = CClassScope(name, outer_scope=None, visibility='extern')
scope.directives = {}
if name == 'bool':
type.is_final_type = True
type.set_scope(scope)
self.type_names[name] = 1
entry = self.declare_type(name, type, None, visibility='extern')
entry.utility_code = utility_code
var_entry = Entry(name = entry.name,
type = self.lookup('type').type, # make sure "type" is the first type declared...
pos = entry.pos,
cname = "((PyObject*)%s)" % entry.type.typeptr_cname)
var_entry.is_variable = 1
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
var_entry.is_builtin = 1
var_entry.utility_code = utility_code
if Options.cache_builtins:
var_entry.is_const = True
entry.as_variable = var_entry
return type
def builtin_scope(self):
return self
builtin_entries = {
"type": ["((PyObject*)&PyType_Type)", py_object_type],
"bool": ["((PyObject*)&PyBool_Type)", py_object_type],
"int": ["((PyObject*)&PyInt_Type)", py_object_type],
"long": ["((PyObject*)&PyLong_Type)", py_object_type],
"float": ["((PyObject*)&PyFloat_Type)", py_object_type],
"complex":["((PyObject*)&PyComplex_Type)", py_object_type],
"bytes": ["((PyObject*)&PyBytes_Type)", py_object_type],
"str": ["((PyObject*)&PyString_Type)", py_object_type],
"unicode":["((PyObject*)&PyUnicode_Type)", py_object_type],
"tuple": ["((PyObject*)&PyTuple_Type)", py_object_type],
"list": ["((PyObject*)&PyList_Type)", py_object_type],
"dict": ["((PyObject*)&PyDict_Type)", py_object_type],
"set": ["((PyObject*)&PySet_Type)", py_object_type],
"frozenset": ["((PyObject*)&PyFrozenSet_Type)", py_object_type],
"slice": ["((PyObject*)&PySlice_Type)", py_object_type],
# "file": ["((PyObject*)&PyFile_Type)", py_object_type], # not in Py3
"None": ["Py_None", py_object_type],
"False": ["Py_False", py_object_type],
"True": ["Py_True", py_object_type],
}
const_counter = 1 # As a temporary solution for compiling code in pxds
class ModuleScope(Scope):
# module_name string Python name of the module
# module_cname string C name of Python module object
# #module_dict_cname string C name of module dict object
# method_table_cname string C name of method table
# doc string Module doc string
# doc_cname string C name of module doc string
# utility_code_list [UtilityCode] Queuing utility codes for forwarding to Code.py
# python_include_files [string] Standard Python headers to be included
# include_files [string] Other C headers to be included
# string_to_entry {string : Entry} Map string const to entry
# identifier_to_entry {string : Entry} Map identifier string const to entry
# context Context
# parent_module Scope Parent in the import namespace
# module_entries {string : Entry} For cimport statements
# type_names {string : 1} Set of type names (used during parsing)
# included_files [string] Cython sources included with 'include'
# pxd_file_loaded boolean Corresponding .pxd file has been processed
# cimported_modules [ModuleScope] Modules imported with cimport
# types_imported {PyrexType : 1} Set of types for which import code generated
# has_import_star boolean Module contains import *
# cpp boolean Compiling a C++ file
# is_cython_builtin boolean Is this the Cython builtin scope (or a child scope)
# is_package boolean Is this a package module? (__init__)
is_module_scope = 1
has_import_star = 0
is_cython_builtin = 0
def __init__(self, name, parent_module, context):
import Builtin
self.parent_module = parent_module
outer_scope = Builtin.builtin_scope
Scope.__init__(self, name, outer_scope, parent_module)
if name == "__init__":
# Treat Spam/__init__.pyx specially, so that when Python loads
# Spam/__init__.so, initSpam() is defined.
self.module_name = parent_module.module_name
self.is_package = True
else:
self.module_name = name
self.is_package = False
self.module_name = EncodedString(self.module_name)
self.context = context
self.module_cname = Naming.module_cname
self.module_dict_cname = Naming.moddict_cname
self.method_table_cname = Naming.methtable_cname
self.doc = ""
self.doc_cname = Naming.moddoc_cname
self.utility_code_list = []
self.module_entries = {}
self.python_include_files = ["Python.h"]
self.include_files = []
self.type_names = dict(outer_scope.type_names)
self.pxd_file_loaded = 0
self.cimported_modules = []
self.types_imported = {}
self.included_files = []
self.has_extern_class = 0
self.cached_builtins = []
self.undeclared_cached_builtins = []
self.namespace_cname = self.module_cname
for var_name in ['__builtins__', '__name__', '__file__', '__doc__', '__path__']:
self.declare_var(EncodedString(var_name), py_object_type, None)
def qualifying_scope(self):
return self.parent_module
def global_scope(self):
return self
def lookup(self, name):
entry = self.lookup_here(name)
if entry is not None:
return entry
if self.context is not None:
language_level = self.context.language_level
else:
language_level = 3
return self.outer_scope.lookup(name, language_level=language_level)
def declare_builtin(self, name, pos):
if not hasattr(builtins, name) \
and name not in Code.non_portable_builtins_map \
and name not in Code.uncachable_builtins:
if self.has_import_star:
entry = self.declare_var(name, py_object_type, pos)
return entry
else:
if Options.error_on_unknown_names:
error(pos, "undeclared name not builtin: %s" % name)
else:
warning(pos, "undeclared name not builtin: %s" % name, 2)
# unknown - assume it's builtin and look it up at runtime
entry = self.declare(name, None, py_object_type, pos, 'private')
entry.is_builtin = 1
return entry
if Options.cache_builtins:
for entry in self.cached_builtins:
if entry.name == name:
return entry
entry = self.declare(None, None, py_object_type, pos, 'private')
if Options.cache_builtins and name not in Code.uncachable_builtins:
entry.is_builtin = 1
entry.is_const = 1 # cached
entry.name = name
entry.cname = Naming.builtin_prefix + name
self.cached_builtins.append(entry)
self.undeclared_cached_builtins.append(entry)
else:
entry.is_builtin = 1
entry.name = name
return entry
def find_module(self, module_name, pos):
# Find a module in the import namespace, interpreting
# relative imports relative to this module's parent.
# Finds and parses the module's .pxd file if the module
# has not been referenced before.
return self.global_scope().context.find_module(
module_name, relative_to = self.parent_module, pos = pos)
def find_submodule(self, name):
# Find and return scope for a submodule of this module,
# creating a new empty one if necessary. Doesn't parse .pxd.
scope = self.lookup_submodule(name)
if not scope:
scope = ModuleScope(name,
parent_module = self, context = self.context)
self.module_entries[name] = scope
return scope
def lookup_submodule(self, name):
# Return scope for submodule of this module, or None.
return self.module_entries.get(name, None)
def add_include_file(self, filename):
if filename not in self.python_include_files \
and filename not in self.include_files:
self.include_files.append(filename)
def add_imported_module(self, scope):
if scope not in self.cimported_modules:
for filename in scope.include_files:
self.add_include_file(filename)
self.cimported_modules.append(scope)
for m in scope.cimported_modules:
self.add_imported_module(m)
def add_imported_entry(self, name, entry, pos):
        if name not in self.entries:
self.entries[name] = entry
else:
warning(pos, "'%s' redeclared " % name, 0)
def declare_module(self, name, scope, pos):
# Declare a cimported module. This is represented as a
# Python module-level variable entry with a module
# scope attached to it. Reports an error and returns
# None if previously declared as something else.
entry = self.lookup_here(name)
if entry:
if entry.is_pyglobal and entry.as_module is scope:
return entry # Already declared as the same module
if not (entry.is_pyglobal and not entry.as_module):
# SAGE -- I put this here so Pyrex
# cimport's work across directories.
# Currently it tries to multiply define
# every module appearing in an import list.
# It shouldn't be an error for a module
# name to appear again, and indeed the generated
# code compiles fine.
return entry
else:
entry = self.declare_var(name, py_object_type, pos)
entry.as_module = scope
self.add_imported_module(scope)
return entry
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
# Add an entry for a global variable. If it is a Python
# object type, and not declared with cdef, it will live
# in the module dictionary, otherwise it will be a C
# global variable.
if not visibility in ('private', 'public', 'extern'):
error(pos, "Module-level variable cannot be declared %s" % visibility)
if not is_cdef:
if type is unspecified_type:
type = py_object_type
if not (type.is_pyobject and not type.is_extension_type):
raise InternalError(
"Non-cdef global variable is not a generic Python object")
if not cname:
defining = not in_pxd
if visibility == 'extern' or (visibility == 'public' and defining):
cname = name
else:
cname = self.mangle(Naming.var_prefix, name)
entry = self.lookup_here(name)
if entry and entry.defined_in_pxd:
#if visibility != 'private' and visibility != entry.visibility:
# warning(pos, "Variable '%s' previously declared as '%s'" % (name, entry.visibility), 1)
if not entry.type.same_as(type):
if visibility == 'extern' and entry.visibility == 'extern':
warning(pos, "Variable '%s' type does not match previous declaration" % name, 1)
entry.type = type
#else:
# error(pos, "Variable '%s' type does not match previous declaration" % name)
if entry.visibility != "private":
mangled_cname = self.mangle(Naming.var_prefix, name)
if entry.cname == mangled_cname:
cname = name
entry.cname = name
if not entry.is_implemented:
entry.is_implemented = True
return entry
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
if is_cdef:
entry.is_cglobal = 1
if entry.type.is_pyobject:
entry.init = 0
self.var_entries.append(entry)
else:
entry.is_pyglobal = 1
if Options.cimport_from_pyx:
entry.used = 1
return entry
def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private', api = 0, in_pxd = 0,
defining = 0, modifiers = (), utility_code = None):
# Add an entry for a C function.
if not cname:
if visibility == 'extern' or (visibility == 'public' and defining):
cname = name
else:
cname = self.mangle(Naming.func_prefix, name)
entry = self.lookup_here(name)
if entry and entry.defined_in_pxd:
if entry.visibility != "private":
mangled_cname = self.mangle(Naming.var_prefix, name)
if entry.cname == mangled_cname:
cname = name
entry.cname = cname
entry.func_cname = cname
entry = Scope.declare_cfunction(
self, name, type, pos,
cname = cname, visibility = visibility, api = api, in_pxd = in_pxd,
defining = defining, modifiers = modifiers, utility_code = utility_code)
return entry
def declare_global(self, name, pos):
entry = self.lookup_here(name)
if not entry:
self.declare_var(name, py_object_type, pos)
def use_utility_code(self, new_code):
if new_code is not None:
self.utility_code_list.append(new_code)
def declare_c_class(self, name, pos, defining = 0, implementing = 0,
module_name = None, base_type = None, objstruct_cname = None,
typeobj_cname = None, typeptr_cname = None, visibility = 'private', typedef_flag = 0, api = 0,
buffer_defaults = None, shadow = 0):
# If this is a non-extern typedef class, expose the typedef, but use
# the non-typedef struct internally to avoid needing forward
# declarations for anonymous structs.
if typedef_flag and visibility != 'extern':
if not (visibility == 'public' or api):
warning(pos, "ctypedef only valid for 'extern' , 'public', and 'api'", 2)
objtypedef_cname = objstruct_cname
typedef_flag = 0
else:
objtypedef_cname = None
#
# Look for previous declaration as a type
#
entry = self.lookup_here(name)
if entry and not shadow:
type = entry.type
if not (entry.is_type and type.is_extension_type):
entry = None # Will cause redeclaration and produce an error
else:
scope = type.scope
if typedef_flag and (not scope or scope.defined):
self.check_previous_typedef_flag(entry, typedef_flag, pos)
if (scope and scope.defined) or (base_type and type.base_type):
if base_type and base_type is not type.base_type:
error(pos, "Base type does not match previous declaration")
if base_type and not type.base_type:
type.base_type = base_type
#
# Make a new entry if needed
#
if not entry or shadow:
type = PyrexTypes.PyExtensionType(name, typedef_flag, base_type, visibility == 'extern')
type.pos = pos
type.buffer_defaults = buffer_defaults
if objtypedef_cname is not None:
type.objtypedef_cname = objtypedef_cname
if visibility == 'extern':
type.module_name = module_name
else:
type.module_name = self.qualified_name
if typeptr_cname:
type.typeptr_cname = typeptr_cname
else:
type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
entry = self.declare_type(name, type, pos, visibility = visibility,
defining = 0, shadow = shadow)
entry.is_cclass = True
if objstruct_cname:
type.objstruct_cname = objstruct_cname
elif not entry.in_cinclude:
type.objstruct_cname = self.mangle(Naming.objstruct_prefix, name)
else:
error(entry.pos,
"Object name required for 'public' or 'extern' C class")
self.attach_var_entry_to_c_class(entry)
self.c_class_entries.append(entry)
#
# Check for re-definition and create scope if needed
#
if not type.scope:
if defining or implementing:
scope = CClassScope(name = name, outer_scope = self,
visibility = visibility)
scope.directives = self.directives.copy()
if base_type and base_type.scope:
scope.declare_inherited_c_attributes(base_type.scope)
type.set_scope(scope)
self.type_entries.append(entry)
else:
if defining and type.scope.defined:
error(pos, "C class '%s' already defined" % name)
elif implementing and type.scope.implemented:
error(pos, "C class '%s' already implemented" % name)
#
# Fill in options, checking for compatibility with any previous declaration
#
if defining:
entry.defined_in_pxd = 1
if implementing: # So that filenames in runtime exceptions refer to
entry.pos = pos # the .pyx file and not the .pxd file
if visibility != 'private' and entry.visibility != visibility:
error(pos, "Class '%s' previously declared as '%s'"
% (name, entry.visibility))
if api:
entry.api = 1
if objstruct_cname:
if type.objstruct_cname and type.objstruct_cname != objstruct_cname:
error(pos, "Object struct name differs from previous declaration")
type.objstruct_cname = objstruct_cname
if typeobj_cname:
if type.typeobj_cname and type.typeobj_cname != typeobj_cname:
error(pos, "Type object name differs from previous declaration")
type.typeobj_cname = typeobj_cname
if self.directives.get('final'):
entry.type.is_final_type = True
# cdef classes are always exported, but we need to set it to
# distinguish between unused Cython utility code extension classes
entry.used = True
#
# Return new or existing entry
#
return entry
def allocate_vtable_names(self, entry):
# If extension type has a vtable, allocate vtable struct and
# slot names for it.
type = entry.type
if type.base_type and type.base_type.vtabslot_cname:
#print "...allocating vtabslot_cname because base type has one" ###
type.vtabslot_cname = "%s.%s" % (
Naming.obj_base_cname, type.base_type.vtabslot_cname)
elif type.scope and type.scope.cfunc_entries:
# one special case here: when inheriting from builtin
# types, the methods may also be built-in, in which
# case they won't need a vtable
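            # e.g. a cdef class deriving from a builtin type without adding
            # cdef methods of its own would take the early return below and
            # get no vtable (illustrative reading of the loop that follows).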
entry_count = len(type.scope.cfunc_entries)
base_type = type.base_type
while base_type:
# FIXME: this will break if we ever get non-inherited C methods
if not base_type.scope or entry_count > len(base_type.scope.cfunc_entries):
break
if base_type.is_builtin_type:
# builtin base type defines all methods => no vtable needed
return
base_type = base_type.base_type
#print "...allocating vtabslot_cname because there are C methods" ###
type.vtabslot_cname = Naming.vtabslot_cname
if type.vtabslot_cname:
#print "...allocating other vtable related cnames" ###
type.vtabstruct_cname = self.mangle(Naming.vtabstruct_prefix, entry.name)
type.vtabptr_cname = self.mangle(Naming.vtabptr_prefix, entry.name)
def check_c_classes_pxd(self):
# Performs post-analysis checking and finishing up of extension types
# being implemented in this module. This is called only for the .pxd.
#
# Checks all extension types declared in this scope to
# make sure that:
#
# * The extension type is fully declared
#
# Also allocates a name for the vtable if needed.
#
for entry in self.c_class_entries:
# Check defined
if not entry.type.scope:
error(entry.pos, "C class '%s' is declared but not defined" % entry.name)
def check_c_class(self, entry):
type = entry.type
name = entry.name
visibility = entry.visibility
# Check defined
if not type.scope:
error(entry.pos, "C class '%s' is declared but not defined" % name)
# Generate typeobj_cname
if visibility != 'extern' and not type.typeobj_cname:
type.typeobj_cname = self.mangle(Naming.typeobj_prefix, name)
## Generate typeptr_cname
#type.typeptr_cname = self.mangle(Naming.typeptr_prefix, name)
# Check C methods defined
if type.scope:
for method_entry in type.scope.cfunc_entries:
if not method_entry.is_inherited and not method_entry.func_cname:
error(method_entry.pos, "C method '%s' is declared but not defined" %
method_entry.name)
# Allocate vtable name if necessary
if type.vtabslot_cname:
#print "ModuleScope.check_c_classes: allocating vtable cname for", self ###
type.vtable_cname = self.mangle(Naming.vtable_prefix, entry.name)
def check_c_classes(self):
# Performs post-analysis checking and finishing up of extension types
# being implemented in this module. This is called only for the main
# .pyx file scope, not for cimported .pxd scopes.
#
# Checks all extension types declared in this scope to
# make sure that:
#
# * The extension type is implemented
# * All required object and type names have been specified or generated
# * All non-inherited C methods are implemented
#
# Also allocates a name for the vtable if needed.
#
debug_check_c_classes = 0
if debug_check_c_classes:
print("Scope.check_c_classes: checking scope " + self.qualified_name)
for entry in self.c_class_entries:
if debug_check_c_classes:
print("...entry %s %s" % (entry.name, entry))
print("......type = ", entry.type)
print("......visibility = ", entry.visibility)
self.check_c_class(entry)
def check_c_functions(self):
# Performs post-analysis checking making sure all
# defined c functions are actually implemented.
for name, entry in self.entries.items():
if entry.is_cfunction:
if (entry.defined_in_pxd
and entry.scope is self
and entry.visibility != 'extern'
and not entry.in_cinclude
and not entry.is_implemented):
error(entry.pos, "Non-extern C function '%s' declared but not defined" % name)
def attach_var_entry_to_c_class(self, entry):
# The name of an extension class has to serve as both a type
# name and a variable name holding the type object. It is
# represented in the symbol table by a type entry with a
# variable entry attached to it. For the variable entry,
# we use a read-only C global variable whose name is an
# expression that refers to the type object.
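        # e.g. for 'cdef class Foo', the name 'Foo' used as a value resolves to
        # something like '((PyObject*)__pyx_ptype_..._Foo)' (illustrative; the
        # exact spelling comes from typeptr_cname below).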
import Builtin
var_entry = Entry(name = entry.name,
type = Builtin.type_type,
pos = entry.pos,
cname = "((PyObject*)%s)" % entry.type.typeptr_cname)
var_entry.is_variable = 1
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
entry.as_variable = var_entry
def is_cpp(self):
return self.cpp
def infer_types(self):
from TypeInference import PyObjectTypeInferer
PyObjectTypeInferer().infer_types(self)
class LocalScope(Scope):
# Does the function have a 'with gil:' block?
has_with_gil_block = False
# Transient attribute, used for symbol table variable declarations
_in_with_gil_block = False
def __init__(self, name, outer_scope, parent_scope = None):
if parent_scope is None:
parent_scope = outer_scope
Scope.__init__(self, name, outer_scope, parent_scope)
def mangle(self, prefix, name):
return prefix + name
def declare_arg(self, name, type, pos):
# Add an entry for an argument of a function.
cname = self.mangle(Naming.var_prefix, name)
entry = self.declare(name, cname, type, pos, 'private')
entry.is_variable = 1
if type.is_pyobject:
entry.init = "0"
entry.is_arg = 1
#entry.borrowed = 1 # Not using borrowed arg refs for now
self.arg_entries.append(entry)
return entry
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
# Add an entry for a local variable.
if visibility in ('public', 'readonly'):
error(pos, "Local variable cannot be declared %s" % visibility)
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
if type.is_pyobject:
entry.init = "0"
entry.is_local = 1
entry.in_with_gil_block = self._in_with_gil_block
self.var_entries.append(entry)
return entry
def declare_global(self, name, pos):
# Pull entry from global scope into local scope.
if self.lookup_here(name):
warning(pos, "'%s' redeclared ", 0)
else:
entry = self.global_scope().lookup_target(name)
self.entries[name] = entry
def declare_nonlocal(self, name, pos):
# Pull entry from outer scope into local scope
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
else:
entry = self.lookup(name)
if entry is None or not entry.from_closure:
error(pos, "no binding for nonlocal '%s' found" % name)
def lookup(self, name):
# Look up name in this scope or an enclosing one.
# Return None if not found.
entry = Scope.lookup(self, name)
if entry is not None:
if entry.scope is not self and entry.scope.is_closure_scope:
if hasattr(entry.scope, "scope_class"):
raise InternalError, "lookup() after scope class created."
# The actual c fragment for the different scopes differs
# on the outside and inside, so we make a new entry
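                # e.g. (illustrative) the same variable may be spelled
                # '__pyx_cur_scope->__pyx_v_x' inside the closure but reached
                # through the outer function's own scope struct outside of it.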
entry.in_closure = True
inner_entry = InnerEntry(entry, self)
inner_entry.is_variable = True
self.entries[name] = inner_entry
return inner_entry
return entry
def mangle_closure_cnames(self, outer_scope_cname):
for entry in self.entries.values():
if entry.from_closure:
cname = entry.outer_entry.cname
if self.is_passthrough:
entry.cname = cname
else:
if cname.startswith(Naming.cur_scope_cname):
cname = cname[len(Naming.cur_scope_cname)+2:]
entry.cname = "%s->%s" % (outer_scope_cname, cname)
elif entry.in_closure:
entry.original_cname = entry.cname
entry.cname = "%s->%s" % (Naming.cur_scope_cname, entry.cname)
class GeneratorExpressionScope(Scope):
"""Scope for generator expressions and comprehensions. As opposed
to generators, these can be easily inlined in some cases, so all
we really need is a scope that holds the loop variable(s).
"""
def __init__(self, outer_scope):
name = outer_scope.global_scope().next_id(Naming.genexpr_id_ref)
Scope.__init__(self, name, outer_scope, outer_scope)
self.directives = outer_scope.directives
self.genexp_prefix = "%s%d%s" % (Naming.pyrex_prefix, len(name), name)
def mangle(self, prefix, name):
return '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(prefix, name))
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = True):
if type is unspecified_type:
# if the outer scope defines a type for this variable, inherit it
outer_entry = self.outer_scope.lookup(name)
if outer_entry and outer_entry.is_variable:
type = outer_entry.type # may still be 'unspecified_type' !
# the parent scope needs to generate code for the variable, but
# this scope must hold its name exclusively
cname = '%s%s' % (self.genexp_prefix, self.parent_scope.mangle(Naming.var_prefix, name or self.next_id()))
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
entry.is_local = 1
self.var_entries.append(entry)
self.entries[name] = entry
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False):
return self.outer_scope.declare_pyfunction(
name, pos, allow_redefine)
def declare_lambda_function(self, func_cname, pos):
return self.outer_scope.declare_lambda_function(func_cname, pos)
def add_lambda_def(self, def_node):
return self.outer_scope.add_lambda_def(def_node)
class ClosureScope(LocalScope):
is_closure_scope = True
def __init__(self, name, scope_name, outer_scope, parent_scope=None):
LocalScope.__init__(self, name, outer_scope, parent_scope)
self.closure_cname = "%s%s" % (Naming.closure_scope_prefix, scope_name)
# def mangle_closure_cnames(self, scope_var):
# for entry in self.entries.values() + self.temp_entries:
# entry.in_closure = 1
# LocalScope.mangle_closure_cnames(self, scope_var)
# def mangle(self, prefix, name):
# return "%s->%s" % (self.cur_scope_cname, name)
# return "%s->%s" % (self.closure_cname, name)
def declare_pyfunction(self, name, pos, allow_redefine=False):
return LocalScope.declare_pyfunction(self, name, pos, allow_redefine, visibility='private')
class StructOrUnionScope(Scope):
# Namespace of a C struct or union.
def __init__(self, name="?"):
Scope.__init__(self, name, None, None)
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0,
allow_pyobject = 0):
# Add an entry for an attribute.
if not cname:
cname = name
if visibility == 'private':
cname = c_safe_identifier(cname)
if type.is_cfunction:
type = PyrexTypes.CPtrType(type)
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
self.var_entries.append(entry)
if type.is_pyobject and not allow_pyobject:
error(pos,
"C struct/union member cannot be a Python object")
if visibility != 'private':
error(pos,
"C struct/union member cannot be declared %s" % visibility)
return entry
def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private', api = 0, in_pxd = 0,
defining = 0, modifiers = ()): # currently no utility code ...
return self.declare_var(name, type, pos,
cname=cname, visibility=visibility)
class ClassScope(Scope):
# Abstract base class for namespace of
# Python class or extension type.
#
# class_name string Python name of the class
# scope_prefix string Additional prefix for names
# declared in the class
# doc string or None Doc string
def __init__(self, name, outer_scope):
Scope.__init__(self, name, outer_scope, outer_scope)
self.class_name = name
self.doc = None
def lookup(self, name):
entry = Scope.lookup(self, name)
if entry:
return entry
if name == "classmethod":
# We don't want to use the builtin classmethod here 'cause it won't do the
# right thing in this scope (as the class members aren't still functions).
# Don't want to add a cfunction to this scope 'cause that would mess with
# the type definition, so we just return the right entry.
entry = Entry(
"classmethod",
"__Pyx_Method_ClassMethod",
PyrexTypes.CFuncType(
py_object_type,
[PyrexTypes.CFuncTypeArg("", py_object_type, None)], 0, 0))
entry.utility_code_definition = Code.UtilityCode.load_cached("ClassMethod", "CythonFunction.c")
entry.is_cfunction = 1
return entry
class PyClassScope(ClassScope):
# Namespace of a Python class.
#
# class_obj_cname string C variable holding class object
is_py_class_scope = 1
def mangle_class_private_name(self, name):
return self.mangle_special_name(name)
def mangle_special_name(self, name):
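        # Standard Python private-name mangling, e.g. in class 'Spam' the name
        # '__secret' becomes '_Spam__secret', while dunder names like
        # '__init__' are left unchanged.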
if name and name.startswith('__') and not name.endswith('__'):
name = EncodedString('_%s%s' % (self.class_name.lstrip('_'), name))
return name
def lookup_here(self, name):
name = self.mangle_special_name(name)
return ClassScope.lookup_here(self, name)
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
name = self.mangle_special_name(name)
if type is unspecified_type:
type = py_object_type
# Add an entry for a class attribute.
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
entry.is_pyglobal = 1
entry.is_pyclass_attr = 1
return entry
def declare_nonlocal(self, name, pos):
# Pull entry from outer scope into local scope
orig_entry = self.lookup_here(name)
if orig_entry and orig_entry.scope is self and not orig_entry.from_closure:
error(pos, "'%s' redeclared as nonlocal" % name)
else:
entry = self.lookup(name)
if entry is None:
error(pos, "no binding for nonlocal '%s' found" % name)
else:
# FIXME: this works, but it's unclear if it's the
# right thing to do
self.entries[name] = entry
def add_default_value(self, type):
return self.outer_scope.add_default_value(type)
class CClassScope(ClassScope):
# Namespace of an extension type.
#
# parent_type CClassType
# #typeobj_cname string or None
# #objstruct_cname string
# method_table_cname string
# getset_table_cname string
# has_pyobject_attrs boolean Any PyObject attributes?
# property_entries [Entry]
# defined boolean Defined in .pxd file
# implemented boolean Defined in .pyx file
# inherited_var_entries [Entry] Adapted var entries from base class
is_c_class_scope = 1
def __init__(self, name, outer_scope, visibility):
ClassScope.__init__(self, name, outer_scope)
if visibility != 'extern':
self.method_table_cname = outer_scope.mangle(Naming.methtab_prefix, name)
self.getset_table_cname = outer_scope.mangle(Naming.gstab_prefix, name)
self.has_pyobject_attrs = 0
self.property_entries = []
self.inherited_var_entries = []
self.defined = 0
self.implemented = 0
def needs_gc(self):
# If the type or any of its base types have Python-valued
# C attributes, then it needs to participate in GC.
return self.has_pyobject_attrs or \
(self.parent_type.base_type and
self.parent_type.base_type.scope is not None and
self.parent_type.base_type.scope.needs_gc())
def declare_var(self, name, type, pos,
cname = None, visibility = 'private',
api = 0, in_pxd = 0, is_cdef = 0):
if is_cdef:
# Add an entry for an attribute.
if self.defined:
error(pos,
"C attributes cannot be added in implementation part of"
" extension type defined in a pxd")
if get_special_method_signature(name):
error(pos,
"The name '%s' is reserved for a special method."
% name)
if not cname:
cname = name
if visibility == 'private':
cname = c_safe_identifier(cname)
if type.is_cpp_class and visibility != 'extern':
type.check_nullary_constructor(pos)
self.use_utility_code(Code.UtilityCode("#include <new>"))
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
self.var_entries.append(entry)
if type.is_pyobject and name != '__weakref__':
self.has_pyobject_attrs = 1
if visibility not in ('private', 'public', 'readonly'):
error(pos,
"Attribute of extension type cannot be declared %s" % visibility)
if visibility in ('public', 'readonly'):
# If the field is an external typedef, we cannot be sure about the type,
                # so do the conversion ourselves rather than rely on the CPython mechanism (through
# a property; made in AnalyseDeclarationsTransform).
entry.needs_property = True
if name == "__weakref__":
error(pos, "Special attribute __weakref__ cannot be exposed to Python")
if not type.is_pyobject:
if (not type.create_to_py_utility_code(self) or
(visibility=='public' and not
type.create_from_py_utility_code(self))):
error(pos,
"C attribute of type '%s' cannot be accessed from Python" % type)
else:
entry.needs_property = False
return entry
else:
if type is unspecified_type:
type = py_object_type
# Add an entry for a class attribute.
entry = Scope.declare_var(self, name, type, pos,
cname=cname, visibility=visibility,
api=api, in_pxd=in_pxd, is_cdef=is_cdef)
entry.is_member = 1
entry.is_pyglobal = 1 # xxx: is_pyglobal changes behaviour in so many places that
# I keep it in for now. is_member should be enough
# later on
self.namespace_cname = "(PyObject *)%s" % self.parent_type.typeptr_cname
return entry
def declare_pyfunction(self, name, pos, allow_redefine=False):
# Add an entry for a method.
if name in ('__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__'):
error(pos, "Special method %s must be implemented via __richcmp__" % name)
if name == "__new__":
error(pos, "__new__ method of extension type will change semantics "
"in a future version of Pyrex and Cython. Use __cinit__ instead.")
entry = self.declare_var(name, py_object_type, pos,
visibility='extern')
special_sig = get_special_method_signature(name)
if special_sig:
# Special methods get put in the method table with a particular
# signature declared in advance.
entry.signature = special_sig
entry.is_special = 1
else:
entry.signature = pymethod_signature
entry.is_special = 0
self.pyfunc_entries.append(entry)
return entry
def lookup_here(self, name):
if name == "__new__":
name = EncodedString("__cinit__")
entry = ClassScope.lookup_here(self, name)
if entry and entry.is_builtin_cmethod:
if not self.parent_type.is_builtin_type:
# For subtypes of builtin types, we can only return
                # optimised C methods if the type is final.
# Otherwise, subtypes may choose to override the
# method, but the optimisation would prevent the
# subtype method from being called.
if not self.parent_type.is_final_type:
return None
return entry
def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'private', api = 0, in_pxd = 0,
defining = 0, modifiers = (), utility_code = None):
if get_special_method_signature(name) and not self.parent_type.is_builtin_type:
error(pos, "Special methods must be declared with 'def', not 'cdef'")
args = type.args
if not args:
error(pos, "C method has no self argument")
elif not self.parent_type.assignable_from(args[0].type):
error(pos, "Self argument (%s) of C method '%s' does not match parent type (%s)" %
(args[0].type, name, self.parent_type))
entry = self.lookup_here(name)
if cname is None:
cname = c_safe_identifier(name)
if entry:
if not entry.is_cfunction:
warning(pos, "'%s' redeclared " % name, 0)
else:
if defining and entry.func_cname:
error(pos, "'%s' already defined" % name)
#print "CClassScope.declare_cfunction: checking signature" ###
if entry.is_final_cmethod and entry.is_inherited:
error(pos, "Overriding final methods is not allowed")
elif type.same_c_signature_as(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
pass
elif type.compatible_signature_with(entry.type, as_cmethod = 1) and type.nogil == entry.type.nogil:
entry = self.add_cfunction(name, type, pos, cname, visibility='ignore', modifiers=modifiers)
defining = 1
else:
error(pos, "Signature not compatible with previous declaration")
error(entry.pos, "Previous declaration is here")
else:
if self.defined:
error(pos,
"C method '%s' not previously declared in definition part of"
" extension type" % name)
entry = self.add_cfunction(name, type, pos, cname,
visibility, modifiers)
if defining:
entry.func_cname = self.mangle(Naming.func_prefix, name)
entry.utility_code = utility_code
type.entry = entry
if u'inline' in modifiers:
entry.is_inline_cmethod = True
if (self.parent_type.is_final_type or entry.is_inline_cmethod or
self.directives.get('final')):
entry.is_final_cmethod = True
entry.final_func_cname = entry.func_cname
return entry
def add_cfunction(self, name, type, pos, cname, visibility, modifiers):
# Add a cfunction entry without giving it a func_cname.
prev_entry = self.lookup_here(name)
entry = ClassScope.add_cfunction(self, name, type, pos, cname,
visibility, modifiers)
entry.is_cmethod = 1
entry.prev_entry = prev_entry
return entry
def declare_builtin_cfunction(self, name, type, cname, utility_code = None):
# overridden methods of builtin types still have their Python
# equivalent that must be accessible to support bound methods
name = EncodedString(name)
entry = self.declare_cfunction(name, type, None, cname, visibility='extern',
utility_code = utility_code)
var_entry = Entry(name, name, py_object_type)
var_entry.is_variable = 1
var_entry.is_builtin = 1
var_entry.utility_code = utility_code
entry.as_variable = var_entry
return entry
def declare_property(self, name, doc, pos):
entry = self.lookup_here(name)
if entry is None:
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_property = 1
entry.doc = doc
entry.scope = PropertyScope(name,
outer_scope = self.global_scope(), parent_scope = self)
entry.scope.parent_type = self.parent_type
self.property_entries.append(entry)
return entry
def declare_inherited_c_attributes(self, base_scope):
# Declare entries for all the C attributes of an
# inherited type, with cnames modified appropriately
# to work with this type.
def adapt(cname):
return "%s.%s" % (Naming.obj_base_cname, base_entry.cname)
entries = base_scope.inherited_var_entries + base_scope.var_entries
for base_entry in entries:
entry = self.declare(base_entry.name, adapt(base_entry.cname),
base_entry.type, None, 'private')
entry.is_variable = 1
self.inherited_var_entries.append(entry)
        # If the class is defined in a pxd, specific entries have not been added.
# Ensure now that the parent (base) scope has specific entries
# Iterate over a copy as get_all_specialized_function_types() will mutate
for base_entry in base_scope.cfunc_entries[:]:
if base_entry.type.is_fused:
base_entry.type.get_all_specialized_function_types()
for base_entry in base_scope.cfunc_entries:
cname = base_entry.cname
var_entry = base_entry.as_variable
is_builtin = var_entry and var_entry.is_builtin
if not is_builtin:
cname = adapt(cname)
entry = self.add_cfunction(base_entry.name, base_entry.type,
base_entry.pos, cname,
base_entry.visibility, base_entry.func_modifiers)
entry.is_inherited = 1
if base_entry.is_final_cmethod:
entry.is_final_cmethod = True
entry.is_inline_cmethod = base_entry.is_inline_cmethod
if (self.parent_scope == base_scope.parent_scope or
entry.is_inline_cmethod):
entry.final_func_cname = base_entry.final_func_cname
if is_builtin:
entry.is_builtin_cmethod = True
entry.as_variable = var_entry
if base_entry.utility_code:
entry.utility_code = base_entry.utility_code
class CppClassScope(Scope):
# Namespace of a C++ class.
is_cpp_class_scope = 1
default_constructor = None
type = None
def __init__(self, name, outer_scope, templates=None):
Scope.__init__(self, name, outer_scope, None)
self.directives = outer_scope.directives
self.inherited_var_entries = []
if templates is not None:
for T in templates:
template_entry = self.declare(
T, T, PyrexTypes.TemplatePlaceholderType(T), None, 'extern')
template_entry.is_type = 1
def declare_var(self, name, type, pos,
cname = None, visibility = 'extern',
api = 0, in_pxd = 0, is_cdef = 0,
allow_pyobject = 0, defining = 0):
# Add an entry for an attribute.
if not cname:
cname = name
entry = self.lookup_here(name)
if defining and entry is not None:
if not entry.type.same_as(type):
error(pos, "Function signature does not match previous declaration")
else:
entry = self.declare(name, cname, type, pos, visibility)
entry.is_variable = 1
if type.is_cfunction and self.type:
entry.func_cname = "%s::%s" % (self.type.declaration_code(""), cname)
if name != "this" and (defining or name != "<init>"):
self.var_entries.append(entry)
if type.is_pyobject and not allow_pyobject:
error(pos,
"C++ class member cannot be a Python object")
return entry
def check_base_default_constructor(self, pos):
# Look for default constructors in all base classes.
if self.default_constructor is None:
entry = self.lookup(self.name)
if not entry.type.base_classes:
self.default_constructor = True
return
for base_class in entry.type.base_classes:
if base_class is PyrexTypes.error_type:
continue
temp_entry = base_class.scope.lookup_here("<init>")
found = False
if temp_entry is None:
continue
for alternative in temp_entry.all_alternatives():
type = alternative.type
if type.is_ptr:
type = type.base_type
if not type.args:
found = True
break
if not found:
self.default_constructor = temp_entry.scope.name
error(pos, "no matching function for call to " \
"%s::%s()" % (temp_entry.scope.name, temp_entry.scope.name))
elif not self.default_constructor:
error(pos, "no matching function for call to %s::%s()" %
(self.default_constructor, self.default_constructor))
def declare_cfunction(self, name, type, pos,
cname = None, visibility = 'extern', api = 0, in_pxd = 0,
defining = 0, modifiers = (), utility_code = None):
if name in (self.name.split('::')[-1], '__init__') and cname is None:
self.check_base_default_constructor(pos)
cname = self.type.cname
name = '<init>'
type.return_type = PyrexTypes.InvisibleVoidType()
elif name == '__dealloc__' and cname is None:
cname = "~%s" % self.type.cname
name = '<del>'
type.return_type = PyrexTypes.InvisibleVoidType()
prev_entry = self.lookup_here(name)
entry = self.declare_var(name, type, pos,
defining=defining,
cname=cname, visibility=visibility)
if prev_entry and not defining:
entry.overloaded_alternatives = prev_entry.all_alternatives()
entry.utility_code = utility_code
type.entry = entry
return entry
def declare_inherited_cpp_attributes(self, base_scope):
# Declare entries for all the C++ attributes of an
# inherited type, with cnames modified appropriately
# to work with this type.
for base_entry in \
base_scope.inherited_var_entries + base_scope.var_entries:
            # constructor is not inherited
if base_entry.name == "<init>":
continue
#print base_entry.name, self.entries
if base_entry.name in self.entries:
base_entry.name # FIXME: is there anything to do in this case?
entry = self.declare(base_entry.name, base_entry.cname,
base_entry.type, None, 'extern')
entry.is_variable = 1
self.inherited_var_entries.append(entry)
for base_entry in base_scope.cfunc_entries:
entry = self.declare_cfunction(base_entry.name, base_entry.type,
base_entry.pos, base_entry.cname,
base_entry.visibility, 0,
modifiers = base_entry.func_modifiers,
utility_code = base_entry.utility_code)
entry.is_inherited = 1
def specialize(self, values):
scope = CppClassScope(self.name, self.outer_scope)
for entry in self.entries.values():
if entry.is_type:
scope.declare_type(entry.name,
entry.type.specialize(values),
entry.pos,
entry.cname,
template=1)
elif entry.type.is_cfunction:
for e in entry.all_alternatives():
scope.declare_cfunction(e.name,
e.type.specialize(values),
e.pos,
e.cname,
utility_code = e.utility_code)
else:
scope.declare_var(entry.name,
entry.type.specialize(values),
entry.pos,
entry.cname,
entry.visibility)
return scope
class PropertyScope(Scope):
# Scope holding the __get__, __set__ and __del__ methods for
# a property of an extension type.
#
# parent_type PyExtensionType The type to which the property belongs
is_property_scope = 1
def declare_pyfunction(self, name, pos, allow_redefine=False):
# Add an entry for a method.
signature = get_property_accessor_signature(name)
if signature:
entry = self.declare(name, name, py_object_type, pos, 'private')
entry.is_special = 1
entry.signature = signature
return entry
else:
error(pos, "Only __get__, __set__ and __del__ methods allowed "
"in a property declaration")
return None
class CConstScope(Scope):
def __init__(self, const_base_type_scope):
Scope.__init__(
self,
'const_' + const_base_type_scope.name,
const_base_type_scope.outer_scope,
const_base_type_scope.parent_scope)
self.const_base_type_scope = const_base_type_scope
def lookup_here(self, name):
entry = self.const_base_type_scope.lookup_here(name)
if entry is not None:
entry = copy.copy(entry)
entry.type = PyrexTypes.c_const_type(entry.type)
return entry
| apache-2.0 | 5,370,810,031,526,427,000 | 41.961853 | 120 | 0.564523 | false |
Chuck8080/pyspider | pyspider/database/__init__.py | 2 | 3189 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-10-08 15:04:08
import urlparse
def connect_database(url):
"""
    Create a database object from a database URL.
mysql:
mysql+type://user:passwd@host:port/database
sqlite:
# relative path
sqlite+type:///path/to/database.db
# absolute path
sqlite+type:////path/to/database.db
# memory database
sqlite+type://
mongodb:
mongodb+type://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]]
type:
taskdb
projectdb
resultdb
"""
parsed = urlparse.urlparse(url)
engine, dbtype = parsed.scheme.split('+')
if engine == 'mysql':
parames = {}
if parsed.username:
parames['user'] = parsed.username
if parsed.password:
parames['passwd'] = parsed.password
if parsed.hostname:
parames['host'] = parsed.hostname
if parsed.port:
parames['port'] = parsed.port
if parsed.path.strip('/'):
parames['database'] = parsed.path.strip('/')
if dbtype == 'taskdb':
from .mysql.taskdb import TaskDB
return TaskDB(**parames)
elif dbtype == 'projectdb':
from .mysql.projectdb import ProjectDB
return ProjectDB(**parames)
elif dbtype == 'resultdb':
from .mysql.resultdb import ResultDB
return ResultDB(**parames)
else:
            raise Exception('unknown database type: %s' % dbtype)
elif engine == 'sqlite':
if parsed.path.startswith('//'):
path = '/'+parsed.path.strip('/')
elif parsed.path.startswith('/'):
path = './'+parsed.path.strip('/')
elif not parsed.path:
path = ':memory:'
else:
raise Exception('error path: %s' % parsed.path)
if dbtype == 'taskdb':
from .sqlite.taskdb import TaskDB
return TaskDB(path)
elif dbtype == 'projectdb':
from .sqlite.projectdb import ProjectDB
return ProjectDB(path)
elif dbtype == 'resultdb':
from .sqlite.resultdb import ResultDB
return ResultDB(path)
else:
            raise Exception('unknown database type: %s' % dbtype)
elif engine == 'mongodb':
url = url.replace(parsed.scheme, 'mongodb')
parames = {}
if parsed.path.strip('/'):
parames['database'] = parsed.path.strip('/')
if dbtype == 'taskdb':
from .mongodb.taskdb import TaskDB
return TaskDB(url, **parames)
elif dbtype == 'projectdb':
from .mongodb.projectdb import ProjectDB
return ProjectDB(url, **parames)
elif dbtype == 'resultdb':
from .mongodb.resultdb import ResultDB
return ResultDB(url, **parames)
else:
            raise Exception('unknown database type: %s' % dbtype)
else:
        raise Exception('unknown engine: %s' % engine)
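# Hedged usage sketch (illustrative only; the hosts, credentials and paths in
# these URLs are assumptions, not pyspider defaults -- they simply follow the
# scheme+type format documented in connect_database above):
#
#   from pyspider.database import connect_database
#   taskdb = connect_database('sqlite+taskdb:///data/task.db')
#   projectdb = connect_database('mysql+projectdb://user:passwd@localhost:3306/projectdb')
#   resultdb = connect_database('mongodb+resultdb://localhost:27017/resultdb')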
| apache-2.0 | -3,509,956,590,248,709,600 | 31.876289 | 115 | 0.55127 | false |
yury-s/v8-inspector | Source/chrome/tools/telemetry/telemetry/util/bootstrap.py | 19 | 5557 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bootstrap Chrome Telemetry by downloading all its files from SVN servers.
Requires a DEPS file to specify which directories on which SVN servers
are required to run Telemetry. Format of that DEPS file is a subset of the
normal DEPS file format[1]; currently only the "deps" dictionary is
supported and nothing else.
Fetches all files in the specified directories using WebDAV (SVN is WebDAV under
the hood).
[1] http://dev.chromium.org/developers/how-tos/depottools#TOC-DEPS-file
"""
import imp
import logging
import os
import urllib
import urlparse
# Dummy module for DAVclient.
davclient = None
# Link to file containing the 'davclient' WebDAV client library.
_DAVCLIENT_URL = ('https://src.chromium.org/chrome/trunk/src/tools/'
'telemetry/third_party/davclient/davclient.py')
def _DownloadAndImportDAVClientModule():
"""Dynamically import davclient helper library."""
global davclient
davclient_src = urllib.urlopen(_DAVCLIENT_URL).read()
davclient = imp.new_module('davclient')
exec davclient_src in davclient.__dict__ # pylint: disable=exec-used
class DAVClientWrapper(object):
"""Knows how to retrieve subdirectories and files from WebDAV/SVN servers."""
def __init__(self, root_url):
"""Initialize SVN server root_url, save files to local dest_dir.
Args:
root_url: string url of SVN/WebDAV server
"""
self.root_url = root_url
self.client = davclient.DAVClient(root_url)
@staticmethod
def __norm_path_keys(dict_with_path_keys):
"""Returns a dictionary with os.path.normpath called on every key."""
return dict((os.path.normpath(k), v) for (k, v) in
dict_with_path_keys.items())
def GetDirList(self, path):
"""Returns string names of all files and subdirs of path on the server."""
props = self.__norm_path_keys(self.client.propfind(path, depth=1))
# remove this path
del props[os.path.normpath(path)]
return [os.path.basename(p) for p in props.keys()]
def IsFile(self, path):
"""Returns True if the path is a file on the server, False if directory."""
props = self.__norm_path_keys(self.client.propfind(path, depth=1))
return props[os.path.normpath(path)]['resourcetype'] is None
def Traverse(self, src_path, dst_path):
"""Walks the directory hierarchy pointed to by src_path download all files.
Recursively walks src_path and saves all files and subfolders into
dst_path.
Args:
src_path: string path on SVN server to save (absolute path on server).
      dst_path: string local path (relative or absolute) to save to.
"""
if self.IsFile(src_path):
if not os.path.exists(os.path.dirname(dst_path)):
logging.info('Creating %s', os.path.dirname(dst_path))
os.makedirs(os.path.dirname(dst_path))
if os.path.isfile(dst_path):
logging.info('Skipping %s', dst_path)
else:
logging.info('Saving %s to %s', self.root_url + src_path, dst_path)
urllib.urlretrieve(self.root_url + src_path, dst_path)
return
else:
for subdir in self.GetDirList(src_path):
self.Traverse(os.path.join(src_path, subdir),
os.path.join(dst_path, subdir))
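# Hedged usage sketch of DAVClientWrapper (the URL and paths are hypothetical,
# chosen only to illustrate the root_url/Traverse split used by DownloadDeps
# further below):
#
#   client = DAVClientWrapper('https://src.chromium.org')
#   client.Traverse('/chrome/trunk/src/tools/telemetry', './telemetry')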
def ListAllDepsPaths(deps_file):
"""Recursively returns a list of all paths indicated in this deps file.
Note that this discards information about where path dependencies come from,
so this is only useful in the context of a Chromium source checkout that has
already fetched all dependencies.
Args:
deps_file: File containing deps information to be evaluated, in the
format given in the header of this file.
Returns:
A list of string paths starting under src that are required by the
given deps file, and all of its sub-dependencies. This amounts to
the keys of the 'deps' dictionary.
"""
deps = {}
deps_includes = {}
chrome_root = os.path.dirname(__file__)
while os.path.basename(chrome_root) != 'src':
chrome_root = os.path.abspath(os.path.join(chrome_root, os.pardir))
exec open(deps_file).read() # pylint: disable=exec-used
deps_paths = deps.keys()
for path in deps_includes.keys():
# Need to localize the paths.
path = os.path.join(chrome_root, os.pardir, path)
deps_paths += ListAllDepsPaths(path)
return deps_paths
def DownloadDeps(destination_dir, url):
"""Saves all the dependencies in deps_path.
Opens and reads url, assuming the contents are in the simple DEPS-like file
format specified in the header of this file, then download all
files/directories listed to the destination_dir.
Args:
destination_dir: String path to directory to download files into.
url: URL containing deps information to be evaluated.
"""
logging.warning('Downloading deps from %s...', url)
# TODO(wiltzius): Add a parameter for which revision to pull.
_DownloadAndImportDAVClientModule()
deps = {}
deps_includes = {}
exec urllib.urlopen(url).read() # pylint: disable=exec-used
for dst_path, src_path in deps.iteritems():
full_dst_path = os.path.join(destination_dir, dst_path)
parsed_url = urlparse.urlparse(src_path)
root_url = parsed_url.scheme + '://' + parsed_url.netloc
dav_client = DAVClientWrapper(root_url)
dav_client.Traverse(parsed_url.path, full_dst_path)
for url in deps_includes.values():
DownloadDeps(destination_dir, url)
| bsd-3-clause | -6,250,481,518,184,842,000 | 33.73125 | 80 | 0.700558 | false |
bigswitch/horizon | openstack_dashboard/dashboards/project/firewalls/urls.py | 8 | 2530 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.project.firewalls import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^\?tab=fwtabs__firewalls$',
views.IndexView.as_view(), name='firewalls'),
url(r'^\?tab=fwtabs__rules$', views.IndexView.as_view(), name='rules'),
url(r'^\?tab=fwtabs__policies$',
views.IndexView.as_view(), name='policies'),
url(r'^addrule$', views.AddRuleView.as_view(), name='addrule'),
url(r'^addpolicy$', views.AddPolicyView.as_view(), name='addpolicy'),
url(r'^addfirewall/(?P<policy_id>[^/]+)/$',
views.AddFirewallView.as_view(), name='addfirewall'),
url(r'^addfirewall$', views.AddFirewallView.as_view(), name='addfirewall'),
url(r'^insertrule/(?P<policy_id>[^/]+)/$',
views.InsertRuleToPolicyView.as_view(), name='insertrule'),
url(r'^removerule/(?P<policy_id>[^/]+)/$',
views.RemoveRuleFromPolicyView.as_view(), name='removerule'),
url(r'^updaterule/(?P<rule_id>[^/]+)/$',
views.UpdateRuleView.as_view(), name='updaterule'),
url(r'^updatepolicy/(?P<policy_id>[^/]+)/$',
views.UpdatePolicyView.as_view(), name='updatepolicy'),
url(r'^updatefirewall/(?P<firewall_id>[^/]+)/$',
views.UpdateFirewallView.as_view(), name='updatefirewall'),
url(r'^rule/(?P<rule_id>[^/]+)/$',
views.RuleDetailsView.as_view(), name='ruledetails'),
url(r'^policy/(?P<policy_id>[^/]+)/$',
views.PolicyDetailsView.as_view(), name='policydetails'),
url(r'^addrouter/(?P<firewall_id>[^/]+)/$',
views.AddRouterToFirewallView.as_view(), name='addrouter'),
url(r'^removerouter/(?P<firewall_id>[^/]+)/$',
views.RemoveRouterFromFirewallView.as_view(), name='removerouter'),
url(r'^firewall/(?P<firewall_id>[^/]+)/$',
views.FirewallDetailsView.as_view(), name='firewalldetails'),
]
| apache-2.0 | 74,863,357,260,858,850 | 48.607843 | 79 | 0.648221 | false |
brianrodri/oppia | core/storage/auth/gae_models.py | 2 | 10858 | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for managing user authentication."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.platform import models
import feconf
base_models, user_models = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.user])
datastore_services = models.Registry.import_datastore_services()
ONLY_FIREBASE_SEED_MODEL_ID = '1'
class UserAuthDetailsModel(base_models.BaseModel):
"""Stores the authentication details for a particular user.
Instances of this class are keyed by user id.
"""
# Authentication identifier from Google AppEngine (GAE). Exists only for
# full users. None for profile users.
gae_id = datastore_services.StringProperty(indexed=True)
# Authentication identifier from the Firebase authentication server.
firebase_auth_id = datastore_services.StringProperty(indexed=True)
# For profile users, the user ID of the full user associated with them.
    # None for full users. Required for profiles because their
    # gae_id/firebase_auth_id attribute is None; this attribute therefore
    # stores their association with a full user, who does have a
    # gae_id/firebase_auth_id.
parent_user_id = (
datastore_services.StringProperty(indexed=True, default=None))
@staticmethod
def get_deletion_policy():
"""Model contains data to delete corresponding to a user: id, gae_id,
firebase_auth_id, and parent_user_id fields.
"""
return base_models.DELETION_POLICY.DELETE_AT_END
@staticmethod
def get_model_association_to_user():
"""Currently, the model holds authentication details relevant only for
backend. Currently the only relevant user data is the username of the
parent.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER
@staticmethod
def get_field_names_for_takeout():
"""We do not want to export the internal user id for the parent, so we
export the username instead.
"""
return {
'parent_user_id': 'parent_username'
}
@classmethod
def get_export_policy(cls):
"""Model doesn't contain any data directly corresponding to a user.
        Currently, the model holds authentication details relevant only to the
        backend, and no exportable user data. It may contain user data in the
future.
"""
return dict(super(cls, cls).get_export_policy(), **{
'gae_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'firebase_auth_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'parent_user_id': base_models.EXPORT_POLICY.EXPORTED
})
@classmethod
def export_data(cls, user_id):
"""Exports the username of the parent."""
user_auth_model = cls.get(user_id, strict=False)
if user_auth_model and user_auth_model.parent_user_id:
parent_data = user_models.UserSettingsModel.get(
user_auth_model.parent_user_id)
parent_username = parent_data.username
return {'parent_username': parent_username}
else:
return {}
@classmethod
def apply_deletion_policy(cls, user_id):
"""Delete instances of UserAuthDetailsModel for the user.
Args:
user_id: str. The ID of the user whose data should be deleted.
"""
cls.delete_by_id(user_id)
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether UserAuthDetailsModel exists for the given user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any UserAuthDetailsModel refers to the given user ID.
"""
return cls.get_by_id(user_id) is not None
@classmethod
def get_by_auth_id(cls, provider_id, auth_id):
"""Fetch a user entry by auth_id of a particular auth service.
Args:
provider_id: str. Name of the provider of the auth ID.
auth_id: str. Authentication detail corresponding to the
authentication provider.
Returns:
UserAuthDetailsModel. The UserAuthDetailsModel instance having a
particular user mapped to the given auth_id and the auth provider
if there exists one, else None.
"""
if provider_id == feconf.GAE_AUTH_PROVIDER_ID:
return cls.query(cls.gae_id == auth_id).get()
elif provider_id == feconf.FIREBASE_AUTH_PROVIDER_ID:
return cls.query(cls.firebase_auth_id == auth_id).get()
return None
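# Hedged usage sketch for UserAuthDetailsModel.get_by_auth_id (the auth ID
# value below is a made-up placeholder, not a real identifier):
#
#   model = UserAuthDetailsModel.get_by_auth_id(
#       feconf.GAE_AUTH_PROVIDER_ID, 'some_gae_auth_id')
#   user_id = model.id if model is not None else None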
class UserIdentifiersModel(base_models.BaseModel):
"""Stores the relationship between user ID and GAE ID.
Instances of this class are keyed by GAE ID.
"""
user_id = datastore_services.StringProperty(required=True, indexed=True)
@staticmethod
def get_deletion_policy():
"""Model contains data to delete corresponding to a user: id, and
user_id fields.
"""
return base_models.DELETION_POLICY.DELETE_AT_END
@staticmethod
def get_model_association_to_user():
"""Currently, the model holds identifiers relevant only for backend that
should not be exported.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls):
"""Model doesn't contain any data directly corresponding to a user.
        Currently, the model holds authentication details relevant only to the
        backend, and no exportable user data. It may contain user data in the
future.
"""
return dict(super(cls, cls).get_export_policy(), **{
'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def apply_deletion_policy(cls, user_id):
"""Delete instances of UserIdentifiersModel for the user.
Args:
user_id: str. The ID of the user whose data should be deleted.
"""
datastore_services.delete_multi(
cls.query(cls.user_id == user_id).fetch(keys_only=True))
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether UserIdentifiersModel exists for the given user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any UserIdentifiersModel refers to the given user ID.
"""
return cls.query(cls.user_id == user_id).get(keys_only=True) is not None
@classmethod
def get_by_gae_id(cls, gae_id):
"""Fetch an entry by GAE ID.
Args:
gae_id: str. The GAE ID.
Returns:
UserIdentifiersModel. The model with user_id field equal to user_id
argument.
"""
return cls.get_by_id(gae_id)
@classmethod
def get_by_user_id(cls, user_id):
"""Fetch an entry by user ID.
Args:
user_id: str. The user ID.
Returns:
UserIdentifiersModel. The model with user_id field equal to user_id
argument.
"""
return cls.query(cls.user_id == user_id).get()
class UserIdByFirebaseAuthIdModel(base_models.BaseModel):
"""Stores the relationship between user ID and Firebase auth ID.
Instances of this class are keyed by Firebase auth ID.
"""
user_id = datastore_services.StringProperty(required=True, indexed=True)
@staticmethod
def get_deletion_policy():
"""Model has data to delete corresponding to users: id and user_id."""
return base_models.DELETION_POLICY.DELETE_AT_END
@staticmethod
def get_model_association_to_user():
"""Currently, the model holds IDs relevant only for backend that should
not be exported.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls):
"""Model doesn't contain any data directly corresponding to a user.
        Currently, the model holds authentication details relevant only to the
        backend, and no exportable user data. It may contain user data in the
future.
"""
return dict(
super(UserIdByFirebaseAuthIdModel, cls).get_export_policy(),
**{'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE})
@classmethod
def apply_deletion_policy(cls, user_id):
"""Delete instances of UserIdByFirebaseAuthIdModel for the user.
Args:
user_id: str. The ID of the user whose data should be deleted.
"""
datastore_services.delete_multi(
cls.query(cls.user_id == user_id).fetch(keys_only=True))
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether UserIdByFirebaseAuthIdModel exists for given user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any UserIdByFirebaseAuthIdModel refers to the given
user ID.
"""
return cls.query(cls.user_id == user_id).get(keys_only=True) is not None
@classmethod
def get_by_user_id(cls, user_id):
"""Fetch an entry by user ID.
Args:
user_id: str. The user ID.
Returns:
UserIdByFirebaseAuthIdModel. The model with user_id field equal
to user_id argument.
"""
return cls.query(cls.user_id == user_id).get()
class FirebaseSeedModel(base_models.BaseModel):
"""Dummy model used to kick-off the DestroyFirebaseAccountsOneOffJob."""
@staticmethod
def get_deletion_policy():
"""Model should never be erased."""
return base_models.DELETION_POLICY.KEEP
@staticmethod
def get_model_association_to_user():
"""Model does not correspond to any users."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
"""Model does not correspond to any users."""
return False
| apache-2.0 | -8,613,376,661,223,391,000 | 34.253247 | 80 | 0.649383 | false |
williechen/DailyApp | 11/py201501/userCard/UserResideCard.py | 1 | 3493 | '''
Created on 2014/5/10
@author: Administrator
@deprecated:
The unified ID number for foreign residents is 10 characters long; the first
2 characters are English letters:
the 1st character is the area code (same scheme as note 1 of the national ID card),
the 2nd character is the gender code (note 2); characters 3 to 10 are Arabic numerals,
of which characters 3 to 9 are a serial number and the 10th is a check digit.
Taipei City A, Taichung City B, Keelung City C, Tainan City D, Kaohsiung City E
New Taipei City F, Yilan County G, Taoyuan County H, Chiayi City I, Hsinchu County J
Miaoli County K, former Taichung County L, Nantou County M, Changhua County N, Hsinchu City O
Yunlin County P, Chiayi County Q, former Tainan County R, former Kaohsiung County S, Pingtung County T
Hualien County U, Taitung County V, Kinmen County W, Penghu County X, Lienchiang County Z
Nationals without household registration in the Taiwan area, people from mainland China,
and Hong Kong/Macao residents: males use A, females use B.
Foreigners: males use C, females use D.
'''
import random
AREA_CODE = {
"A": "10", "B": "11", "C": "12", "D": "13", "E": "14", "F": "15", "G": "16", "H": "17", "J": "18",
"K": "19", "L": "20", "M": "21", "N": "22", "P": "23", "Q": "24", "R": "25", "S": "26", "T": "27",
"U": "28", "V": "29", "W": "32", "X": "30", "Y": "31", "Z": "33", "I": "34", "O": "35"
}
SEXY = {
"1, 1": 'A', "1, 2":'B',
"2, 1":'C', "2, 2": 'D'
}
def userResideCardGenerate(area, sexy, block):
firstCode = area.upper()
secondCode1 = "%s, %s" % (block, sexy)
secondCode2 = SEXY[secondCode1]
firstNumber = AREA_CODE[firstCode]
secondNumber = AREA_CODE[secondCode2][1]
sevenList = ''.join(runSevenDigits()[1])
lastNumber = 0
value = firstNumber + secondNumber+ sevenList
valueNumber = runSevenDigits(value)
if (valueNumber[0] == 'Y'):
count = (sum(countDigits(valueNumber[1])) % 10)
lastNumber = (10 - count) if count else 0
return firstCode+secondCode2+sevenList+str(lastNumber)
def userResideCardVerification(number):
isVerification = False
numbers = number.upper()
firstNumber = AREA_CODE[ numbers[0] ]
secondNumber = AREA_CODE[ numbers[1] ][1]
sevenList = number[2:9]
lastNumber = number[-1]
value = firstNumber + secondNumber+sevenList
valueNumber = runSevenDigits(value)
if (valueNumber[0] == "Y"):
sumValue = sum(countDigits(valueNumber[1])) + int(lastNumber)
if ((sumValue % 10) == 0):
isVerification = True
return isVerification
def runSevenDigits(digit=None):
seven = []
isDigit = 'N'
    # Has a value and the string consists entirely of digits
if (digit and digit.isdigit()):
isDigit = 'Y'
for i in digit:
seven.append(i)
    # No value given
if (digit == None):
isDigit = 'Y'
for i in range(7):
seven.append("%s" % random.choice(range(1, 9)));
return (isDigit, seven)
def countDigits(digit):
weighted = [1, 9 , 8, 7, 6, 5, 4, 3, 2, 1]
    # Multiply each digit by 1, 9, 8, 7, 6, 5, 4, 3, 2, 1 respectively
weightDigits = []
for i in range(10):
weightValue = int(digit[i]) * weighted[i]
weightDigits.append("%s" % weightValue)
    # If the product has two or more digits, keep only the ones digit
sumDigits = []
for i in weightDigits:
if (len(i) >= 2):
sumDigits.append(int(i[1]))
else:
sumDigits.append(int(i))
return sumDigits
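# Worked example of the check-digit arithmetic (computed by hand from the
# functions above, using the sample number 'FA12345689' from the __main__
# block below):
#   'F' -> '15', 'A' -> '10' (only the second digit '0' is kept),
#   so the digit string is 1 5 0 1 2 3 4 5 6 8.
#   Weighting by 1 9 8 7 6 5 4 3 2 1 gives 1 45 0 7 12 15 16 15 12 8;
#   keeping only the ones digit of each product gives 1 5 0 7 2 5 6 5 2 8,
#   which sums to 41. Adding the check digit 9 gives 50, and 50 % 10 == 0,
#   so the number verifies as valid.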
if __name__ == '__main__':
number = "FA12345689"
print(userResideCardVerification(number))
print(userResideCardGenerate('f', '1', 2)) | lgpl-3.0 | -8,885,407,845,444,418,000 | 25.633028 | 100 | 0.54337 | false |
pulsar-chem/Pulsar-Core | test/math/TestUniverse.py | 1 | 3083 | import pulsar as psr
def run_test():
tester = psr.PyTester("Testing Universe Python Interface")
#Constructors and assignment
U0, U1=psr.DoubleUniverse(),psr.DoubleUniverse()
tester.test_equal("Default constructor",U0,U1)
U2=psr.DoubleUniverse([1.0,2.0,3.0])
U3=psr.DoubleUniverse([1.0,2.0,3.0])
tester.test_equal("Variadic initializer",U2,U3);
U4=psr.DoubleUniverse(U3);
tester.test_equal("Copy constructor",U3,U4);
U0=U2;
tester.test_equal("Assignment",U0,U3);
#Basic properties
tester.test_return("Size",True,3,U0.size)
tester.test_return("Count",True,True,U0.count,1.0)
tester.test_return("Get index",True,2,U3.idx,3.0)
tester.test_call("Get non-existant index",False,U3.idx,5.0)
tester.test_return("Get hash U0",True,U0.my_hash(),U3.my_hash)
tester.test_return("Get hash U3",True,U0.my_hash(),U4.my_hash)
#Element access/modification
tester.test_return("Subscript operator",True,3.0,U0.__getitem__,2)
tester.test_call("Subscript operator (out of range)",False,U0.__getitem__,9)
tester.test_return("at function",True,3.0,U0.at,2)
tester.test_call("at function (out of range)",False,U0.at,9)
U0.insert(4.0)
tester.test_return("insert elements",True,U0,U3.insert,4.0)
#U0=U3=1-4; U1=empty; U2=U4=1-3
#Set operations
U5=psr.DoubleUniverse([4.0,5.0,6.0])
U8=psr.DoubleUniverse([1.0,2.0,3.0,4.0,5.0,6.0])
tester.test_return("union assign",True,U8,U0.union_assign,U5)
tester.test_return("union",True,U8,U3.set_union,U5)
U9=psr.DoubleUniverse([1.0,2.0])
U10=psr.DoubleUniverse([1.0,2.0,15.0,16.0])
tester.test_return("intersection assign",True,U9,U0.intersection_assign,U10)
print(U10)
tester.test_return("intersection",True,U9,U3.intersection,U10)
U11=psr.DoubleUniverse([3.0,4.0,5.0,6.0])
tester.test_return("difference",True,U11,U8.difference,U9)
tester.test_return("difference assign",True,U11,U8.difference_assign,U9)
#Comparison operators
tester.test_return("not equal",True,True,U9.__ne__,U11)
tester.test_return("superset equal",True,True,U11.is_superset_of,U8)
tester.test_return("superset true",True,True,U10.is_superset_of,U9)
tester.test_return("superset false",True,False,U9.is_superset_of,U10)
tester.test_return("proper superset equal",True,False,U11.is_proper_superset_of,U8)
tester.test_return("proper supserset true",True,True,U10.is_proper_superset_of,U9)
tester.test_return("proper superset false",True,False,U9.is_proper_superset_of,U10)
tester.test_return("subset equal",True,True,U11.is_subset_of,U8)
tester.test_return("subset true",True,True,U9.is_subset_of,U10)
tester.test_return("subset false",True,False,U10.is_subset_of,U9)
tester.test_return("proper subset equal",True,False,U11.is_proper_subset_of,U8)
tester.test_return("proper subset true",True,True,U9.is_proper_subset_of,U10)
tester.test_return("proper subset false",True,False,U10.is_proper_subset_of,U9)
tester.print_results()
return tester.nfailed()
| bsd-3-clause | 1,687,321,810,387,588,600 | 46.430769 | 87 | 0.697373 | false |
ghchinoy/tensorflow | tensorflow/contrib/fused_conv/python/ops/fused_conv2d_bias_activation_op_test_base.py | 2 | 43709 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides test suites that can be run to test fused convolutions.
Each of the two test suites in this module, FusedConv2DBiasActivationTest and
FusedConvInt8Tests, should be "instantiated" by declaring a class which inherits
from the FusedConv test and a class that provides the standard test.TestCase
API.
See e.g. fused_conv2d_bias_activation_op_test.py in this folder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import numpy as np
from tensorflow.contrib.fused_conv.python.ops import fused_conv2d_bias_activation_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _GetShrunkInceptionShapes(shrink=10):
"""Iterator for smaller versions of convolution shapes in 2015 Inception.
Relative to inception, each depth value is `depth // shrink`.
Args:
shrink: Factor to shrink each depth value by relative to Inception.
Yields:
Tuple (input_size, filter_size, out_size, stride, padding), the convolution
parameters of Inception layers.
"""
input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384], [
4, 8, 8, 2048
], [4, 8, 8, 448], [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 2048], [
4, 8, 8, 1760
], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 17, 17, 192], [
4, 17, 17, 192
], [4, 17, 17, 1248], [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224], [
4, 17, 17, 192
], [4, 17, 17, 192], [4, 17, 17, 1216], [4, 17, 17, 1216], [4, 17, 17, 224], [
4, 17, 17, 192
], [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152], [4, 17, 17, 192], [
4, 17, 17, 160
], [4, 17, 17, 1152], [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
[4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128], [
4, 17, 17, 768
], [4, 17, 17, 128], [4, 17, 17, 128], [4, 17, 17, 768],
[4, 17, 17, 768], [4, 35, 35, 96], [4, 35, 35, 288], [
4, 35, 35, 64
], [4, 35, 35, 288], [4, 35, 35, 256], [4, 35, 35, 48], [
4, 35, 35, 256
], [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192], [
4, 35, 35, 192
], [4, 73, 73, 64], [4, 73, 73, 64], [4, 147, 147, 24]]
filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384], [
1, 1, 2048, 192
], [3, 3, 448, 384], [1, 1, 2048, 320], [1, 1, 2048, 448], [1, 1, 2048, 384],
[1, 1, 1760, 384], [1, 1, 1760, 192], [1, 1, 1760, 448], [
1, 1, 1760, 320
], [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192], [
3, 3, 128, 320
], [1, 1, 1248, 128], [1, 3, 224, 224], [3, 1, 192, 256], [
1, 3, 192, 256
], [1, 1, 1216, 192], [1, 1, 1216, 96], [3, 1, 224, 224], [
3, 3, 192, 224
], [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128], [
3, 1, 192, 192
], [3, 3, 160, 192], [1, 1, 1152, 160], [1, 1, 1024, 128], [
1, 3, 128, 192
], [1, 1, 1024, 160], [3, 1, 128, 192], [1, 1, 1024, 256], [
3, 1, 128, 128
], [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128], [
1, 1, 768, 128
], [1, 1, 768, 320], [3, 3, 96, 96], [3, 3, 288, 384], [
3, 3, 64, 96
], [1, 1, 288, 64], [1, 1, 256, 64], [5, 5, 48, 64],
[1, 1, 256, 48], [3, 3, 96, 96], [1, 1, 192, 32], [
1, 1, 192, 64
], [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64,
64], [1, 1, 24, 64]]
out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384], [4, 8, 8, 192], [
4, 8, 8, 384
], [4, 8, 8, 320], [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384], [
4, 8, 8, 192
], [4, 8, 8, 448], [4, 8, 8, 320], [4, 8, 8, 192], [4, 17, 17, 192], [
4, 17, 17, 192
], [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224], [4, 17, 17, 256], [
4, 17, 17, 256
], [4, 17, 17, 192], [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224], [
4, 17, 17, 192
], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 192], [
4, 17, 17, 160
], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 192], [
4, 17, 17, 256
], [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128], [
4, 17, 17, 128
], [4, 17, 17, 320], [4, 17, 17, 96], [4, 17, 17, 384], [4, 35, 35, 96], [
4, 35, 35, 64
], [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48], [4, 35, 35, 96],
[4, 35, 35, 32], [4, 35, 35, 64], [4, 35, 35, 48],
[4, 71, 71, 192], [4, 73, 73, 64], [4, 147, 147, 64]]
strides = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1
]
# Shrink sizes to make the test faster
for i in input_sizes:
i[3] //= shrink
for f in filter_sizes:
f[2] //= shrink
f[3] //= shrink
for o in out_sizes:
o[3] //= shrink
# pylint: disable=invalid-name
VALID = "VALID"
SAME = "SAME"
# pylint: enable=invalid-name
paddings = [
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
VALID, SAME, SAME, VALID, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
SAME, SAME, SAME, SAME, VALID, VALID, VALID
]
for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
paddings):
yield i, f, o, s, p
def _GetTestConfigs():
"""Get all the valid tests configs to run.
Returns:
all the valid test configs as tuples of data_format and use_gpu.
"""
test_configs = [("NCHW", True), ("NHWC", True)]
return test_configs
def _IotaNdF32Constant(dim_sizes):
def MakeList(dims):
if len(dims) == 1:
return [float(1 + f) for f in range(dims[0])]
return [MakeList(dims[1:]) for _ in range(dims[0])]
return constant_op.constant(MakeList(dim_sizes), dtype=dtypes.float32)
def _GetInceptionFwdTest(input_size,
filter_size,
stride,
padding,
gpu_only=True):
def Test(self):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping InceptionFwd %s",
(input_size, filter_size, stride, padding))
return
tf_logging.info("Testing InceptionFwd %s",
(input_size, filter_size, stride, padding))
self.CompareFwdValues(input_size, filter_size, [stride, stride], padding)
return Test
class FusedConv2DBiasActivationTest(object):
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def _DtypesToTest(self, use_gpu):
return [dtypes.float32]
def _FilterFormatsToTest(self, use_gpu):
return ["HWIO", "OIHW"]
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, bias,
strides, padding, activation_mode, data_format,
filter_format, dtype):
"""Verifies the output values of the convolution function.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
bias: 1-D bias tensor of length output_depth.
strides: Stride: [col_stride, row_stride]
padding: Padding type.
activation_mode: Activation mode.
data_format: Format of the data tensors.
filter_format: Filter format to use for the fused convolution.
dtype: Data type for inputs and outputs.
Returns:
Symbolic tensor value and reference value that can be used to
execute the computation and verify the results.
"""
input_size = np.prod(tensor_in_sizes)
filter_size = np.prod(filter_in_sizes)
bias_size = filter_in_sizes[-1] # equals to output depth
# Initializes the input tensor with array containing incrementing
# numbers from 1.
x1 = [f * 1.0 for f in range(1, input_size + 1)]
x2 = [f * 1.0 for f in range(1, filter_size + 1)]
# This is to guarantee that there are always negative values after
# bias add so that we can test whether relu works correctly.
x3 = bias
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)
fused_t2 = t2
if filter_format == "OIHW":
fused_t2 = _HwioToOihw(t2)
t3 = constant_op.constant(x3, shape=[bias_size], dtype=dtype)
strides = [1] + strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
t1,
fused_t2,
t3,
strides=strides,
padding=padding,
data_format=data_format,
filter_format=filter_format,
activation_mode=activation_mode)
ref_conv_output = nn_ops.conv2d(
t1, t2, strides=strides, padding=padding, data_format=data_format)
ref_bias_output = nn_ops.bias_add(
ref_conv_output, t3, data_format=data_format)
ref_output = nn_ops.relu(ref_bias_output)
if data_format == "NCHW":
output = test_util.NCHWToNHWC(output)
ref_output = test_util.NCHWToNHWC(ref_output)
return output, ref_output
def CompareFwdValues(self, tensor_in_sizes, filter_in_sizes, conv_strides,
padding):
"""Verifies that CPU and GPU produce the same values.
Args:
tensor_in_sizes: Input tensor dimensions in
[batch, input_rows, input_cols, input_depth].
filter_in_sizes: Filter tensor dimensions in
[kernel_rows, kernel_cols, input_depth, output_depth].
conv_strides: [row_stride, col_stride] for the convolution;
padding: Padding type.
"""
x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
x3 = np.random.rand(*[filter_in_sizes[-1]]).astype(np.float32)
def _SetupVal(data_format, use_gpu):
t1 = constant_op.constant(x1, shape=tensor_in_sizes)
t2 = constant_op.constant(x2, shape=filter_in_sizes)
t3 = constant_op.constant(x3, shape=[filter_in_sizes[-1]])
strides = [1] + conv_strides + [1]
if data_format == "NCHW":
t1 = test_util.NHWCToNCHW(t1)
strides = test_util.NHWCToNCHW(strides)
output = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
t1,
t2,
t3,
strides=strides,
padding=padding,
data_format=data_format,
activation_mode="Relu")
if data_format == "NCHW":
output = test_util.NCHWToNHWC(output)
return output
with self.session() as sess, self.test_scope():
tensors = []
for (data_format, use_gpu) in _GetTestConfigs():
tensors.append(_SetupVal(data_format, use_gpu))
values = sess.run(tensors)
for i in range(1, len(values)):
self.assertAllClose(values[0], values[i], rtol=1e-3, atol=1e-3)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, bias, strides,
padding):
with self.session() as sess, self.test_scope():
tensors = []
ref_tensors = []
for (data_format, use_gpu) in _GetTestConfigs():
with ops.device("/gpu:0" if use_gpu else "/cpu:0"):
for dtype in self._DtypesToTest(use_gpu):
for filter_format in self._FilterFormatsToTest(use_gpu):
result, expected = self._SetupValuesForDevice(
tensor_in_sizes, filter_in_sizes, bias, strides, padding,
"Relu", data_format, filter_format, dtype)
tensors.append(result)
ref_tensors.append(expected)
values = sess.run(tensors)
ref_values = sess.run(ref_tensors)
for i in range(len(tensors)):
conv = tensors[i]
value = values[i]
ref_value = ref_values[i]
tf_logging.info("expected = %s", ref_value)
tf_logging.info("actual = %s", value)
tol = 1e-5
if value.dtype == np.float16:
tol = 1e-3
self.assertAllClose(
np.ravel(ref_value), np.ravel(value), atol=tol, rtol=tol)
self.assertShapeEqual(value, conv)
def testConv2D1x1Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D1x1Filter test.")
return
# expected_output = [
# 0.0, 0.0, 0.0, 21.0, 0.0, 0.0, 57.0, 0.0, 0.0, 93.0, 41.0, 0.0, 129.0,
# 86.0, 43.0, 165.0, 131.0, 97.0
# ]
medians = [-45.0, -130.0, -215.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
bias=medians,
strides=[1, 1],
padding="VALID")
def testConv2DEmpty(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DEmpty test.")
return
# expected_output = []
self._VerifyValues(
tensor_in_sizes=[0, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
bias=[0.0, 0.0, 0.0],
strides=[1, 1],
padding="VALID")
def testConv2D2x2Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2Filter test.")
return
# expected_output = [0.0, 0.0, 0.0, 401.0, 533.0, 665.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2500.0, -2500.0, -2500.0],
strides=[1, 1],
padding="VALID")
def testConv2D1x2Filter(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D1x2Filter test.")
return
# expected_output = [
# 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 190.0, 265.0, 340.0, 343.0, 436.0, 529.0
# ]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
bias=[-500.0, -500.0, -500.0],
strides=[1, 1],
padding="VALID")
def testConv2D2x2FilterStride2(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride2 test.")
return
# expected_output = [0.0, 67.0, 163.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2300.0, -2300.0, -2300.0],
strides=[2, 2],
padding="VALID")
def testConv2D2x2FilterStride2Same(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride2Same test.")
return
# expected_output = [0.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
bias=[-2300.0, -1000.0, -1000.0],
strides=[2, 2],
padding="SAME")
def testConv2D2x2FilterStride1x2(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2D2x2FilterStride1x2 test.")
return
# expected_output = [0.0, 0.0, 8.0, 28.0, 48.0, 68.0]
self._VerifyValues(
tensor_in_sizes=[1, 3, 6, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-90.0],
strides=[1, 2],
padding="VALID")
def testConv2DKernelSmallerThanStrideValid(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSmallerThanStrideValid test.")
return
# expected_output = [0, 0, 175, 205]
self._VerifyValues(
tensor_in_sizes=[1, 7, 7, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-100.0],
strides=[3, 3],
padding="VALID")
def testConv2DKernelSmallerThanStrideSame(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSmallerThanStrideSame test.")
return
# expected = [0, 0, 2, 4]
self._VerifyValues(
tensor_in_sizes=[1, 3, 3, 1],
filter_in_sizes=[1, 1, 1, 1],
bias=[-5.0],
strides=[2, 2],
padding="SAME")
# expected = [0, 0, 4, 6]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[1, 1, 1, 1],
bias=[-5.0],
strides=[2, 2],
padding="SAME")
# expected = [4, 0, 1, 0]
self._VerifyValues(
tensor_in_sizes=[1, 4, 4, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-40.0],
strides=[3, 3],
padding="SAME")
def testConv2DKernelSizeMatchesInputSize(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping Conv2DKernelSizeMatchesInputSize test.")
return
# expected = [0, 5]
self._VerifyValues(
tensor_in_sizes=[1, 2, 2, 1],
filter_in_sizes=[2, 2, 1, 2],
bias=[-50.0, -55.0],
strides=[1, 1],
padding="VALID")
# expected = [0, 2, 282, 322]
self._VerifyValues(
tensor_in_sizes=[1, 8, 8, 1],
filter_in_sizes=[2, 2, 1, 1],
bias=[-200.0],
strides=[4, 4],
padding="SAME")
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
# Incorrect filter shape.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32),
array_ops.placeholder(dtypes.float32, shape=[1, 3]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
# Depth mismatch.
with self.assertRaises(ValueError):
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
array_ops.placeholder(dtypes.float32, shape=[32, 20, 20, 3]),
array_ops.placeholder(dtypes.float32, shape=[4, 4, 2, 2]),
array_ops.placeholder(dtypes.float32),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Relu")
def testOpEdgeCases(self, gpu_only=True):
if gpu_only and not test.is_gpu_available():
tf_logging.info("Skipping OpEdgeCases tests.")
return
with self.session() as sess, self.test_scope():
# Illegal strides.
with self.assertRaisesRegexp(
errors_impl.UnimplementedError,
".*strides.*in the batch and depth dimensions"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[2, 1, 1, 1],
padding="SAME",
activation_mode="Relu"))
with self.assertRaisesRegexp(
errors_impl.UnimplementedError,
".*strides.*in the batch and depth dimensions"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[1, 1, 1, 2],
padding="SAME",
activation_mode="Relu"))
# Illegal activation mode.
with self.assertRaisesRegexp(ValueError,
"Op passed string 'Tanh' not in:"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1, 1, 1, 1]),
_IotaNdF32Constant([1]),
strides=[1, 1, 1, 1],
padding="SAME",
activation_mode="Tanh"))
# Filter larger than input.
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([32, 20, 20, 3]),
_IotaNdF32Constant([20, 21, 3, 2]),
_IotaNdF32Constant([2]),
strides=[1, 1, 1, 1],
padding="VALID",
activation_mode="Relu"))
with self.assertRaisesRegexp(ValueError, "Negative dimension size"):
sess.run(
fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_IotaNdF32Constant([32, 20, 20, 3]),
_IotaNdF32Constant([21, 20, 3, 2]),
_IotaNdF32Constant([2]),
strides=[1, 1, 1, 1],
padding="VALID",
activation_mode="Relu"))
# Add InceptionFwd tests to FusedConv2DBiasActivationTest.
for index, (input_size_, filter_size_, output_size_, stride_,
padding_) in enumerate(_GetShrunkInceptionShapes()):
setattr(FusedConv2DBiasActivationTest, "testInceptionFwd_" + str(index),
_GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_))
# TODO(b/35359731)
# Add Fwd, BckInput, and BackFilter tests verifying that, for certain input
# parameter sets, the winograd nonfused algorithm is excluded from conv
# autotune. If the winograd nonfused algorithm were offered as an autotune
# option in such cases and the cuDNN version is smaller than 7, the following
# tests would fail.
ishape = [1, 400, 400, 1]
fshape = [1, 1, 1, 256]
oshape = [1, 400, 400, 256]
setattr(FusedConv2DBiasActivationTest, "testInceptionFwd_No_Winograd_Nonfused",
_GetInceptionFwdTest(ishape, fshape, 1, "SAME", gpu_only=True))
def _CalculateConvolvedOutputDim(input_dim, filter_dim, stride, padding_type):
"""Calculates the size of an output dimension of a strided convolution.
Given the sizes of the corresponding dimension of the input and filter shapes,
  and the stride and padding_type, calculates the size of the output dimension.
This function can be called separately for each input dimension.
Args:
input_dim: An `int` specifying the size of the input dimension.
filter_dim: An `int` specifying the size of the filter dimension.
stride: An `int` specifying the step size of the convolution along the
input dimension.
padding_type: either 'VALID' or 'SAME'.
Returns:
The size of the output dimension.
"""
if padding_type == "VALID":
return (input_dim - filter_dim + stride) // stride
else: # padding_type == 'SAME'
return (input_dim + stride - 1) // stride
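# A quick illustrative check of the formula above (editorial example values,
# not taken from the test data): with input_dim=7, filter_dim=2 and stride=3,
# VALID padding gives (7 - 2 + 3) // 3 = 2 and SAME padding gives
# (7 + 3 - 1) // 3 = 3.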
def _GetFusedConvInt8TestParams():
"""Returns test parameters shared by all Int8 FusedConv tests."""
_test_params = [
{
"batch_size": 4,
"input_channels": 256,
"output_channels": 256,
"input_height": 228,
"input_width": 228,
"filter_height": 6,
"filter_width": 6,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.00002,
"side_input_scale": 0.2,
"bias_scale": 1.0,
"padding_type": "SAME"
},
{
"batch_size": 1,
"input_channels": 4,
"output_channels": 4,
"input_height": 8,
"input_width": 8,
"filter_height": 6,
"filter_width": 6,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 1,
"input_channels": 4,
"output_channels": 4,
"input_height": 6,
"input_width": 6,
"filter_height": 6,
"filter_width": 6,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "VALID"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.0,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 2,
"input_channels": 8,
"output_channels": 16,
"input_height": 8,
"input_width": 8,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 2,
"horizontal_stride": 2,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "VALID"
},
{
"batch_size": 2,
"input_channels": 16,
"output_channels": 16,
"input_height": 9,
"input_width": 9,
"filter_height": 3,
"filter_width": 3,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.001,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 5,
"filter_width": 5,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.001,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 7,
"filter_width": 1,
"vertical_stride": 2,
"horizontal_stride": 1,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
{
"batch_size": 3,
"input_channels": 8,
"output_channels": 8,
"input_height": 9,
"input_width": 9,
"filter_height": 1,
"filter_width": 7,
"vertical_stride": 1,
"horizontal_stride": 1,
"conv_input_scale": 0.002,
"side_input_scale": 0.5,
"bias_scale": 1,
"padding_type": "SAME"
},
]
return _test_params
def _Int8Roundtrip(fn, tensor):
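  # Bitcast qint8 -> int8, apply the layout transform `fn`, then bitcast back.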
return array_ops.bitcast(
fn(array_ops.bitcast(tensor, dtypes.int8)), dtypes.qint8)
def _NchwVectCToNchw(in_tensor):
# [N, C / 4, H, W, 4] => [N, C / 4, 4, H, W] == [N, C, H, W]
t = array_ops.transpose(in_tensor, [0, 1, 4, 2, 3])
n = in_tensor.shape.dims[0].value
c = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
return array_ops.reshape(t, [n, c, h, w])
def _NchwVectCToNhwc(in_tensor):
# [N, C / 4, H, W, 4] => [N, H, W, C / 4, 4] == [N, H, W, C]
t = array_ops.transpose(in_tensor, [0, 2, 3, 1, 4])
n = in_tensor.shape.dims[0].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
c = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
return array_ops.reshape(t, [n, h, w, c])
def _OihwVectIToHwio(in_tensor):
  # [O, I / 4, H, W, 4] => [H, W, I / 4, 4, O] == [H, W, I, O]
t = array_ops.transpose(in_tensor, [2, 3, 1, 4, 0])
o = in_tensor.shape.dims[0].value
i = in_tensor.shape.dims[1].value * in_tensor.shape.dims[4].value
h = in_tensor.shape.dims[2].value
w = in_tensor.shape.dims[3].value
return array_ops.reshape(t, [h, w, i, o])
def _NchwToNchwVectC(in_tensor):
n, c, h, w = in_tensor.shape.as_list()
assert c % 4 == 0
t = array_ops.reshape(in_tensor, [n, c // 4, 4, h, w])
return array_ops.transpose(t, [0, 1, 3, 4, 2])
def _NhwcToNchwVectC(in_tensor):
  # [N, H, W, C] => [N, H, W, C / 4, 4] => [N, C / 4, H, W, 4]
n, h, w, c = in_tensor.shape.as_list()
assert c % 4 == 0
t = array_ops.reshape(in_tensor, [n, h, w, c // 4, 4])
return array_ops.transpose(t, [0, 3, 1, 2, 4])
def _HwioToOihw(in_tensor):
return array_ops.transpose(in_tensor, [3, 2, 0, 1])
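# Illustrative shape walk-through for the layout helpers above (a hypothetical
# example, not used by the tests): an NCHW tensor of shape [1, 8, 4, 4] becomes
# [1, 2, 4, 4, 4] in NCHW_VECT_C via _NchwToNchwVectC, and _NchwVectCToNchw
# maps it back to [1, 8, 4, 4]; the OIHW_VECT_I / HWIO helpers perform the
# analogous conversions for filter tensors.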
def _SimulateFusedConv2dBiasActivationInt8OnCpu(conv_input_scale, conv_input,
kernel, padding, strides,
side_input_scale, side_input,
biases, apply_relu):
"""Simulates the int8 fused 2-D convolution op using separate float ops.
The arguments and return values have the same format, meanings and
restrictions as the actual op.
Args:
conv_input_scale: A scalar 'float'.
conv_input: A `Tensor` of type `qint8` in NHWC layout.
kernel: A `Tensor` of type `qint8` in HWIO layout.
padding: A `string` from: `"SAME", "VALID"`.
strides: A list of `ints`.
side_input_scale: A scalar 'float'.
side_input: A `Tensor` of type `qint8` in NHWC layout.
biases: A `Tensor` of type `float32` in NHWC layout.
apply_relu: A boolean to specify whether to apply "Relu" activation function
that clips outputs to the range [0, 127], or "None" activation that clips
to the range [-128, 127].
Returns:
A `Tensor` of type `qint8` in NHWC layout.
"""
conv_result = nn_ops.conv2d(
math_ops.cast(conv_input, dtypes.float32),
math_ops.cast(kernel, dtypes.float32),
strides=strides,
padding=padding,
data_format="NHWC") * conv_input_scale
conv_and_side_inputs = conv_result + side_input_scale * math_ops.cast(
side_input, dtypes.float32)
output = nn_ops.bias_add(conv_and_side_inputs, biases, data_format="NHWC")
if apply_relu:
output = nn_ops.relu(output)
# In this case quantization is identical to clipping and casting.
result, _, _ = gen_array_ops.quantize_v2(output, -128, 127, dtypes.qint8)
return result
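# Rough sketch of the arithmetic simulated above, with assumed numbers for
# orientation only: with conv_input_scale=0.5, a raw int8 convolution sum of 3
# contributes 1.5; the bias and the scaled side input are added in float32, and
# quantize_v2 then clips to [-128, 127] and rounds half to even back to qint8
# (see testRoundingMode below for the rounding behaviour).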
# FusedConv2DBiasActivation on CPU supports only NHWC/HWIO data format.
class FusedConvInt8CPUTests(object):
"""Verify quantization with CPU kernel."""
_test_params = _GetFusedConvInt8TestParams()
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def runTest(self, test_param, apply_relu):
"""Runs tests for dimensions configured in test_param."""
batch_size = test_param["batch_size"]
input_channels = test_param["input_channels"]
output_channels = test_param["output_channels"]
input_height = test_param["input_height"]
input_width = test_param["input_width"]
filter_height = test_param["filter_height"]
filter_width = test_param["filter_width"]
vertical_stride = test_param["vertical_stride"]
horizontal_stride = test_param["horizontal_stride"]
conv_input_scale = test_param["conv_input_scale"]
side_input_scale = test_param["side_input_scale"]
bias_scale = test_param["bias_scale"]
padding_type = test_param["padding_type"]
with self.session() as sess, self.test_scope():
conv_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, input_height, input_width, input_channels],
minval=-0.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
self.assertTrue(
sess.run(
math_ops.reduce_all(
math_ops.greater_equal(
array_ops.bitcast(conv_input, dtypes.int8), 0))))
kernel, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[filter_height, filter_width, input_channels, output_channels],
minval=-1.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
output_height = _CalculateConvolvedOutputDim(input_height, filter_height,
vertical_stride,
padding_type)
output_width = _CalculateConvolvedOutputDim(input_width, filter_width,
horizontal_stride,
padding_type)
tf_logging.info("output_height=%s, output_width=%s", output_height,
output_width)
side_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, output_height, output_width, output_channels],
minval=0.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
biases = random_ops.random_uniform([output_channels],
minval=-10 * bias_scale,
maxval=20 * bias_scale,
dtype=dtypes.float32)
strides = [1, vertical_stride, horizontal_stride, 1]
actual = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
conv_input,
kernel,
biases,
strides=strides,
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else side_input),
activation_mode="Relu" if apply_relu else "None",
data_format="NHWC",
filter_format="HWIO")
expected = _SimulateFusedConv2dBiasActivationInt8OnCpu(
conv_input_scale, conv_input, kernel, padding_type, strides,
side_input_scale, side_input, biases, apply_relu)
actual_y, expected_y = sess.run([actual, expected])
self.assertAllClose(actual_y, expected_y, rtol=0, atol=1)
def testFusedConvInt8(self):
for apply_relu in [True, False]:
for test_param in self._test_params:
self.runTest(test_param, apply_relu)
def testRoundingMode(self):
"""Verify the fused convolution op uses half-to-even rounding mode."""
batches = 1
input_size = 2
input_channels = 1
output_channels = 1
conv_input = np.array([1, 2, 3, 4]).reshape(
(batches, input_size, input_size, input_channels)).astype(np.int8)
kernel = np.array([1]).reshape(
(1, 1, input_channels, output_channels)).astype(np.int8)
biases = np.zeros((output_channels)).astype(np.float32)
with self.session() as sess, self.test_scope():
actual = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
math_ops.cast(conv_input, dtypes.qint8),
math_ops.cast(kernel, dtypes.qint8),
biases,
strides=[1, 1, 1, 1],
padding="SAME",
conv_input_scale=0.5,
side_input_scale=0.0,
activation_mode="None",
data_format="NHWC",
filter_format="HWIO")
actual_value = sess.run(actual)
      # The scaled convolution output is [0.5, 1.0, 1.5, 2.0]. After rounding
      # half to even, the final output is [0, 1, 2, 2].
self.assertTrue(
np.array_equal(actual_value.flatten(),
np.array([0, 1, 2, 2]).astype(np.int8)))
# Test that GPU and CPU kernels produce identical results for QInt8 data type.
class FusedConvInt8CorrespondenceTests(object):
"""Verify quantization with CPU kernel."""
_test_params = _GetFusedConvInt8TestParams()
@contextlib.contextmanager
def test_scope(self): # pylint: disable=invalid-name
"""Can be overridden in base classes to provide a test scope."""
yield
def runTest(self, test_param, apply_relu):
"""Runs tests for dimensions configured in test_param."""
batch_size = test_param["batch_size"]
input_channels = test_param["input_channels"]
output_channels = test_param["output_channels"]
input_height = test_param["input_height"]
input_width = test_param["input_width"]
filter_height = test_param["filter_height"]
filter_width = test_param["filter_width"]
vertical_stride = test_param["vertical_stride"]
horizontal_stride = test_param["horizontal_stride"]
conv_input_scale = test_param["conv_input_scale"]
side_input_scale = test_param["side_input_scale"]
bias_scale = test_param["bias_scale"]
padding_type = test_param["padding_type"]
with self.session() as sess, self.test_scope():
conv_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform(
[batch_size, input_channels // 4, input_height, input_width, 4],
minval=0.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
self.assertTrue(
sess.run(
math_ops.reduce_all(
math_ops.greater_equal(
array_ops.bitcast(conv_input, dtypes.int8), 0))))
kernel, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform([
output_channels, input_channels // 4, filter_height, filter_width,
4
],
minval=-128.0,
maxval=127.0,
dtype=dtypes.float32),
-128.0,
127.0,
dtypes.qint8,
mode="SCALED")
output_height = _CalculateConvolvedOutputDim(input_height, filter_height,
vertical_stride,
padding_type)
output_width = _CalculateConvolvedOutputDim(input_width, filter_width,
horizontal_stride,
padding_type)
tf_logging.info("output_height=%s, output_width=%s", output_height,
output_width)
side_input, _, _ = gen_array_ops.quantize_v2(
random_ops.random_uniform([
batch_size, output_channels // 4, output_height, output_width, 4
],
minval=0.0,
maxval=1.0,
dtype=dtypes.float32),
-1.0,
1.0,
dtypes.qint8,
mode="SCALED")
biases = random_ops.random_uniform([output_channels],
minval=-10 * bias_scale,
maxval=20 * bias_scale,
dtype=dtypes.float32)
with ops.device("/cpu:0"):
t = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
_Int8Roundtrip(_NchwVectCToNhwc, conv_input),
_Int8Roundtrip(_OihwVectIToHwio, kernel),
biases,
strides=[1, vertical_stride, horizontal_stride, 1],
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else _Int8Roundtrip(
_NchwVectCToNhwc, side_input)),
activation_mode="Relu" if apply_relu else "None",
data_format="NHWC",
filter_format="HWIO")
cpu_result = _Int8Roundtrip(_NhwcToNchwVectC, t)
with ops.device("/gpu:0"):
t = fused_conv2d_bias_activation_op.fused_conv2d_bias_activation(
conv_input,
kernel,
biases,
strides=[1, 1, vertical_stride, horizontal_stride],
padding=padding_type,
conv_input_scale=conv_input_scale,
side_input_scale=side_input_scale,
side_input=(None if side_input_scale == 0.0 else side_input),
activation_mode="Relu" if apply_relu else "None",
data_format="NCHW_VECT_C",
filter_format="OIHW_VECT_I")
gpu_result = t
cpu_y, gpu_y = sess.run([cpu_result, gpu_result])
self.assertAllClose(cpu_y, gpu_y, rtol=0, atol=0)
def testFusedConvInt8(self):
if not test.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(6, 1)):
tf_logging.info("int8 test skipped because not run with --config=cuda or "
"no GPUs with compute capability >= 6.1 are available.")
return
for apply_relu in [True, False]:
for test_param in self._test_params:
self.runTest(test_param, apply_relu)
if __name__ == "__main__":
test.main()
| apache-2.0 | 9,062,528,024,283,166,000 | 36.582975 | 84 | 0.556133 | false |
ankur-gupta91/horizon-net-ip | openstack_dashboard/dashboards/project/loadbalancers/views.py | 9 | 15842 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.loadbalancers \
import forms as project_forms
from openstack_dashboard.dashboards.project.loadbalancers \
import tables as project_tables
from openstack_dashboard.dashboards.project.loadbalancers \
import tabs as project_tabs
from openstack_dashboard.dashboards.project.loadbalancers import utils
from openstack_dashboard.dashboards.project.loadbalancers \
import workflows as project_workflows
class IndexView(tabs.TabbedTableView):
tab_group_class = (project_tabs.LoadBalancerTabs)
template_name = 'project/loadbalancers/details_tabs.html'
page_title = _("Load Balancer")
class AddPoolView(workflows.WorkflowView):
workflow_class = project_workflows.AddPool
class AddVipView(workflows.WorkflowView):
workflow_class = project_workflows.AddVip
def get_initial(self):
initial = super(AddVipView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['subnet'] = api.neutron.subnet_get(
self.request, pool.subnet_id).cidr
except Exception as e:
initial['subnet'] = ''
msg = _('Unable to retrieve pool subnet. %s') % e
exceptions.handle(self.request, msg)
return initial
class AddMemberView(workflows.WorkflowView):
workflow_class = project_workflows.AddMember
class AddMonitorView(workflows.WorkflowView):
workflow_class = project_workflows.AddMonitor
class PoolDetailsView(tabs.TabView):
tab_group_class = project_tabs.PoolDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ pool.name|default:pool.id }}"
@memoized.memoized_method
def get_data(self):
pid = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, pid)
except Exception:
pool = []
exceptions.handle(self.request,
_('Unable to retrieve pool details.'))
else:
for monitor in pool.health_monitors:
display_name = utils.get_monitor_display_name(monitor)
setattr(monitor, 'display_name', display_name)
return pool
def get_context_data(self, **kwargs):
context = super(PoolDetailsView, self).get_context_data(**kwargs)
pool = self.get_data()
context['pool'] = pool
table = project_tables.PoolsTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(pool)
return context
def get_tabs(self, request, *args, **kwargs):
pool = self.get_data()
return self.tab_group_class(self.request, pool=pool, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class VipDetailsView(tabs.TabView):
tab_group_class = project_tabs.VipDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ vip.name|default:vip_id }}"
@memoized.memoized_method
def get_data(self):
vid = self.kwargs['vip_id']
vip = []
try:
vip = api.lbaas.vip_get(self.request, vid)
fips = api.network.tenant_floating_ip_list(self.request)
vip_fip = [fip for fip in fips
if fip.port_id == vip.port.id]
if vip_fip:
vip.fip = vip_fip[0]
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve VIP details.'))
return vip
def get_context_data(self, **kwargs):
context = super(VipDetailsView, self).get_context_data(**kwargs)
vip = self.get_data()
context['vip'] = vip
vip_nav = vip.pool.name_or_id
breadcrumb = [
(_("Load Balancers"), self.get_redirect_url()),
(vip_nav,
reverse('horizon:project:loadbalancers:vipdetails',
args=(vip.id,))),
(_("VIP"),), ]
context["custom_breadcrumb"] = breadcrumb
return context
def get_tabs(self, request, *args, **kwargs):
vip = self.get_data()
return self.tab_group_class(request, vip=vip, **kwargs)
@staticmethod
def get_redirect_url():
return reverse("horizon:project:loadbalancers:index")
class MemberDetailsView(tabs.TabView):
tab_group_class = project_tabs.MemberDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ member.name|default:member.id }}"
@memoized.memoized_method
def get_data(self):
mid = self.kwargs['member_id']
try:
return api.lbaas.member_get(self.request, mid)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve member details.'))
def get_context_data(self, **kwargs):
context = super(MemberDetailsView, self).get_context_data(**kwargs)
member = self.get_data()
context['member'] = member
member_nav = member.pool.name_or_id
breadcrumb = [
(_("Load Balancers"), self.get_redirect_url()),
(member_nav,
reverse('horizon:project:loadbalancers:pooldetails',
args=(member.pool.id,))),
(_("Members"), reverse('horizon:project:loadbalancers:members')),
]
context["custom_breadcrumb"] = breadcrumb
table = project_tables.MembersTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(member)
return context
def get_tabs(self, request, *args, **kwargs):
member = self.get_data()
return self.tab_group_class(request, member=member, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class MonitorDetailsView(tabs.TabView):
tab_group_class = project_tabs.MonitorDetailsTabs
template_name = 'horizon/common/_detail.html'
page_title = "{{ monitor.name|default:monitor.id }}"
@memoized.memoized_method
def get_data(self):
mid = self.kwargs['monitor_id']
try:
return api.lbaas.pool_health_monitor_get(self.request, mid)
except Exception:
exceptions.handle(self.request,
_('Unable to retrieve monitor details.'))
def get_context_data(self, **kwargs):
context = super(MonitorDetailsView, self).get_context_data(**kwargs)
monitor = self.get_data()
context['monitor'] = monitor
breadcrumb = [
(_("Load Balancers"), self.get_redirect_url()),
(_("Monitors"), reverse('horizon:project:loadbalancers:monitors')),
]
context["custom_breadcrumb"] = breadcrumb
table = project_tables.MonitorsTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(monitor)
return context
def get_tabs(self, request, *args, **kwargs):
monitor = self.get_data()
return self.tab_group_class(request, monitor=monitor, **kwargs)
@staticmethod
def get_redirect_url():
return reverse_lazy("horizon:project:loadbalancers:index")
class UpdatePoolView(forms.ModalFormView):
form_class = project_forms.UpdatePool
form_id = "update_pool_form"
modal_header = _("Edit Pool")
template_name = "project/loadbalancers/updatepool.html"
context_object_name = 'pool'
submit_label = _("Save Changes")
submit_url = "horizon:project:loadbalancers:updatepool"
success_url = reverse_lazy("horizon:project:loadbalancers:index")
page_title = _("Edit Pool")
def get_context_data(self, **kwargs):
context = super(UpdatePoolView, self).get_context_data(**kwargs)
context["pool_id"] = self.kwargs['pool_id']
args = (self.kwargs['pool_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
pool_id = self.kwargs['pool_id']
try:
return api.lbaas.pool_get(self.request, pool_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve pool details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
pool = self._get_object()
return {'name': pool['name'],
'pool_id': pool['id'],
'description': pool['description'],
'lb_method': pool['lb_method'],
'admin_state_up': pool['admin_state_up']}
class UpdateVipView(forms.ModalFormView):
form_class = project_forms.UpdateVip
form_id = "update_vip_form"
modal_header = _("Edit VIP")
template_name = "project/loadbalancers/updatevip.html"
context_object_name = 'vip'
submit_label = _("Save Changes")
submit_url = "horizon:project:loadbalancers:updatevip"
success_url = reverse_lazy("horizon:project:loadbalancers:index")
page_title = _("Edit VIP")
def get_context_data(self, **kwargs):
context = super(UpdateVipView, self).get_context_data(**kwargs)
context["vip_id"] = self.kwargs['vip_id']
args = (self.kwargs['vip_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
vip_id = self.kwargs['vip_id']
try:
return api.lbaas.vip_get(self.request, vip_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve VIP details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
vip = self._get_object()
persistence = getattr(vip, 'session_persistence', None)
if persistence:
stype = persistence['type']
if stype == 'APP_COOKIE':
cookie = persistence['cookie_name']
else:
cookie = ''
else:
stype = ''
cookie = ''
return {'name': vip['name'],
'vip_id': vip['id'],
'description': vip['description'],
'pool_id': vip['pool_id'],
'session_persistence': stype,
'cookie_name': cookie,
'connection_limit': vip['connection_limit'],
'admin_state_up': vip['admin_state_up']}
class UpdateMemberView(forms.ModalFormView):
form_class = project_forms.UpdateMember
form_id = "update_pool_form"
modal_header = _("Edit Member")
template_name = "project/loadbalancers/updatemember.html"
context_object_name = 'member'
submit_label = _("Save Changes")
submit_url = "horizon:project:loadbalancers:updatemember"
success_url = reverse_lazy("horizon:project:loadbalancers:index")
page_title = _("Edit Member")
def get_context_data(self, **kwargs):
context = super(UpdateMemberView, self).get_context_data(**kwargs)
context["member_id"] = self.kwargs['member_id']
args = (self.kwargs['member_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
member_id = self.kwargs['member_id']
try:
return api.lbaas.member_get(self.request, member_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve member details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
member = self._get_object()
return {'member_id': member['id'],
'pool_id': member['pool_id'],
'weight': member['weight'],
'admin_state_up': member['admin_state_up']}
class UpdateMonitorView(forms.ModalFormView):
form_class = project_forms.UpdateMonitor
form_id = "update_monitor_form"
modal_header = _("Edit Monitor")
template_name = "project/loadbalancers/updatemonitor.html"
context_object_name = 'monitor'
submit_label = _("Save Changes")
submit_url = "horizon:project:loadbalancers:updatemonitor"
success_url = reverse_lazy("horizon:project:loadbalancers:index")
page_title = _("Edit Monitor")
def get_context_data(self, **kwargs):
context = super(UpdateMonitorView, self).get_context_data(**kwargs)
context["monitor_id"] = self.kwargs['monitor_id']
args = (self.kwargs['monitor_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
monitor_id = self.kwargs['monitor_id']
try:
return api.lbaas.pool_health_monitor_get(self.request, monitor_id)
except Exception as e:
redirect = self.success_url
msg = _('Unable to retrieve health monitor details. %s') % e
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
monitor = self._get_object()
return {'monitor_id': monitor['id'],
'delay': monitor['delay'],
'timeout': monitor['timeout'],
'max_retries': monitor['max_retries'],
'admin_state_up': monitor['admin_state_up']}
class AddPMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.AddPMAssociation
def get_initial(self):
initial = super(AddPMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
class DeletePMAssociationView(workflows.WorkflowView):
workflow_class = project_workflows.DeletePMAssociation
def get_initial(self):
initial = super(DeletePMAssociationView, self).get_initial()
initial['pool_id'] = self.kwargs['pool_id']
try:
pool = api.lbaas.pool_get(self.request, initial['pool_id'])
initial['pool_name'] = pool.name
initial['pool_monitors'] = pool.health_monitors
except Exception as e:
msg = _('Unable to retrieve pool. %s') % e
exceptions.handle(self.request, msg)
return initial
| apache-2.0 | -7,602,421,537,666,447,000 | 36.363208 | 79 | 0.620187 | false |
MarcosCommunity/odoo | comunity_modules/account_financial_report_webkit/report/common_partner_balance_reports.py | 22 | 16179 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright Camptocamp SA 2011
# SQL inspired from OpenERP original code
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
from operator import add
from .common_balance_reports import CommonBalanceReportHeaderWebkit
from .common_partner_reports import CommonPartnersReportHeaderWebkit
class CommonPartnerBalanceReportHeaderWebkit(CommonBalanceReportHeaderWebkit,
CommonPartnersReportHeaderWebkit):
"""Define common helper for balance (trial balance, P&L,
BS oriented financial report"""
def _get_account_partners_details(self, account_by_ids, main_filter,
target_move, start, stop,
initial_balance_mode,
partner_filter_ids=False):
res = {}
filter_from = False
if main_filter in ('filter_period', 'filter_no', 'filter_opening'):
filter_from = 'period'
elif main_filter == 'filter_date':
filter_from = 'date'
partners_init_balances_by_ids = {}
for account_id, account_details in account_by_ids.iteritems():
partners_init_balances_by_ids.update(
self._get_partners_initial_balances(
account_id, start, initial_balance_mode,
partner_filter_ids=partner_filter_ids,
# we'll never exclude reconciled entries in the legal
# reports
exclude_reconcile=False))
opening_mode = 'exclude_opening'
if main_filter == 'filter_opening':
opening_mode = 'include_opening'
# get credit and debit for partner
details = self._get_partners_totals_account(
filter_from,
account_id,
start,
stop,
target_move,
partner_filter_ids=partner_filter_ids,
mode=opening_mode)
# merge initial balances in partner details
if partners_init_balances_by_ids.get(account_id):
for partner_id, initial_balances in \
partners_init_balances_by_ids[account_id].iteritems():
if initial_balances.get('init_balance'):
details[partner_id].update(
{'init_balance': initial_balances['init_balance']})
# compute balance for the partner
for partner_id, partner_details in details.iteritems():
details[partner_id]['balance'] = details[partner_id].\
get('init_balance', 0.0) + \
details[partner_id].get('debit', 0.0) - \
details[partner_id].get('credit', 0.0)
res[account_id] = details
return res
def _get_partners_initial_balances(self, account_ids, start_period,
initial_balance_mode,
partner_filter_ids=None,
exclude_reconcile=False):
# we get the initial balance from the opening period (opening_balance)
# when the opening period is included in the start period and
# when there is at least one entry in the opening period. Otherwise we
# compute it from previous periods
if initial_balance_mode == 'opening_balance':
opening_period_selected = self.get_included_opening_period(
start_period)
res = self._compute_partners_initial_balances(
account_ids, start_period, partner_filter_ids,
force_period_ids=opening_period_selected,
exclude_reconcile=exclude_reconcile)
elif initial_balance_mode == 'initial_balance':
res = self._compute_partners_initial_balances(
account_ids, start_period, partner_filter_ids,
exclude_reconcile=exclude_reconcile)
else:
res = {}
return res
def _get_partners_totals_account(self, filter_from, account_id, start,
stop, target_move,
partner_filter_ids=None,
mode='exclude_opening'):
final_res = defaultdict(dict)
sql_select = """
SELECT account_move_line.partner_id,
sum(account_move_line.debit) AS debit,
sum(account_move_line.credit) AS credit
FROM account_move_line"""
sql_joins = ''
sql_where = "WHERE account_move_line.account_id = %(account_id)s \
AND account_move_line.state = 'valid' "
method = getattr(self, '_get_query_params_from_' + filter_from + 's')
sql_conditions, search_params = method(start, stop, mode=mode)
sql_where += sql_conditions
if partner_filter_ids:
sql_where += " AND account_move_line.partner_id \
in %(partner_ids)s"
search_params.update({'partner_ids': tuple(partner_filter_ids)})
if target_move == 'posted':
sql_joins += "INNER JOIN account_move \
ON account_move_line.move_id = account_move.id"
sql_where += " AND account_move.state = %(target_move)s"
search_params.update({'target_move': target_move})
sql_groupby = "GROUP BY account_move_line.partner_id"
search_params.update({'account_id': account_id})
query = ' '.join((sql_select, sql_joins, sql_where, sql_groupby))
self.cursor.execute(query, search_params)
res = self.cursor.dictfetchall()
if res:
for row in res:
final_res[row['partner_id']] = row
return final_res
def _get_filter_type(self, result_selection):
filter_type = ('payable', 'receivable')
if result_selection == 'customer':
filter_type = ('receivable',)
if result_selection == 'supplier':
filter_type = ('payable',)
return filter_type
def _get_partners_comparison_details(self, data, account_ids, target_move,
comparison_filter, index,
partner_filter_ids=False):
"""
@param data: data of the wizard form
@param account_ids: ids of the accounts to get details
@param comparison_filter: selected filter on the form for
the comparison (filter_no, filter_year, filter_period, filter_date)
@param index: index of the fields to get (ie. comp1_fiscalyear_id
where 1 is the index)
@param partner_filter_ids: list of ids of partners to select
@return: dict of account details (key = account id)
"""
fiscalyear = self._get_info(
data, "comp%s_fiscalyear_id" % (index,), 'account.fiscalyear')
start_period = self._get_info(
data, "comp%s_period_from" % (index,), 'account.period')
stop_period = self._get_info(
data, "comp%s_period_to" % (index,), 'account.period')
start_date = self._get_form_param("comp%s_date_from" % (index,), data)
stop_date = self._get_form_param("comp%s_date_to" % (index,), data)
init_balance = self.is_initial_balance_enabled(comparison_filter)
comp_params = {}
accounts_details_by_ids = defaultdict(dict)
if comparison_filter != 'filter_no':
start_period, stop_period, start, stop = \
self._get_start_stop_for_filter(
comparison_filter, fiscalyear, start_date, stop_date,
start_period, stop_period)
details_filter = comparison_filter
if comparison_filter == 'filter_year':
details_filter = 'filter_no'
initial_balance_mode = init_balance \
and self._get_initial_balance_mode(start) or False
accounts_by_ids = self._get_account_details(
account_ids, target_move, fiscalyear, details_filter, start,
stop, initial_balance_mode)
partner_details_by_ids = self._get_account_partners_details(
accounts_by_ids, details_filter,
target_move, start, stop, initial_balance_mode,
partner_filter_ids=partner_filter_ids)
for account_id in account_ids:
accounts_details_by_ids[account_id][
'account'] = accounts_by_ids[account_id]
accounts_details_by_ids[account_id][
'partners_amounts'] = partner_details_by_ids[account_id]
comp_params = {
'comparison_filter': comparison_filter,
'fiscalyear': fiscalyear,
'start': start,
'stop': stop,
'initial_balance_mode': initial_balance_mode,
}
return accounts_details_by_ids, comp_params
def compute_partner_balance_data(self, data, filter_report_type=None):
new_ids = data['form']['account_ids'] or data[
'form']['chart_account_id']
max_comparison = self._get_form_param(
'max_comparison', data, default=0)
main_filter = self._get_form_param('filter', data, default='filter_no')
comp_filters, nb_comparisons, comparison_mode = self._comp_filters(
data, max_comparison)
fiscalyear = self.get_fiscalyear_br(data)
start_period = self.get_start_period_br(data)
stop_period = self.get_end_period_br(data)
target_move = self._get_form_param('target_move', data, default='all')
start_date = self._get_form_param('date_from', data)
stop_date = self._get_form_param('date_to', data)
chart_account = self._get_chart_account_id_br(data)
result_selection = self._get_form_param('result_selection', data)
partner_ids = self._get_form_param('partner_ids', data)
filter_type = self._get_filter_type(result_selection)
start_period, stop_period, start, stop = \
self._get_start_stop_for_filter(
main_filter, fiscalyear, start_date, stop_date, start_period,
stop_period)
initial_balance = self.is_initial_balance_enabled(main_filter)
initial_balance_mode = initial_balance \
and self._get_initial_balance_mode(start) or False
# Retrieving accounts
account_ids = self.get_all_accounts(
new_ids, only_type=filter_type,
filter_report_type=filter_report_type)
# get details for each accounts, total of debit / credit / balance
accounts_by_ids = self._get_account_details(
account_ids, target_move, fiscalyear, main_filter, start, stop,
initial_balance_mode)
partner_details_by_ids = self._get_account_partners_details(
accounts_by_ids, main_filter, target_move, start, stop,
initial_balance_mode, partner_filter_ids=partner_ids)
comparison_params = []
comp_accounts_by_ids = []
for index in range(max_comparison):
if comp_filters[index] != 'filter_no':
comparison_result, comp_params = self.\
_get_partners_comparison_details(
data, account_ids,
target_move,
comp_filters[index],
index,
partner_filter_ids=partner_ids)
comparison_params.append(comp_params)
comp_accounts_by_ids.append(comparison_result)
objects = self.pool.get('account.account').browse(self.cursor,
self.uid,
account_ids)
init_balance_accounts = {}
comparisons_accounts = {}
partners_order_accounts = {}
partners_amounts_accounts = {}
debit_accounts = {}
credit_accounts = {}
balance_accounts = {}
for account in objects:
if not account.parent_id: # hide top level account
continue
debit_accounts[account.id] = accounts_by_ids[account.id]['debit']
credit_accounts[account.id] = accounts_by_ids[account.id]['credit']
balance_accounts[account.id] = \
accounts_by_ids[account.id]['balance']
init_balance_accounts[account.id] = accounts_by_ids[
account.id].get('init_balance', 0.0)
partners_amounts_accounts[account.id] =\
partner_details_by_ids[account.id]
comp_accounts = []
for comp_account_by_id in comp_accounts_by_ids:
values = comp_account_by_id.get(account.id)
values['account'].update(
self._get_diff(account.balance,
values['account'].get('balance', 0.0)))
comp_accounts.append(values)
for partner_id, partner_values in \
values['partners_amounts'].copy().iteritems():
base_partner_balance = partners_amounts_accounts[account.id][partner_id]['balance']\
if partners_amounts_accounts.get(account.id)\
and partners_amounts_accounts.get(account.id)\
.get(partner_id) else 0.0
partner_values.update(self._get_diff(
base_partner_balance,
partner_values.get('balance', 0.0)))
values['partners_amounts'][
partner_id].update(partner_values)
comparisons_accounts[account.id] = comp_accounts
all_partner_ids = reduce(add, [comp['partners_amounts'].keys()
for comp in comp_accounts],
partners_amounts_accounts[account.id]
.keys())
partners_order_accounts[account.id] = \
self._order_partners(all_partner_ids)
context_report_values = {
'fiscalyear': fiscalyear,
'start_date': start_date,
'stop_date': stop_date,
'start_period': start_period,
'stop_period': stop_period,
'chart_account': chart_account,
'comparison_mode': comparison_mode,
'nb_comparison': nb_comparisons,
'comp_params': comparison_params,
'initial_balance_mode': initial_balance_mode,
'compute_diff': self._get_diff,
'init_balance_accounts': init_balance_accounts,
'comparisons_accounts': comparisons_accounts,
'partners_order_accounts': partners_order_accounts,
'partners_amounts_accounts': partners_amounts_accounts,
'debit_accounts': debit_accounts,
'credit_accounts': credit_accounts,
'balance_accounts': balance_accounts,
}
return objects, new_ids, context_report_values
| agpl-3.0 | 5,806,480,750,018,773,000 | 44.446629 | 104 | 0.553743 | false |
Inspq/ansible | lib/ansible/modules/messaging/rabbitmq_queue.py | 1 | 9738 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
short_description: This module manages rabbitMQ queues
description:
- This module uses rabbitMQ Rest API to create/delete queues
requirements: [ "requests >= 1.0.0" ]
options:
name:
description:
- Name of the queue to create
required: true
state:
description:
            - Whether the queue should be present or absent
choices: [ "present", "absent" ]
required: false
default: present
login_user:
description:
- rabbitMQ user for connection
required: false
default: guest
login_password:
description:
- rabbitMQ password for connection
required: false
        default: guest
login_host:
description:
- rabbitMQ host for connection
required: false
default: localhost
login_port:
description:
- rabbitMQ management api port
required: false
default: 15672
vhost:
description:
- rabbitMQ virtual host
required: false
default: "/"
durable:
description:
- whether queue is durable or not
required: false
choices: [ "yes", "no" ]
default: yes
auto_delete:
description:
            - if the queue should delete itself after all consumers have finished using it
required: false
choices: [ "yes", "no" ]
default: no
message_ttl:
description:
- How long a message can live in queue before it is discarded (milliseconds)
required: False
default: forever
auto_expires:
description:
- How long a queue can be unused before it is automatically deleted (milliseconds)
required: false
default: forever
max_length:
description:
- How many messages can the queue contain before it starts rejecting
required: false
default: no limit
dead_letter_exchange:
description:
- Optional name of an exchange to which messages will be republished if they
- are rejected or expire
required: false
default: None
dead_letter_routing_key:
description:
- Optional replacement routing key to use when a message is dead-lettered.
- Original routing key will be used if unset
required: false
default: None
arguments:
description:
- extra arguments for queue. If defined this argument is a key/value dictionary
required: false
default: {}
'''
EXAMPLES = '''
# Create a queue
- rabbitmq_queue:
name: myQueue
# Create a queue on remote host
- rabbitmq_queue:
name: myRemoteQueue
login_user: user
login_password: secret
login_host: remote.example.org
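# A hypothetical example (not part of the upstream documentation) combining the
# TTL and dead-letter options described above; the exchange myDLX is assumed to
# already exist
- rabbitmq_queue:
    name: myTTLQueue
    message_ttl: 60000
    dead_letter_exchange: myDLX
    dead_letter_routing_key: expired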
'''
import requests
import urllib
import json
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent'], type='str'),
name = dict(required=True, type='str'),
login_user = dict(default='guest', type='str'),
login_password = dict(default='guest', type='str', no_log=True),
login_host = dict(default='localhost', type='str'),
login_port = dict(default='15672', type='str'),
vhost = dict(default='/', type='str'),
durable = dict(default=True, type='bool'),
auto_delete = dict(default=False, type='bool'),
message_ttl = dict(default=None, type='int'),
auto_expires = dict(default=None, type='int'),
max_length = dict(default=None, type='int'),
dead_letter_exchange = dict(default=None, type='str'),
dead_letter_routing_key = dict(default=None, type='str'),
arguments = dict(default=dict(), type='dict')
),
supports_check_mode = True
)
url = "http://%s:%s/api/queues/%s/%s" % (
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
module.params['name']
)
# Check if queue already exists
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
if r.status_code==200:
queue_exists = True
response = r.json()
elif r.status_code==404:
queue_exists = False
response = r.text
else:
module.fail_json(
msg = "Invalid response from RESTAPI when trying to check if queue exists",
details = r.text
)
if module.params['state']=='present':
change_required = not queue_exists
else:
change_required = queue_exists
# Check if attributes change on existing queue
if not change_required and r.status_code==200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['auto_delete'] and
(
( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl'] ) or
( 'x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None )
) and
(
( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires'] ) or
( 'x-expires' not in response['arguments'] and module.params['auto_expires'] is None )
) and
(
( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length'] ) or
( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None )
) and
(
( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange'] ) or
( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None )
) and
(
( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or
( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None )
)
):
module.fail_json(
msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing queues",
)
# Copy parameters to arguments as used by RabbitMQ
for k,v in {
'message_ttl': 'x-message-ttl',
'auto_expires': 'x-expires',
'max_length': 'x-max-length',
'dead_letter_exchange': 'x-dead-letter-exchange',
'dead_letter_routing_key': 'x-dead-letter-routing-key'
}.items():
if module.params[k] is not None:
module.params['arguments'][v] = module.params[k]
# Exit if check_mode
if module.check_mode:
module.exit_json(
changed= change_required,
name = module.params['name'],
details = response,
arguments = module.params['arguments']
)
# Do changes
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
auth = (module.params['login_user'],module.params['login_password']),
headers = { "content-type": "application/json"},
data = json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['auto_delete'],
"arguments": module.params['arguments']
})
)
elif module.params['state'] == 'absent':
r = requests.delete( url, auth = (module.params['login_user'],module.params['login_password']))
# RabbitMQ 3.6.7 changed this response code from 204 to 201
if r.status_code == 204 or r.status_code == 201:
module.exit_json(
changed = True,
name = module.params['name']
)
else:
module.fail_json(
msg = "Error creating queue",
status = r.status_code,
details = r.text
)
else:
module.exit_json(
changed = False,
name = module.params['name']
)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -1,233,860,522,779,790,000 | 34.282609 | 174 | 0.577737 | false |
rupran/ansible | lib/ansible/modules/cloud/openstack/os_subnet.py | 33 | 13522 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_subnet
short_description: Add/Remove subnet to an OpenStack network
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Add or Remove a subnet to an OpenStack network
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
required: false
default: present
network_name:
description:
- Name of the network to which the subnet should be attached
- Required when I(state) is 'present'
required: false
name:
description:
- The name of the subnet that should be created. Although Neutron
allows for non-unique subnet names, this module enforces subnet
name uniqueness.
required: true
cidr:
description:
- The CIDR representation of the subnet that should be assigned to
the subnet. Required when I(state) is 'present' and a subnetpool
is not specified.
required: false
default: None
ip_version:
description:
- The IP version of the subnet 4 or 6
required: false
default: 4
enable_dhcp:
description:
- Whether DHCP should be enabled for this subnet.
required: false
default: true
gateway_ip:
description:
- The ip that would be assigned to the gateway for this subnet
required: false
default: None
no_gateway_ip:
description:
- The gateway IP would not be assigned for this subnet
required: false
default: false
version_added: "2.2"
dns_nameservers:
description:
- List of DNS nameservers for this subnet.
required: false
default: None
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should
be allocated.
required: false
default: None
allocation_pool_end:
description:
- From the subnet pool the last IP that should be assigned to the
virtual machines.
required: false
default: None
host_routes:
description:
- A list of host route dictionaries for the subnet.
required: false
default: None
ipv6_ra_mode:
description:
- IPv6 router advertisement mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
required: false
default: None
ipv6_address_mode:
description:
- IPv6 address mode
choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
required: false
default: None
use_default_subnetpool:
description:
- Use the default subnetpool for I(ip_version) to obtain a CIDR.
required: false
default: false
project:
description:
- Project name or ID containing the subnet (name admin-only)
required: false
default: None
version_added: "2.1"
availability_zone:
description:
       - Ignored. Present for backwards compatibility
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a new (or update an existing) subnet on the specified network
- os_subnet:
state: present
network_name: network1
name: net1subnet
cidr: 192.168.0.0/24
dns_nameservers:
- 8.8.8.7
- 8.8.8.8
host_routes:
- destination: 0.0.0.0/0
nexthop: 12.34.56.78
- destination: 192.168.0.0/24
nexthop: 192.168.0.1
# Delete a subnet
- os_subnet:
state: absent
name: net1subnet
# Create an ipv6 stateless subnet
- os_subnet:
state: present
name: intv6
network_name: internal
ip_version: 6
cidr: 2db8:1::/64
dns_nameservers:
- 2001:4860:4860::8888
- 2001:4860:4860::8844
ipv6_ra_mode: dhcpv6-stateless
ipv6_address_mode: dhcpv6-stateless
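# A hypothetical example (not in the upstream docs): restricting DHCP
# allocations to an explicit pool on an assumed network named network1
- os_subnet:
    state: present
    network_name: network1
    name: pooledsubnet
    cidr: 192.168.10.0/24
    allocation_pool_start: 192.168.10.10
    allocation_pool_end: 192.168.10.100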
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _can_update(subnet, module, cloud):
"""Check for differences in non-updatable values"""
network_name = module.params['network_name']
cidr = module.params['cidr']
ip_version = int(module.params['ip_version'])
ipv6_ra_mode = module.params['ipv6_ra_mode']
ipv6_a_mode = module.params['ipv6_address_mode']
if network_name:
network = cloud.get_network(network_name)
if network:
netid = network['id']
else:
module.fail_json(msg='No network found for %s' % network_name)
if netid != subnet['network_id']:
            module.fail_json(msg='Cannot update network_name in '
                                 'existing subnet')
if ip_version and subnet['ip_version'] != ip_version:
module.fail_json(msg='Cannot update ip_version in existing subnet')
if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ipv6_ra_mode:
module.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet')
if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode:
        module.fail_json(msg='Cannot update ipv6_address_mode in '
                             'existing subnet')
def _needs_update(subnet, module, cloud):
"""Check for differences in the updatable values."""
# First check if we are trying to update something we're not allowed to
_can_update(subnet, module, cloud)
# now check for the things we are allowed to update
enable_dhcp = module.params['enable_dhcp']
subnet_name = module.params['name']
pool_start = module.params['allocation_pool_start']
pool_end = module.params['allocation_pool_end']
gateway_ip = module.params['gateway_ip']
no_gateway_ip = module.params['no_gateway_ip']
dns = module.params['dns_nameservers']
host_routes = module.params['host_routes']
curr_pool = subnet['allocation_pools'][0]
if subnet['enable_dhcp'] != enable_dhcp:
return True
if subnet_name and subnet['name'] != subnet_name:
return True
if pool_start and curr_pool['start'] != pool_start:
return True
if pool_end and curr_pool['end'] != pool_end:
return True
if gateway_ip and subnet['gateway_ip'] != gateway_ip:
return True
if dns and sorted(subnet['dns_nameservers']) != sorted(dns):
return True
if host_routes:
curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys())
new_hr = sorted(host_routes, key=lambda t: t.keys())
if sorted(curr_hr) != sorted(new_hr):
return True
if no_gateway_ip and subnet['gateway_ip']:
return True
return False
def _system_state_change(module, subnet, cloud):
state = module.params['state']
if state == 'present':
if not subnet:
return True
return _needs_update(subnet, module, cloud)
if state == 'absent' and subnet:
return True
return False
def main():
ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac']
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
network_name=dict(default=None),
cidr=dict(default=None),
ip_version=dict(default='4', choices=['4', '6']),
enable_dhcp=dict(default='true', type='bool'),
gateway_ip=dict(default=None),
no_gateway_ip=dict(default=False, type='bool'),
dns_nameservers=dict(default=None, type='list'),
allocation_pool_start=dict(default=None),
allocation_pool_end=dict(default=None),
host_routes=dict(default=None, type='list'),
        ipv6_ra_mode=dict(default=None, choices=ipv6_mode_choices),
        ipv6_address_mode=dict(default=None, choices=ipv6_mode_choices),
use_default_subnetpool=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
project=dict(default=None)
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
network_name = module.params['network_name']
cidr = module.params['cidr']
ip_version = module.params['ip_version']
enable_dhcp = module.params['enable_dhcp']
subnet_name = module.params['name']
gateway_ip = module.params['gateway_ip']
no_gateway_ip = module.params['no_gateway_ip']
dns = module.params['dns_nameservers']
pool_start = module.params['allocation_pool_start']
pool_end = module.params['allocation_pool_end']
host_routes = module.params['host_routes']
ipv6_ra_mode = module.params['ipv6_ra_mode']
ipv6_a_mode = module.params['ipv6_address_mode']
use_default_subnetpool = module.params['use_default_subnetpool']
project = module.params.pop('project')
if (use_default_subnetpool and
StrictVersion(shade.__version__) < StrictVersion('1.16.0')):
module.fail_json(msg="To utilize use_default_subnetpool, the installed"
" version of the shade library MUST be >=1.16.0")
# Check for required parameters when state == 'present'
if state == 'present':
if not module.params['network_name']:
module.fail_json(msg='network_name required with present state')
if not module.params['cidr'] and not use_default_subnetpool:
module.fail_json(msg='cidr or use_default_subnetpool required '
'with present state')
if pool_start and pool_end:
pool = [dict(start=pool_start, end=pool_end)]
elif pool_start or pool_end:
module.fail_json(msg='allocation pool requires start and end values')
else:
pool = None
if no_gateway_ip and gateway_ip:
module.fail_json(msg='no_gateway_ip is not allowed with gateway_ip')
try:
cloud = shade.openstack_cloud(**module.params)
if project is not None:
proj = cloud.get_project(project)
if proj is None:
module.fail_json(msg='Project %s could not be found' % project)
project_id = proj['id']
filters = {'tenant_id': project_id}
else:
project_id = None
filters = None
subnet = cloud.get_subnet(subnet_name, filters=filters)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, subnet,
cloud))
if state == 'present':
if not subnet:
subnet = cloud.create_subnet(
network_name, cidr,
ip_version=ip_version,
enable_dhcp=enable_dhcp,
subnet_name=subnet_name,
gateway_ip=gateway_ip,
disable_gateway_ip=no_gateway_ip,
dns_nameservers=dns,
allocation_pools=pool,
host_routes=host_routes,
ipv6_ra_mode=ipv6_ra_mode,
ipv6_address_mode=ipv6_a_mode,
use_default_subnetpool=use_default_subnetpool,
tenant_id=project_id)
changed = True
else:
if _needs_update(subnet, module, cloud):
cloud.update_subnet(subnet['id'],
subnet_name=subnet_name,
enable_dhcp=enable_dhcp,
gateway_ip=gateway_ip,
disable_gateway_ip=no_gateway_ip,
dns_nameservers=dns,
allocation_pools=pool,
host_routes=host_routes)
changed = True
else:
changed = False
module.exit_json(changed=changed,
subnet=subnet,
id=subnet['id'])
elif state == 'absent':
if not subnet:
changed = False
else:
changed = True
cloud.delete_subnet(subnet_name)
module.exit_json(changed=changed)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 | 8,650,528,516,379,233,000 | 33.671795 | 79 | 0.60213 | false |
firebitsbr/raspberry_pwn | src/pentest/sqlmap/tamper/ifnull2ifisnull.py | 7 | 1625 | #!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.HIGHEST
def dependencies():
pass
def tamper(payload, **kwargs):
"""
Replaces instances like 'IFNULL(A, B)' with 'IF(ISNULL(A), B, A)'
Requirement:
* MySQL
* SQLite (possibly)
* SAP MaxDB (possibly)
Tested against:
* MySQL 5.0 and 5.5
Notes:
* Useful to bypass very weak and bespoke web application firewalls
that filter the IFNULL() function
>>> tamper('IFNULL(1, 2)')
'IF(ISNULL(1),2,1)'
"""
if payload and payload.find("IFNULL") > -1:
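        # Scan each IFNULL( occurrence, using a parenthesis-depth counter to
        # locate the top-level comma and the matching closing bracket, then
        # rewrite the call as IF(ISNULL(A),B,A).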
while payload.find("IFNULL(") > -1:
index = payload.find("IFNULL(")
depth = 1
comma, end = None, None
for i in xrange(index + len("IFNULL("), len(payload)):
if depth == 1 and payload[i] == ',':
comma = i
elif depth == 1 and payload[i] == ')':
end = i
break
elif payload[i] == '(':
depth += 1
elif payload[i] == ')':
depth -= 1
if comma and end:
_ = payload[index + len("IFNULL("):comma]
__ = payload[comma + 1:end].lstrip()
newVal = "IF(ISNULL(%s),%s,%s)" % (_, __, _)
payload = payload[:index] + newVal + payload[end + 1:]
else:
break
return payload
| gpl-3.0 | -321,504,816,558,163,700 | 24.793651 | 74 | 0.481846 | false |
justyns/home-assistant | tests/components/notify/test_command_line.py | 11 | 2478 | """The tests for the command line notification platform."""
import os
import tempfile
import unittest
import homeassistant.components.notify as notify
from tests.common import get_test_home_assistant
from unittest.mock import patch
class TestCommandLine(unittest.TestCase):
"""Test the command line notifications."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_bad_config(self):
"""Test set up the platform with bad/missing config."""
self.assertFalse(notify.setup(self.hass, {
'notify': {
'name': 'test',
'platform': 'bad_platform',
}
}))
self.assertFalse(notify.setup(self.hass, {
'notify': {
'name': 'test',
'platform': 'command_line',
}
}))
def test_command_line_output(self):
"""Test the command line output."""
with tempfile.TemporaryDirectory() as tempdirname:
filename = os.path.join(tempdirname, 'message.txt')
message = 'one, two, testing, testing'
self.assertTrue(notify.setup(self.hass, {
'notify': {
'name': 'test',
'platform': 'command_line',
'command': 'echo $(cat) > {}'.format(filename)
}
}))
self.hass.services.call('notify', 'test', {'message': message},
blocking=True)
result = open(filename).read()
# the echo command adds a line break
self.assertEqual(result, "{}\n".format(message))
@patch('homeassistant.components.notify.command_line._LOGGER.error')
def test_error_for_none_zero_exit_code(self, mock_error):
"""Test if an error is logged for non zero exit codes."""
self.assertTrue(notify.setup(self.hass, {
'notify': {
'name': 'test',
'platform': 'command_line',
'command': 'echo $(cat); exit 1'
}
}))
self.hass.services.call('notify', 'test', {'message': 'error'},
blocking=True)
self.assertEqual(1, mock_error.call_count)
| mit | 2,794,977,823,638,760,000 | 33.416667 | 75 | 0.54318 | false |
nesterione/experiments-of-programming | MongoDB/Python/Week2/Classroom/using_update.py | 1 | 1735 |
import pymongo
import datetime
import sys
# establish a connection to the database
connection = pymongo.MongoClient("mongodb://localhost")
# add a review date to a single record using update_one
def add_review_date_using_update_one(student_id):
print ("updating record using update_one and $set")
# get a handle to the school database
db=connection.school
scores = db.scores
try:
# get the doc
score = scores.find_one({'student_id':student_id, 'type':'homework'})
print ("before: ", score)
# update using set
record_id = score['_id']
#result = scores.update_one({'_id':record_id},
# {'$set':{'review_date':datetime.datetime.utcnow()}})
result = scores.update({'_id':record_id},
{'$set':{'review_date':datetime.datetime.utcnow()}})
        # the legacy update() call returns a dict; 'n' holds the matched count
        print ("num matched: ", result['n'])
score = scores.find_one({'_id':record_id})
print ("after: ", score)
except Exception as e:
raise
# add a review date to all records
def add_review_dates_for_all():
print ("updating record using update_one and $set")
# get a handle to the school database
db=connection.school
scores = db.scores
try:
# update all the docs
        result = scores.update_many({}, {'$set': {'review_date': datetime.datetime.utcnow()}})
        print ("num matched: ", result.matched_count)
except Exception as e:
raise
#add_review_date_using_update_one(1)
add_review_dates_for_all()
| apache-2.0 | 8,412,355,174,187,856,000 | 27.442623 | 102 | 0.602305 | false |
mahak/packstack | packstack/puppet/modules/packstack/lib/facter/netns.py | 6 | 8175 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import inspect
import os
import random
import subprocess
import sys
import tempfile
import uuid
import unittest
def execute(cmd_string, check_error=True, return_code=0, input=None,
block=True, error_msg='Error executing cmd'):
print(cmd_string)
cmd = cmd_string.split(' ')
proc = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if input:
proc.communicate(input=input)
elif block:
proc.wait()
if (check_error and
proc.returncode is not None and
proc.returncode != return_code):
msg = """
%(error_msg)s
Command: %(cmd)s
Exit Code: %(code)s
""".strip() % dict(cmd=' '.join(cmd),
code=proc.returncode,
error_msg=error_msg)
if input:
msg += "\n Stdin: %s" % input
if not proc.stdout.closed:
msg += "\n Stdout: %s" % proc.stdout.read()
if not proc.stderr.closed:
msg += "\n Stderr: %s" % proc.stderr.read()
raise Exception(msg)
return proc
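# e() is a convenience wrapper around execute(): it interpolates the caller's
# local variables into cmd (read from the calling stack frame) and, by default,
# prefixes the command with 'ip netns exec ' so it runs inside a namespace.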
def e(cmd, prefix='ip netns exec ', sudo=False, **kwargs):
frame_locals = inspect.getargvalues(sys._getframe(1))[3]
if sudo:
prefix = 'sudo ' + prefix
return execute(prefix + cmd % frame_locals, **kwargs)
def rand_name(name='test'):
return '%s-%s' % (name, str(random.randint(1, 0x7fffffff)))
@contextlib.contextmanager
def add_namespace():
name = rand_name('testns')
try:
e('ip netns add %(name)s', prefix='')
e('%(name)s ip link set lo up')
yield name
finally:
e('ip netns delete %(name)s', prefix='')
@contextlib.contextmanager
def add_namespaces():
with add_namespace() as ns1:
with add_namespace() as ns2:
yield ns1, ns2
def add_veth_pair(ns1, ns2, veth1, veth2, address1, address2):
e('ip link add %(veth1)s netns %(ns1)s type veth '
'peer name %(veth2)s netns %(ns2)s', prefix='')
e('%(ns1)s ip link show %(veth1)s')
e('%(ns2)s ip link show %(veth2)s')
e('%(ns1)s ip -4 addr add %(address1)s/24 brd 255.255.255.0 '
'scope global dev %(veth1)s')
e('%(ns2)s ip -4 addr add %(address2)s/24 brd 255.255.255.0 '
'scope global dev %(veth2)s')
e('%(ns1)s ip link set %(veth1)s up')
e('%(ns2)s ip link set %(veth2)s up')
class TestNetns(unittest.TestCase):
def test_neutron_netns_cmds(self):
"""Exercise the netns functionality required by neutron.
- Check that a veth pair can be configured to transit traffic
between 2 namespaces
- Check that iptables filtering can be configured
- Check that iptables routing can be configured
"""
# Naming scheme [resource][id]_[namespace id]
veth1_1 = 'veth1_1'
veth1_2 = 'veth1_2'
address1_1 = '192.168.0.1'
address1_2 = '192.168.0.2'
with add_namespaces() as (ns1, ns2):
# Check that inter-namespace connectivity can be established
add_veth_pair(ns1, ns2, veth1_1, veth1_2, address1_1, address1_2)
e('%(ns1)s ip link list')
e('%(ns1)s ip link show %(veth1_1)s')
e('%(ns1)s arping -A -U -I %(veth1_1)s '
'-c 1 %(address1_1)s')
e('%(ns2)s route add default gw %(address1_1)s')
e('%(ns2)s ping -c 1 -w 1 %(address1_1)s')
e('ping -c 1 -w 1 %(address1_1)s', prefix='', return_code=1,
error_msg='Namespace isolation not supported!')
# Check that iptables filtering and save/restore can be performed
try:
iptables_filename = os.path.join(
tempfile.gettempdir(),
'iptables-%s' % str(uuid.uuid4()))
e('%%(ns1)s iptables-save > %s' % iptables_filename)
e('%(ns1)s iptables -A INPUT -p icmp --icmp-type 8 -j DROP')
e('%(ns2)s ping -c 1 -w 1 %(address1_1)s', return_code=1)
e('%%(ns1)s iptables-restore < %s' % iptables_filename)
e('%(ns2)s ping -c 1 -w 1 %(address1_1)s')
finally:
if os.path.exists(iptables_filename):
os.unlink(iptables_filename)
# Create another namespace (ns3) that is connected to ns1
# via a different subnet, so that traffic between ns3 and
# ns2 will have to be routed by ns1:
#
# ns2 <- 192.168.0.0/24 -> ns1 <- 192.168.1.0/24 -> ns3
#
with add_namespace() as ns3:
veth2_1 = 'veth2_1'
veth2_3 = 'veth2_3'
address2_1 = '192.168.1.1'
address2_3 = '192.168.1.2'
add_veth_pair(ns1, ns3, veth2_1, veth2_3,
address2_1, address2_3)
e('%(ns1)s sysctl -w net.ipv4.ip_forward=1')
e('%(ns1)s iptables -t nat -A POSTROUTING -o %(veth2_1)s -j '
'MASQUERADE')
e('%(ns1)s iptables -A FORWARD -i %(veth2_1)s -o %(veth1_1)s '
'-m state --state RELATED,ESTABLISHED -j ACCEPT')
e('%(ns1)s iptables -A FORWARD -i %(veth1_1)s -o %(veth2_1)s '
'-j ACCEPT')
e('%(ns2)s ping -c 1 -w 1 %(address2_3)s')
# Check that links can be torn down
e('%(ns1)s ip -4 addr del %(address1_1)s/24 '
'dev %(veth1_1)s')
e('%(ns1)s ip link delete %(veth1_1)s')
def test_domain_socket_access(self):
"""Check that a domain socket can be accessed regardless of namespace.
Neutron extends nova' metadata service - which identifies VM's
by their ip addresses - to configurations with overlapping
ips. Support is provided by:
- a proxy in each namespace (neutron-ns-metadata-proxy)
- the proxy can uniquely identify a given VM by its ip
address in the context of the router or network of the
namespace.
- a metadata agent (neutron-metadata-agent) that forwards
requests from the namespace proxies to nova's metadata
service.
Communication between the proxies and the agent is over a unix
domain socket. It is necessary that access to a domain socket
not be restricted by namespace, or such communication will not
be possible.
"""
try:
execute('which nc')
except Exception:
self.fail("The 'nc' command is not available - please install it.")
sock_filename = os.path.join(tempfile.gettempdir(),
'testsock-%s' % str(uuid.uuid4()))
server = None
try:
# Create a server in the root namespace attached to a domain socket
server = e('nc -lU %(sock_filename)s', sudo=False, prefix='',
block=False)
# Attempt to connect to the domain socket from within a namespace
with add_namespace() as ns:
e('%(ns)s nc -U %(sock_filename)s', input='magic',
error_msg='Unable to communicate between namespaces via '
'domain sockets.')
finally:
if server:
server.kill()
if os.path.exists(sock_filename):
os.unlink(sock_filename)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 5,710,294,314,538,621,000 | 36.5 | 79 | 0.557064 | false |
MrCodeYu/spark | python/pyspark/__init__.py | 5 | 3747 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark is the Python API for Spark.
Public classes:
- :class:`SparkContext`:
Main entry point for Spark functionality.
- :class:`RDD`:
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
- :class:`Broadcast`:
A broadcast variable that gets reused across tasks.
- :class:`Accumulator`:
An "add-only" shared variable that tasks can only add values to.
- :class:`SparkConf`:
For configuring Spark.
- :class:`SparkFiles`:
Access files shipped with jobs.
- :class:`StorageLevel`:
Finer-grained cache persistence levels.
"""
from functools import wraps
import types
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer
from pyspark.status import *
from pyspark.profiler import Profiler, BasicProfiler
def since(version):
"""
A decorator that annotates a function to append the version of Spark the function was added.
"""
import re
indent_p = re.compile(r'\n( +)')
def deco(f):
indents = indent_p.findall(f.__doc__)
indent = ' ' * (min(len(m) for m in indents) if indents else 0)
f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version)
return f
return deco
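# Illustrative usage (example added for documentation, not in the original
# source): decorating a method with @since("2.0") appends
# ".. versionadded:: 2.0" to its docstring when the module is imported.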
def copy_func(f, name=None, sinceversion=None, doc=None):
"""
Returns a function with same code, globals, defaults, closure, and
name (or provide a new name).
"""
# See
# http://stackoverflow.com/questions/6527633/how-can-i-make-a-deepcopy-of-a-function-in-python
fn = types.FunctionType(f.__code__, f.__globals__, name or f.__name__, f.__defaults__,
f.__closure__)
# in case f was given attrs (note this dict is a shallow copy):
fn.__dict__.update(f.__dict__)
if doc is not None:
fn.__doc__ = doc
if sinceversion is not None:
fn = since(sinceversion)(fn)
return fn
def keyword_only(func):
"""
A decorator that forces keyword arguments in the wrapped method
and saves actual input keyword arguments in `_input_kwargs`.
"""
@wraps(func)
def wrapper(*args, **kwargs):
if len(args) > 1:
raise TypeError("Method %s forces keyword arguments." % func.__name__)
wrapper._input_kwargs = kwargs
return func(*args, **kwargs)
return wrapper
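# Illustrative sketch (assumed example, not part of the original module):
#
#     class Params(object):
#         @keyword_only
#         def setParams(self, a=1, b=2):
#             return self.setParams._input_kwargs
#
# Params().setParams(a=3) returns {'a': 3}; passing positional arguments
# beyond `self` raises TypeError.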
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row
__all__ = [
"SparkConf", "SparkContext", "SparkFiles", "RDD", "StorageLevel", "Broadcast",
"Accumulator", "AccumulatorParam", "MarshalSerializer", "PickleSerializer",
"StatusTracker", "SparkJobInfo", "SparkStageInfo", "Profiler", "BasicProfiler",
]
| apache-2.0 | -3,309,597,525,621,845,500 | 33.376147 | 98 | 0.689351 | false |
Andypsamp/CODjunit | test/test_lastgenre.py | 25 | 8245 | # This file is part of beets.
# Copyright 2015, Fabrice Laporte.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the 'lastgenre' plugin."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from mock import Mock
from test import _common
from test._common import unittest
from beetsplug import lastgenre
from beets import config
from test.helper import TestHelper
class LastGenrePluginTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.plugin = lastgenre.LastGenrePlugin()
def tearDown(self):
self.teardown_beets()
def _setup_config(self, whitelist=False, canonical=False, count=1):
config['lastgenre']['canonical'] = canonical
config['lastgenre']['count'] = count
if isinstance(whitelist, (bool, basestring)):
# Filename, default, or disabled.
config['lastgenre']['whitelist'] = whitelist
self.plugin.setup()
if not isinstance(whitelist, (bool, basestring)):
# Explicit list of genres.
self.plugin.whitelist = whitelist
def test_default(self):
"""Fetch genres with whitelist and c14n deactivated
"""
self._setup_config()
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Delta Blues')
def test_c14n_only(self):
"""Default c14n tree funnels up to most common genre except for *wrong*
genres that stay unchanged.
"""
self._setup_config(canonical=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Blues')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'Iota Blues')
def test_whitelist_only(self):
"""Default whitelist rejects *wrong* (non existing) genres.
"""
self._setup_config(whitelist=True)
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'')
def test_whitelist_c14n(self):
"""Default whitelist and c14n both activated result in all parents
genres being selected (from specific to common).
"""
self._setup_config(canonical=True, whitelist=True, count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Delta Blues, Blues')
def test_whitelist_custom(self):
"""Keep only genres that are in the whitelist.
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
count=2)
self.assertEqual(self.plugin._resolve_genres(['pop', 'blues']),
'Blues')
self._setup_config(canonical='', whitelist=set(['rock']))
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'')
def test_count(self):
"""Keep the n first genres, as we expect them to be sorted from more to
less popular.
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
count=2)
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'rock', 'blues']),
'Jazz, Rock')
def test_count_c14n(self):
"""Keep the n first genres, after having applied c14n when necessary
"""
self._setup_config(whitelist=set(['blues', 'rock', 'jazz']),
canonical=True,
count=2)
        # thanks to c14n, 'blues' supersedes 'country blues' and takes the
# second slot
self.assertEqual(self.plugin._resolve_genres(
['jazz', 'pop', 'country blues', 'rock']),
'Jazz, Blues')
def test_c14n_whitelist(self):
"""Genres first pass through c14n and are then filtered
"""
self._setup_config(canonical=True, whitelist=set(['rock']))
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'')
def test_empty_string_enables_canonical(self):
"""For backwards compatibility, setting the `canonical` option
to the empty string enables it using the default tree.
"""
self._setup_config(canonical='', count=99)
self.assertEqual(self.plugin._resolve_genres(['delta blues']),
'Blues')
def test_empty_string_enables_whitelist(self):
"""Again for backwards compatibility, setting the `whitelist`
option to the empty string enables the default set of genres.
"""
self._setup_config(whitelist='')
self.assertEqual(self.plugin._resolve_genres(['iota blues']),
'')
def test_no_duplicate(self):
"""Remove duplicated genres.
"""
self._setup_config(count=99)
self.assertEqual(self.plugin._resolve_genres(['blues', 'blues']),
'Blues')
def test_tags_for(self):
class MockPylastElem(object):
def __init__(self, name):
self.name = name
def get_name(self):
return self.name
class MockPylastObj(object):
def get_top_tags(self):
tag1 = Mock()
tag1.weight = 90
tag1.item = MockPylastElem(u'Pop')
tag2 = Mock()
tag2.weight = 40
tag2.item = MockPylastElem(u'Rap')
return [tag1, tag2]
plugin = lastgenre.LastGenrePlugin()
res = plugin._tags_for(MockPylastObj())
self.assertEqual(res, [u'pop', u'rap'])
res = plugin._tags_for(MockPylastObj(), min_weight=50)
self.assertEqual(res, [u'pop'])
def test_get_genre(self):
MOCK_GENRES = {'track': u'1', 'album': u'2', 'artist': u'3'}
def mock_fetch_track_genre(self, obj=None):
return MOCK_GENRES['track']
def mock_fetch_album_genre(self, obj):
return MOCK_GENRES['album']
def mock_fetch_artist_genre(self, obj):
return MOCK_GENRES['artist']
lastgenre.LastGenrePlugin.fetch_track_genre = mock_fetch_track_genre
lastgenre.LastGenrePlugin.fetch_album_genre = mock_fetch_album_genre
lastgenre.LastGenrePlugin.fetch_artist_genre = mock_fetch_artist_genre
self._setup_config(whitelist=False)
item = _common.item()
item.genre = MOCK_GENRES['track']
config['lastgenre'] = {'force': False}
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, 'keep'))
config['lastgenre'] = {'force': True, 'source': 'track'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (MOCK_GENRES['track'], 'track'))
config['lastgenre'] = {'source': 'album'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (MOCK_GENRES['album'], 'album'))
config['lastgenre'] = {'source': 'artist'}
res = self.plugin._get_genre(item)
self.assertEqual(res, (MOCK_GENRES['artist'], 'artist'))
MOCK_GENRES['artist'] = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (item.genre, 'original'))
config['lastgenre'] = {'fallback': 'rap'}
item.genre = None
res = self.plugin._get_genre(item)
self.assertEqual(res, (config['lastgenre']['fallback'].get(),
'fallback'))
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| mit | -6,435,556,565,754,205,000 | 36.477273 | 79 | 0.584718 | false |
sickkids-ccm/dcc-file-transfer | migrations/versions/6a5d6b8f2d27_.py | 1 | 4480 | """empty message
Revision ID: 6a5d6b8f2d27
Revises: None
Create Date: 2016-05-12 18:48:06.996525
"""
# revision identifiers, used by Alembic.
revision = '6a5d6b8f2d27'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('servers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('server_token', sa.String(), nullable=True),
sa.Column('server_id', sa.String(), nullable=True),
sa.Column('server_name', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('server_token')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.String(), nullable=True),
sa.Column('user_name', sa.String(), nullable=True),
sa.Column('user_email', sa.String(), nullable=True),
sa.Column('server_id', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['server_id'], ['servers.server_id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id')
)
op.create_table('access',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('auth_token', sa.String(), nullable=True),
sa.Column('creation_date', sa.DateTime(), nullable=True),
sa.Column('expiration_date', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.user_id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('auth_token')
)
op.create_table('samples',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('sample_name', sa.String(), nullable=True),
sa.Column('user_id', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.user_id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('sample_name', 'user_id', name='sample_id')
)
op.create_table('files',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('identifier', sa.String(), nullable=True),
sa.Column('filename', sa.String(), nullable=True),
sa.Column('total_size', sa.Integer(), nullable=True),
sa.Column('file_type', sa.String(), nullable=True),
sa.Column('readset', sa.String(), nullable=True),
sa.Column('platform', sa.String(), nullable=True),
sa.Column('run_type', sa.String(), nullable=True),
sa.Column('capture_kit', sa.String(), nullable=True),
sa.Column('library', sa.String(), nullable=True),
sa.Column('reference', sa.String(), nullable=True),
sa.Column('upload_status', sa.String(), nullable=True),
sa.Column('upload_start_date', sa.DateTime(), nullable=True),
sa.Column('upload_end_date', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.String(), nullable=True),
sa.Column('access_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['access_id'], ['access.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.user_id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('identifier')
)
op.create_table('runs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('readset', sa.String(), nullable=True),
sa.Column('library', sa.String(), nullable=True),
sa.Column('run_type', sa.String(), nullable=True),
sa.Column('bed', sa.String(), nullable=True),
sa.Column('fastq1', sa.String(), nullable=True),
sa.Column('fastq2', sa.String(), nullable=True),
sa.Column('bam', sa.String(), nullable=True),
sa.Column('status', sa.String(), nullable=True),
sa.Column('user_id', sa.String(), nullable=True),
sa.Column('sample_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sample_id'], ['samples.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.user_id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('sample_file_link',
sa.Column('sample_id', sa.Integer(), nullable=True),
sa.Column('file_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['file_id'], ['files.id'], ),
sa.ForeignKeyConstraint(['sample_id'], ['samples.id'], )
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('sample_file_link')
op.drop_table('runs')
op.drop_table('files')
op.drop_table('samples')
op.drop_table('access')
op.drop_table('users')
op.drop_table('servers')
### end Alembic commands ###
| mit | 5,050,359,744,333,633,000 | 39.36036 | 68 | 0.645089 | false |
lahosken/pants | src/python/pants/build_graph/intransitive_dependency.py | 11 | 1380 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.build_graph.intermediate_target_factory import IntermediateTargetFactoryBase
class IntransitiveDependencyFactory(IntermediateTargetFactoryBase):
"""Creates a dependency which is intransitive.
This dependency will not be seen by dependees of this target. The syntax for this feature is
experimental and may change in the future.
"""
@property
def extra_target_arguments(self):
return dict(_transitive=False)
def __call__(self, address):
return self._create_intermediate_target(address, 'intransitive')
class ProvidedDependencyFactory(IntermediateTargetFactoryBase):
"""Creates an intransitive dependency with scope='compile test'.
This mirrors the behavior of the "provided" scope found in other build systems, such as Gradle,
Maven, and IntelliJ.
The syntax for this feature is experimental and may change in the future.
"""
@property
def extra_target_arguments(self):
return dict(_transitive=False, scope='compile test')
def __call__(self, address):
return self._create_intermediate_target(address, 'provided')
| apache-2.0 | 1,273,221,651,594,387,700 | 33.5 | 97 | 0.752174 | false |
skoppisetty/idigbio-appliance | lib/sqlalchemy/util/compat.py | 17 | 6273 | # util/compat.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handle Python version/platform incompatibilities."""
import sys
try:
import threading
except ImportError:
import dummy_threading as threading
py32 = sys.version_info >= (3, 2)
py3k_warning = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
jython = sys.platform.startswith('java')
pypy = hasattr(sys, 'pypy_version_info')
win32 = sys.platform.startswith('win')
if py3k_warning:
set_types = set
elif sys.version_info < (2, 6):
import sets
set_types = set, sets.Set
else:
# 2.6 deprecates sets.Set, but we still need to be able to detect them
# in user code and as return values from DB-APIs
ignore = ('ignore', None, DeprecationWarning, None, 0)
import warnings
try:
warnings.filters.insert(0, ignore)
except Exception:
import sets
else:
import sets
warnings.filters.remove(ignore)
set_types = set, sets.Set
if py3k_warning:
import pickle
else:
try:
import cPickle as pickle
except ImportError:
import pickle
# a controversial feature, required by MySQLdb currently
def buffer(x):
return x
# Py2K
buffer = buffer
# end Py2K
try:
from contextlib import contextmanager
except ImportError:
def contextmanager(fn):
return fn
try:
from functools import update_wrapper
except ImportError:
def update_wrapper(wrapper, wrapped,
assigned=('__doc__', '__module__', '__name__'),
updated=('__dict__',)):
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, ()))
return wrapper
try:
from functools import partial
except ImportError:
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
return newfunc
if sys.version_info < (2, 6):
# emits a nasty deprecation warning
# in newer pythons
from cgi import parse_qsl
else:
from urlparse import parse_qsl
# Py3K
#from inspect import getfullargspec as inspect_getfullargspec
# Py2K
from inspect import getargspec as inspect_getfullargspec
# end Py2K
if py3k_warning:
# they're bringing it back in 3.2. brilliant !
def callable(fn):
return hasattr(fn, '__call__')
def cmp(a, b):
return (a > b) - (a < b)
from functools import reduce
else:
callable = callable
cmp = cmp
reduce = reduce
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.iteritems()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
# find or create a dict implementation that supports __missing__
class _probe(dict):
def __missing__(self, key):
return 1
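# Indexing a missing key on _probe reveals whether this interpreter's dict
# honors __missing__ (Python 2.5+): if it does, plain dict is used; otherwise
# the py25_dict subclass below adds that lookup.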
try:
try:
_probe()['missing']
py25_dict = dict
except KeyError:
class py25_dict(dict):
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
try:
missing = self.__missing__
except AttributeError:
raise KeyError(key)
else:
return missing(key)
finally:
del _probe
try:
import hashlib
_md5 = hashlib.md5
except ImportError:
import md5
_md5 = md5.new
def md5_hex(x):
# Py3K
#x = x.encode('utf-8')
m = _md5()
m.update(x)
return m.hexdigest()
import time
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
if sys.version_info >= (2, 5):
any = any
else:
def any(iterator):
for item in iterator:
if bool(item):
return True
else:
return False
if sys.version_info >= (2, 5):
def decode_slice(slc):
"""decode a slice object as sent to __getitem__.
takes into account the 2.5 __index__() method, basically.
"""
ret = []
for x in slc.start, slc.stop, slc.step:
if hasattr(x, '__index__'):
x = x.__index__()
ret.append(x)
return tuple(ret)
else:
def decode_slice(slc):
return (slc.start, slc.stop, slc.step)
if sys.version_info >= (2, 6):
from operator import attrgetter as dottedgetter
else:
def dottedgetter(attr):
def g(obj):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
return g
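# dottedgetter("a.b")(obj) resolves obj.a.b, mirroring operator.attrgetter's
# dotted-name support that was only added in Python 2.6.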
import decimal
| gpl-3.0 | 7,171,898,382,078,776,000 | 25.468354 | 84 | 0.570222 | false |
cooperative-computing-lab/cctools | prune/src/prune/glob.py | 8 | 4688 | # Copyright (c) 2010- The University of Notre Dame.
# This software is distributed under the GNU General Public License.
# See the file COPYING for details.
import os, sys, hashlib, time, json
from utils import *
ready = False
shutting_down = False
HOME = os.path.expanduser("~")
CWD = os.getcwd()
base_dir = HOME+'/.prune/'
data_file_directory = base_dir+'data/files/'
data_db_pathname = base_dir+'data/_prune.db'
data_log_pathname = base_dir+'logs/data.log'
cache_file_directory = base_dir+'cache/files/'
cache_db_pathname = base_dir+'cache/_prune.db'
cache_log_pathname = base_dir+'logs/cache.log'
trash_file_directory = base_dir+'trash/files/'
trash_db_pathname = base_dir+'trash/_prune.db'
trash_log_pathname = base_dir+'logs/trash.log'
work_db_pathname = base_dir+'_work.db'
work_log_pathname = base_dir+'logs/work.log'
sandbox_directory = base_dir+'sandboxes/'
tmp_file_directory = base_dir+'tmp/'
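# Data, cache, and trash each get their own file store, database file and log
# under the base directory; sandboxes/ and tmp/ hold in-flight job state.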
# Server settings
#server_log_pathname = base_dir+'logs/server.log'
#hostname = '127.0.0.1'
#port = 8073
# Worker settings
worker_log_pathname = base_dir+'logs/worker.log'
timer_log = base_dir+'logs/timing.log'
wq_debug_log_pathname = base_dir+'logs/wq_debug.log'
wq_log_pathname = base_dir+'logs/wq.log'
def set_base_dir( new_base_dir ):
global base_dir
global data_file_directory, data_db_pathname, data_log_pathname
if data_file_directory.startswith(base_dir):
data_file_directory = new_base_dir+'data/files/'
data_db_pathname = new_base_dir+'data/_prune.db'
data_log_pathname = new_base_dir+'logs/data.log'
global cache_file_directory, cache_db_pathname, cache_log_pathname
if cache_file_directory.startswith(base_dir):
cache_file_directory = new_base_dir+'cache/files/'
cache_db_pathname = new_base_dir+'cache/_prune.db'
cache_log_pathname = new_base_dir+'logs/cache.log'
global trash_file_directory, trash_db_pathname, trash_log_pathname
if trash_file_directory.startswith(base_dir):
trash_file_directory = new_base_dir+'trash/files/'
trash_db_pathname = new_base_dir+'trash/_prune.db'
trash_log_pathname = new_base_dir+'logs/trash.log'
global work_db_pathname, work_log_pathname
if work_db_pathname.startswith(base_dir):
work_db_pathname = new_base_dir+'_work.db'
work_log_pathname = new_base_dir+'logs/work.log'
global sandbox_directory, tmp_file_directory
if sandbox_directory.startswith(base_dir):
sandbox_directory = new_base_dir+'sandboxes/'
tmp_file_directory = new_base_dir+'tmp/'
global worker_log_pathname
if worker_log_pathname.startswith(base_dir):
worker_log_pathname = new_base_dir+'logs/worker.log'
global timer_log
if timer_log.startswith(base_dir):
timer_log = new_base_dir+'logs/timing.log'
global wq_debug_log_pathname, wq_log_pathname
if wq_debug_log_pathname.startswith(base_dir):
wq_debug_log_pathname = new_base_dir+'logs/wq_debug.log'
wq_log_pathname = new_base_dir+'logs/wq.log'
base_dir = new_base_dir
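# Note that set_base_dir() only rewrites paths still rooted at the previous
# base_dir, so any path customized elsewhere is left untouched.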
#total_quota = 30000000000000 # considers data and cache
total_quota = 100000000000 # considers data and cache
exec_local_concurrency = 16
repository_id = uuid()
workflow_id = None
workflow_step = None
wq_port = 0
#wq_name = 'prune_'+uuid()
wq_name = 'prune'
wq_stage = None
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
cctools_version = 'CCTOOLS_VERSION'
cctools_releasedate = 'CCTOOLS_RELEASE_DATE'
def set_config_file(new_config_file):
if os.path.isfile(new_config_file):
with open(new_config_file) as f:
json_str = f.read(1024*1024)
body = json.loads(json_str)
for key in body:
val = body[key]
if key=='base_dir':
set_base_dir( val )
elif key=='repository_id':
global repository_id
repository_id = val
elif key=='total_quota':
global total_quota
total_quota = int(val)
elif key=='wq_name':
global wq_name
wq_name = val
elif key=='exec_local_concurrency':
global exec_local_concurrency
exec_local_concurrency = int(val)
else:
print 'Unknown config option:',key, val
else:
print 'File not found:',new_config_file
config_file = HOME+'/.prune/config'
if not os.path.isfile(config_file):
with open(config_file,'w') as f:
obj = {'base_dir':base_dir, 'repository_id':repository_id,
'total_quota':total_quota, 'wq_name':wq_name,
'exec_local_concurrency':exec_local_concurrency}
f.write(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))
else:
set_config_file( config_file )
if False:
obj = {'base_dir':base_dir, 'repository_id':repository_id,
'total_quota':total_quota, 'wq_name':wq_name,
'exec_local_concurrency':exec_local_concurrency}
print json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
| gpl-2.0 | -2,328,705,761,841,673,000 | 28.118012 | 76 | 0.705631 | false |
couchbase/couchbase-python-client | couchbase_v2/tests/cases/view_t.py | 1 | 6004 | #
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from couchbase_tests.base import ViewTestCase
from couchbase_core.user_constants import FMT_JSON
from couchbase_v2.exceptions import HTTPException, NotSupportedException
from couchbase_v2.bucket import Bucket
from couchbase.management.users import Role
from couchbase.auth import AuthDomain
from nose import SkipTest
from nose.plugins.attrib import attr
DESIGN_JSON = {
'language' : 'javascript',
'views' : {
'recent_posts' : {
'map' :
"""
function(doc) {
if (doc.date && doc.title) {
emit(doc.date, doc.title);
}
}
""".replace("\n", '')
}
}
}
DOCS_JSON = {
"bought-a-cat" : {
"title" : "Bought a Cat",
"body" : "I went to the pet store earlier and brought home a "
"little kitty",
"date" : "2009/01/30 18:04:11"
},
"biking" : {
"title" : "Biking",
"body" : "My biggest hobby is mountainbiking. The other day..",
"date" : "2009/01/30 18:04:11"
},
"hello-world" : {
"title" : "Hello World",
"body" : "Well hello and welcome to my new blog",
"date" : "2009/01/15 15:52:20"
}
}
@attr("view")
class ViewTest(ViewTestCase):
def setUp(self):
super(ViewTest, self).setUp()
self.skipIfMock()
mgr = self.cb.bucket_manager()
ret = mgr.design_create('blog', DESIGN_JSON, use_devmode=False)
self.assertTrue(ret.success)
self.assertTrue(self.cb.upsert_multi(DOCS_JSON, format=FMT_JSON).all_ok)
def test_simple_view(self):
ret = self.cb._view("blog", "recent_posts",
params={ 'stale' : 'false' })
self.assertTrue(ret.success)
rows = ret.value
self.assertIsInstance(rows, dict)
print(rows)
self.assertTrue(rows['total_rows'] >= 3)
self.assertTrue(len(rows['rows']) == rows['total_rows'])
def test_with_params(self):
ret = self.cb._view("blog", "recent_posts",
params={'limit':1})
self.assertTrue(ret.success)
rows = ret.value['rows']
self.assertEqual(len(rows), 1)
def test_with_strparam(self):
ret = self.cb._view("blog", "recent_posts", params='limit=2')
self.assertTrue(ret.success)
self.assertEqual(len(ret.value['rows']), 2)
def test_with_jparams(self):
jkey_pure = '2009/01/15 15:52:20'
ret = self.cb._view("blog", "recent_posts",
params={
'startkey' : jkey_pure,
'endkey' : jkey_pure,
'inclusive_end' : 'true'
})
print(ret)
self.assertTrue(ret.success)
rows = ret.value['rows']
self.assertTrue(len(rows) == 1)
single_row = rows[0]
self.assertEqual(single_row['id'], 'hello-world')
self.assertEqual(single_row['key'], jkey_pure)
jkey_pure = []
for v in DOCS_JSON.values():
curdate = v['date']
jkey_pure.append(curdate)
ret = self.cb._view("blog", "recent_posts",
params={
'keys' : jkey_pure
})
self.assertTrue(ret.success)
self.assertTrue(len(ret.value['rows']), 3)
for row in ret.value['rows']:
self.assertTrue(row['id'] in DOCS_JSON)
self.assertTrue(row['key'] in jkey_pure)
def test_missing_view(self):
self.assertRaises(HTTPException,
self.cb._view,
"nonexist", "designdoc")
def test_reject_ephemeral_attempt(self):
if not self._realserver_info:
raise SkipTest("Need real server")
admin=self.make_admin_connection()
bucket_name = 'ephemeral'
users=[('writer',('s3cr3t',[Role(name='data_reader', bucket='ephemeral'), Role(name='data_writer', bucket='ephemeral')])),
('reader',('s3cr3t',[Role(name='data_reader', bucket='ephemeral')])),
('viewer',('s3cr3t',[Role(name='views_reader', bucket='ephemeral'), Role(name='views_admin', bucket='ephemeral')]))]
user=users[2]
(userid, password, roles) = user[0],user[1][0],user[1][1]
# add user
try:
admin.bucket_delete(bucket_name)
except:
pass
try:
admin.bucket_create(name=bucket_name,
bucket_type='ephemeral',
ram_quota=100)
except HTTPException:
raise SkipTest("Unable to provision ephemeral bucket")
try:
admin.user_upsert(userid, AuthDomain.Local, password, roles)
admin.wait_ready(bucket_name, timeout=10)
conn_str = "couchbase://{0}/{1}".format(self.cluster_info.host, bucket_name)
bucket = Bucket(connection_string=conn_str,username=userid,password=password)
self.assertIsNotNone(bucket)
self.assertRaisesRegex(NotSupportedException, "Ephemeral", lambda: bucket.query("beer", "brewery_beers", streaming=True, limit=100))
finally:
admin.bucket_delete(bucket_name)
admin.user_remove(userid, AuthDomain.Local)
| apache-2.0 | 549,073,283,779,258,560 | 35.168675 | 144 | 0.560127 | false |
versatica/mediasoup | worker/deps/gyp/test/win/gyptest-link-enable-winrt-app-revision.py | 10 | 1220 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_application_type_revision works correctly.
"""
from __future__ import print_function
import TestGyp
import os
import sys
import struct
CHDIR = 'winrt-app-type-revision'
print('This test is not currently working on the bots: https://code.google.com/p/gyp/issues/detail?id=466')
sys.exit(0)
if (sys.platform == 'win32' and
int(os.environ.get('GYP_MSVS_VERSION', 0)) == 2013):
test = TestGyp.TestGyp(formats=['msvs'])
test.run_gyp('winrt-app-type-revision.gyp', chdir=CHDIR)
test.build('winrt-app-type-revision.gyp', 'enable_winrt_81_revision_dll',
chdir=CHDIR)
# Revision is set to 8.2 which is invalid for 2013 projects so compilation
# must fail.
test.build('winrt-app-type-revision.gyp', 'enable_winrt_82_revision_dll',
chdir=CHDIR, status=1)
# Revision is set to an invalid value for 2013 projects so compilation
# must fail.
test.build('winrt-app-type-revision.gyp', 'enable_winrt_invalid_revision_dll',
chdir=CHDIR, status=1)
test.pass_test()
| isc | -7,206,061,954,289,748,000 | 27.372093 | 107 | 0.70082 | false |
carthach/essentia | test/src/unittests/audioproblems/test_discontinuitydetector.py | 1 | 4573 | #!/usr/bin/env python
# Copyright (C) 2006-2019 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia import array as esarr
class TestDiscontinuityDetector(TestCase):
def InitDiscontinuityDetector(self, **kwargs):
return DiscontinuityDetector(**kwargs)
def testZero(self):
# An array of zeros should return an empty list.
size = 1024
self.assertEqualVector(
self.InitDiscontinuityDetector(frameSize=size)(
esarr(numpy.zeros(size)))[0], esarr([]))
def testSquareWave(self):
        # The algorithm should be robust to square waves if
        # there are at least a few periods in the frame:
# f > ~200Hz for a window size of 512 @ 44.1kHz
# Try different frequencies.
fs = 44100
minFreq = 200 # Hz
maxFreq = 20000 # Hz
time = 10 # s
for f in numpy.linspace(minFreq, maxFreq, 5):
samplenum = int(fs / f)
samplenum -= samplenum % 2
waveTable = [0] * samplenum
waveTable[:samplenum // 2] = [1] * (samplenum // 2)
waveDur = len(waveTable) / 44100.
repetitions = int(time / waveDur)
input = waveTable * repetitions
self.assertEqualVector(
self.InitDiscontinuityDetector()(esarr(input))[0], esarr([]))
def testRegression(self, frameSize=512, hopSize=256):
fs = 44100
audio = MonoLoader(filename=join(testdata.audio_dir,
'recorded/cat_purrrr.wav'),
sampleRate=fs)()
originalLen = len(audio)
startJump = originalLen // 4
groundTruth = [startJump / float(fs)]
# Make sure that the artificial jump produces a prominent discontinuity.
if audio[startJump] > 0:
end = next(idx for idx, i in enumerate(audio[startJump:]) if i < -.3)
else:
end = next(idx for idx, i in enumerate(audio[startJump:]) if i > .3)
endJump = startJump + end
audio = esarr(numpy.hstack([audio[:startJump], audio[endJump:]]))
frameList = []
discontinuityDetector = self.InitDiscontinuityDetector(
frameSize=frameSize, hopSize=hopSize,
detectionThreshold=10)
for idx, frame in enumerate(FrameGenerator(
audio, frameSize=frameSize,
hopSize=hopSize, startFromZero=True)):
locs, _ = discontinuityDetector(frame)
if not len(locs) == 0:
for loc in locs:
frameList.append((idx * hopSize + loc) / float(fs))
self.assertAlmostEqualVector(frameList, groundTruth, 1e-7)
def testNoOverlap(self):
# The algorithm should also work without overlapping.
self.testRegression(frameSize=512, hopSize=512)
def testInvalidParam(self):
self.assertConfigureFails(DiscontinuityDetector(), {'order': 0})
self.assertConfigureFails(DiscontinuityDetector(), {'frameSize': 0})
self.assertConfigureFails(DiscontinuityDetector(), {'hopSize': 1024})
self.assertConfigureFails(DiscontinuityDetector(), {'kernelSize': -1})
self.assertConfigureFails(DiscontinuityDetector(), {'hopSize': 1024})
self.assertConfigureFails(DiscontinuityDetector(), {'detectionThreshold': -12})
self.assertConfigureFails(DiscontinuityDetector(), {'subFrameSize': 1024})
self.assertConfigureFails(DiscontinuityDetector(), {'frameSize': 64,
'hopSize': 32,
'order': 64})
suite = allTests(TestDiscontinuityDetector)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | 875,029,061,283,172,700 | 39.114035 | 87 | 0.61229 | false |
jordan-wright/talent-match | talent_match/models/invitation.py | 1 | 2820 | from talent_match import db
from modelUtils import modelToString
__author__ = 'Steve'
##
# This class represents an invitation from a talent seeker to a talent provider in order to fill the need for
# a given skill that the provider possesses and the seeker needs for an activity.
##
class Invitation(db.Model):
__tablename__ = 'invitation'
id = db.Column(
db.INTEGER, primary_key=True, autoincrement=True, nullable=False, index=True)
invitingUserID = db.Column(db.INTEGER, db.ForeignKey('user.id'))
receivingUserID = db.Column(db.INTEGER, db.ForeignKey('user.id'))
activityID = db.Column(db.INTEGER, db.ForeignKey('activity.id'))
skillID = db.Column(db.INTEGER, db.ForeignKey('skill.id'))
# Project 3: Steve - adding relationships and navigation
skill = db.relationship(
'Skill', backref='invitation', uselist=False, lazy='joined')
activity = db.relationship(
'Activity', backref='invitation', uselist=False, lazy='joined')
invitingUser = db.relationship(
'User', uselist=False, lazy='joined', foreign_keys=[invitingUserID])
receivingUser = db.relationship(
'User', uselist=False, lazy='joined', foreign_keys=[receivingUserID])
accepted = db.Column(db.Boolean)
canceled = db.Column(db.Boolean, default=False)
# Project 4 - Steve - adding a completed column as well.
completed = db.Column(db.Boolean, default=False, nullable=False)
# Project 4 - Steve for Nick, I think - adding a requestSent column as
# well.
requestSent = db.Column(db.Boolean, default=False, nullable=True)
def __init__(self, activityID, skillID, invitingUserID, receivingUserID):
self.activityID = activityID
self.invitingUserID = invitingUserID
self.receivingUserID = receivingUserID
self.skillID = skillID
def __repr__(self):
return modelToString(self)
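##
# Inferred from the field names (the original source does not document it): an
# InvitationRequest captures the reverse flow, where a requester asks the owner
# of an activity to include them, rather than being invited directly.
##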
class InvitationRequest(db.Model):
__tablename__ = 'invitation_request'
id = db.Column(
db.INTEGER, primary_key=True, autoincrement=True, nullable=False, index=True)
requesterUserID = db.Column(db.INTEGER, db.ForeignKey('user.id'))
activityUserID = db.Column(db.INTEGER, db.ForeignKey('user.id'))
activityID = db.Column(db.INTEGER, db.ForeignKey('activity.id'))
accepted = db.Column(db.Boolean)
requesterUser = db.relationship(
'User', uselist=False, lazy='joined', foreign_keys=[requesterUserID])
activityUser = db.relationship(
'User', uselist=False, lazy='joined', foreign_keys=[activityUserID])
def __init__(self, activityID, requesterUserID, activityUserID):
self.activityID = activityID
self.requesterUserID = requesterUserID
self.activityUserID = activityUserID
def __repr__(self):
return modelToString(self)
| mit | 2,724,002,098,100,378,600 | 36.6 | 111 | 0.691135 | false |
Stanford-Online/edx-analytics-dashboard | analytics_dashboard/courses/presenters/programs.py | 1 | 2129 | from courses.presenters import BasePresenter
from django.core.cache import cache
class ProgramsPresenter(BasePresenter):
""" Presenter for the programs metadata. """
CACHE_KEY = 'programs'
NON_NULL_STRING_FIELDS = ['program_id', 'program_type', 'program_title']
@staticmethod
def filter_programs(all_programs, program_ids=None, course_ids=None):
"""Filter results to just the program IDs specified and then to just the programs that have
a course in the given course_ids list.
"""
if program_ids is None:
programs = all_programs
else:
programs = [program for program in all_programs if program['program_id'] in program_ids]
# Now apply course_ids filter
if course_ids is None:
return programs
return [program for program in programs if not set(program['course_ids']).isdisjoint(course_ids)]
def _get_all_programs(self):
"""
Returns all programs. If not cached, programs will be fetched
from the analytics data API.
"""
all_programs = cache.get(self.CACHE_KEY)
if all_programs is None:
all_programs = self.client.programs().programs()
all_programs = [
{field: ('' if val is None and field in self.NON_NULL_STRING_FIELDS else val)
for field, val in program.items()} for program in all_programs]
cache.set(self.CACHE_KEY, all_programs)
return all_programs
def get_programs(self, program_ids=None, course_ids=None):
"""
Returns programs that match those listed in program_ids. If
no program IDs provided, all programs will be returned.
"""
all_programs = self._get_all_programs()
filtered_programs = self.filter_programs(all_programs, program_ids=program_ids, course_ids=course_ids)
# sort by title by default with blank values at the end
filtered_programs = sorted(
filtered_programs,
key=lambda x: (not x['program_title'], x['program_title']))
return filtered_programs
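# Illustrative usage sketch (not part of the original module), assuming
# BasePresenter takes the analytics API client as its constructor argument;
# the client and course ID below are hypothetical.
def _example_programs_usage(analytics_client):
    presenter = ProgramsPresenter(analytics_client)
    # Programs that contain the given course, sorted by title.
    return presenter.get_programs(
        course_ids=['course-v1:org.1+course_1+Run_1'])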
| agpl-3.0 | -7,853,581,976,939,229,000 | 39.169811 | 110 | 0.633631 | false |
edx/edx-platform | openedx/core/djangoapps/content/course_overviews/management/commands/tests/test_simulate_publish.py | 4 | 7321 | """
Tests the simulate_publish management command.
"""
from django.core.management import call_command
from django.core.management.base import CommandError
from testfixtures import LogCapture
import lms.djangoapps.ccx.tasks
import openedx.core.djangoapps.content.course_overviews.signals
from openedx.core.djangoapps.content.course_overviews.management.commands.simulate_publish import Command, name_from_fn
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview, SimulateCoursePublishConfig
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import SwitchedSignal
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
LOGGER_NAME = 'simulate_publish'
class TestSimulatePublish(SharedModuleStoreTestCase):
"""Test simulate_publish, our fake course-publish signal command."""
@classmethod
def setUpClass(cls):
"""
Create courses in modulestore.
Modulestore signals are suppressed by ModuleStoreIsolationMixin, so this
method should not trigger things like CourseOverview creation.
"""
super().setUpClass()
cls.command = Command()
# org.0/course_0/Run_0
cls.course_key_1 = CourseFactory.create(default_store=ModuleStoreEnum.Type.mongo).id
# course-v1:org.1+course_1+Run_1
cls.course_key_2 = CourseFactory.create(default_store=ModuleStoreEnum.Type.split).id
# course-v1:org.2+course_2+Run_2
cls.course_key_3 = CourseFactory.create(default_store=ModuleStoreEnum.Type.split).id
def setUp(self):
"""
Most of this is isolating and re-initializing our signal handler. It
might look like you can move this to setUpClass, but be very careful if
doing so, to make sure side-effects don't leak out between tests.
"""
super().setUp()
# Instead of using the process global SignalHandler.course_published, we
# create our own SwitchedSignal to manually send to.
Command.course_published_signal = SwitchedSignal('test_course_publish')
# Course Overviews Handler
# pylint: disable=protected-access
Command.course_published_signal.connect(
openedx.core.djangoapps.content.course_overviews.signals._listen_for_course_publish
)
# CCX Handler
Command.course_published_signal.connect(
lms.djangoapps.ccx.tasks.course_published_handler
)
Command.course_published_signal.connect(self.sample_receiver_1)
Command.course_published_signal.connect(self.sample_receiver_2)
self.received_1 = []
self.received_2 = []
def tearDown(self):
"""Cleap up our signals."""
# pylint: disable=protected-access
Command.course_published_signal.disconnect(
openedx.core.djangoapps.content.course_overviews.signals._listen_for_course_publish
)
Command.course_published_signal.disconnect(
lms.djangoapps.ccx.tasks.course_published_handler
)
Command.course_published_signal.disconnect(self.sample_receiver_1)
Command.course_published_signal.disconnect(self.sample_receiver_2)
super().tearDown()
def options(self, **kwargs):
"""
Return an options dict that can be passed to self.command.handle()
Passed in **kwargs will override existing defaults. Most defaults are
the same as they are for running the management command manually (e.g.
dry_run is False, show_receivers is False), except that the list of
receivers is by default limited to the two that exist in this test
class. We do this to keep these tests faster and more self contained.
"""
default_receivers = [
name_from_fn(self.sample_receiver_1),
name_from_fn(self.sample_receiver_2),
]
default_options = dict(
show_receivers=False,
dry_run=False,
receivers=default_receivers,
courses=None,
delay=0,
force_lms=False,
skip_ccx=False,
args_from_database=False
)
default_options.update(kwargs)
return default_options
def test_specific_courses(self):
"""Test sending only to specific courses."""
self.command.handle(
**self.options(
courses=[str(self.course_key_1), str(self.course_key_2)]
)
)
assert self.course_key_1 in self.received_1
assert self.course_key_2 in self.received_1
assert self.course_key_3 not in self.received_1
assert self.received_1 == self.received_2
def test_specific_receivers(self):
"""Test sending only to specific receivers."""
self.command.handle(
**self.options(
receivers=[name_from_fn(self.sample_receiver_1)]
)
)
assert self.course_key_1 in self.received_1
assert self.course_key_2 in self.received_1
assert self.course_key_3 in self.received_1
assert self.received_2 == []
def test_course_overviews(self):
"""Integration test with CourseOverviews."""
assert CourseOverview.objects.all().count() == 0
# pylint: disable=protected-access
self.command.handle(
**self.options(
receivers=[
name_from_fn(openedx.core.djangoapps.content.course_overviews.signals._listen_for_course_publish)
]
)
)
assert CourseOverview.objects.all().count() == 3
assert self.received_1 == []
assert self.received_2 == []
def sample_receiver_1(self, sender, course_key, **kwargs): # pylint: disable=unused-argument
"""Custom receiver for testing."""
self.received_1.append(course_key)
def sample_receiver_2(self, sender, course_key, **kwargs): # pylint: disable=unused-argument
"""Custom receiver for testing."""
self.received_2.append(course_key)
def test_args_from_database(self):
"""Test management command arguments injected from config model."""
# Nothing in the database, should default to disabled
with self.assertRaisesRegex(CommandError, 'SimulateCourseConfigPublish is disabled.*'):
call_command('simulate_publish', '--args-from-database')
# Add a config
config = SimulateCoursePublishConfig.current()
config.arguments = '--delay 20 --dry-run'
config.enabled = True
config.save()
with LogCapture(LOGGER_NAME) as log:
call_command('simulate_publish')
log.check_present(
(
LOGGER_NAME, 'INFO',
"simulate_publish starting, dry-run={}, delay={} seconds".format('False', '0')
),
)
with LogCapture(LOGGER_NAME) as log:
call_command('simulate_publish', '--args-from-database')
log.check_present(
(
LOGGER_NAME, 'INFO',
"simulate_publish starting, dry-run={}, delay={} seconds".format('True', '20')
),
)
| agpl-3.0 | -3,991,949,623,043,305,500 | 38.788043 | 119 | 0.639257 | false |
potatolondon/reportato | reportato/tests.py | 1 | 13817 | # coding=utf-8
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.test import TestCase, RequestFactory
from mock import Mock, patch
from .reporters import ModelReporter, UndefinedField
from .utils import UnicodeWriter
from .views import BaseCSVGeneratorView
class ModelReporterMetaclassTestCase(TestCase):
def test_invalid_headers(self):
with self.assertRaises(FieldError) as exception:
class ThisShouldFail(ModelReporter):
class Meta:
model = Permission
custom_headers = {
'foo': 'Foo', 'span': 'Span', 'codename': 'Meh'
}
self.assertEqual(
exception.exception.message,
'Unknown header(s) (foo, span) specified for Permission'
)
def test_custom_header_with_non_selected_field(self):
with self.assertRaises(FieldError) as exception:
class ThisShouldFail(ModelReporter):
class Meta:
model = Permission
fields = ('name',)
custom_headers = {'codename': 'Meh'}
self.assertEqual(
exception.exception.message,
'Unknown header(s) (codename) specified for Permission'
)
# Test classes
class BaseUserReporter(ModelReporter):
class Meta:
model = get_user_model()
class UserReporterWithCustomHeaders(ModelReporter):
class Meta:
model = get_user_model()
custom_headers = {
'first_name': 'Christian name',
'last_name': 'Family name',
'email': 'Gmail address'
}
class PermissionReporterWithAllFields(ModelReporter):
class Meta:
model = Permission
class PermissionReporterWithSomeFields(ModelReporter):
class Meta:
model = Permission
fields = ('name', 'codename')
class PermissionReporterWithFieldsNotInTheModel(ModelReporter):
class Meta:
model = Permission
fields = ('name', 'codename', 'foo')
class PermissionReporterWithSomeFieldsAndCustomRenderer(ModelReporter):
class Meta:
model = Permission
fields = ('name', 'codename')
def get_codename_column(self, instance):
return instance.codename.replace('_', ' ').capitalize()
class PermissionReporterWithCustomHeaders(ModelReporter):
class Meta:
model = Permission
custom_headers = {
'id': 'Key',
'name': 'Foo',
}
class GroupReporter(ModelReporter):
class Meta:
model = Group
fields = ('name', 'permissions',)
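# Illustrative sketch (not part of the original test suite): how the reporter
# fixtures above are typically driven outside of a test, mirroring the
# assertions in ModelReporterTestCase below.
def _example_reporter_usage():
    reporter = PermissionReporterWithSomeFields(Permission.objects.all())
    header = reporter.get_header_row()   # e.g. ['Name', 'Codename']
    rows = list(reporter.get_rows())     # one list of unicode values per object
    return header, rows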
class ModelReporterTestCase(TestCase):
def _create_users(self, _quantity=5):
for i in range(1, _quantity + 1):
first_name = 'Fred %s' % i
last_name = 'Bloggs %s' % i
username = 'foo%s' % i
email = '%[email protected]' % i
get_user_model().objects.create(username=username,
email=email, first_name=first_name, last_name=last_name)
def test_basic_reporter(self):
reporter = BaseUserReporter()
self.assertEqual(reporter.items.count(), 0)
def test_reporter_with_some_items(self):
self._create_users(_quantity=5)
reporter = BaseUserReporter()
self.assertEqual(reporter.items.count(), 5)
def test_reporter_with_fixed_queryset(self):
self._create_users(_quantity=10)
reporter = BaseUserReporter(get_user_model().objects.all()[:7])
self.assertEqual(reporter.items.count(), 7)
def test_reporter_gets_all_model_fields(self):
reporter = PermissionReporterWithAllFields()
self.assertEqual(
set(reporter.fields),
set(['codename', 'content_type', u'id', 'name'])
)
def test_reporter_gets_given_model_fields(self):
reporter = PermissionReporterWithSomeFields()
self.assertEqual(
reporter.fields,
('name', 'codename')
)
def test_reporter_with_fields_not_in_the_model(self):
reporter = PermissionReporterWithFieldsNotInTheModel()
self.assertEqual(
reporter.fields,
('name', 'codename', 'foo')
)
def test_default_headers(self):
reporter = PermissionReporterWithAllFields()
self.assertEqual(
set(reporter.get_header_row()),
set([u'Codename', u'Content type', u'Id', u'Name'])
)
def test_custom_headers(self):
reporter = PermissionReporterWithCustomHeaders()
self.assertEqual(
set(reporter.get_header_row()),
set([u'Codename', u'Content type', u'Key', u'Foo'])
)
def test_row_generation_with_all_fields(self):
ct = ContentType.objects.get_for_model(Permission)
permissions = Permission.objects.filter(content_type=ct)
reporter = PermissionReporterWithAllFields(permissions)
permission = permissions.get(codename='add_permission')
self.assertEqual(
reporter.get_row(permission),
{
'codename': u'add_permission', 'content_type': u'permission',
u'id': u'1', 'name': u'Can add permission',
}
)
def test_generate_all_rows_with_all_fields(self):
ct = ContentType.objects.get_for_model(Permission)
permissions = Permission.objects.filter(content_type=ct)
reporter = PermissionReporterWithAllFields(permissions)
self.assertEqual(
[row for row in reporter.get_rows()],
[
[u'1', u'Can add permission', u'permission', u'add_permission'],
[u'2', u'Can change permission', u'permission', u'change_permission'],
[u'3', u'Can delete permission', u'permission', u'delete_permission'],
]
)
def test_row_generation_with_some_fields(self):
ct = ContentType.objects.get_for_model(Permission)
permissions = Permission.objects.filter(content_type=ct)
reporter = PermissionReporterWithSomeFields(permissions)
permission = permissions.get(codename='add_permission')
self.assertEqual(
reporter.get_row(permission),
{'codename': 'add_permission', 'name': 'Can add permission'}
)
def test_undefined_field_raises_exception(self):
ct = ContentType.objects.get_for_model(Permission)
permissions = Permission.objects.filter(content_type=ct)
reporter = PermissionReporterWithFieldsNotInTheModel(permissions)
permission = permissions.get(codename='add_permission')
self.assertRaises(UndefinedField, reporter.get_row, permission)
def test_undefined_field_with_custom_method(self):
ct = ContentType.objects.get_for_model(Permission)
permissions = Permission.objects.filter(content_type=ct)
reporter = PermissionReporterWithFieldsNotInTheModel(permissions)
reporter.get_foo_column = lambda x: 'id-%s' % x.id
self.assertEqual(
[row for row in reporter.get_rows()],
[
['Can add permission', 'add_permission', 'id-1'],
['Can change permission', 'change_permission', 'id-2'],
['Can delete permission', 'delete_permission', 'id-3'],
]
)
def test_generate_all_rows_with_some_fields(self):
ct = ContentType.objects.get_for_model(Permission)
permissions = Permission.objects.filter(content_type=ct)
reporter = PermissionReporterWithSomeFields(permissions)
self.assertEqual(
[row for row in reporter.get_rows()],
[
['Can add permission', 'add_permission'],
['Can change permission', 'change_permission'],
['Can delete permission', 'delete_permission'],
]
)
def test_many_to_many_fields(self):
ct = ContentType.objects.get_for_model(Permission)
permissions = Permission.objects.filter(content_type=ct)
self.assertEqual(Group.objects.count(), 0)
group = Group.objects.create(name='foo')
group.permissions.add(*permissions)
reporter = GroupReporter()
self.assertEqual(
[row for row in reporter.get_rows()],
[
[u'foo', u'auth | permission | Can add permission, auth | permission | Can change permission, auth | permission | Can delete permission'],
]
)
def test_custom_renderer(self):
ct = ContentType.objects.get_for_model(Permission)
permissions = Permission.objects.filter(content_type=ct)
reporter = PermissionReporterWithSomeFieldsAndCustomRenderer(permissions)
self.assertEqual(
[row for row in reporter.get_rows()],
[
['Can add permission', 'Add permission'],
['Can change permission', 'Change permission'],
['Can delete permission', 'Delete permission'],
]
)
def test_reporter_with_hidden_fields(self):
self._create_users()
reporter = BaseUserReporter(visible_fields=('first_name', 'last_name'))
self.assertEqual(['First name', 'Last name'], reporter.get_header_row())
self.assertEqual([row for row in reporter.get_rows()], [
['Fred 1', 'Bloggs 1'],
['Fred 2', 'Bloggs 2'],
['Fred 3', 'Bloggs 3'],
['Fred 4', 'Bloggs 4'],
['Fred 5', 'Bloggs 5']
])
def test_reporter_with_hidden_fields_and_custom_headers(self):
reporter = UserReporterWithCustomHeaders(visible_fields=('first_name', 'last_name'))
self.assertEqual(['Christian name', 'Family name'], reporter.get_header_row())
def test_default_field_renderer(self):
reporter = BaseUserReporter()
class MockUser(object):
number = 0
encoded_string = u'üníçođé þħíñgß'
user = MockUser()
self.assertEqual(reporter._default_field_renderer(user, 'number'), u'0')
self.assertEqual(
reporter._default_field_renderer(user, 'encoded_string'),
u'üníçođé þħíñgß'
)
class BaseCSVGeneratorViewTestCase(TestCase):
def test_get_reporter_class(self):
view = BaseCSVGeneratorView()
mock = Mock()
view.reporter_class = mock
self.assertEqual(view.get_reporter_class(), mock)
def test_get_reporter(self):
view = BaseCSVGeneratorView()
mock = Mock()
mock.return_value = 'foo' # we make sure we're instantiating the class
view.reporter_class = mock
view.get_queryset = lambda: [1, 2, 3]
self.assertEqual(view.get_reporter(), 'foo')
mock.assert_called_once_with([1, 2, 3])
def test_get_writter_class_default(self):
view = BaseCSVGeneratorView()
self.assertEqual(view.get_writer_class(), UnicodeWriter)
def test_get_writter_class(self):
view = BaseCSVGeneratorView()
mock = Mock()
view.writer_class = mock
self.assertEqual(view.get_writer_class(), mock)
def test_should_write_header(self):
view = BaseCSVGeneratorView()
self.assertTrue(view.should_write_header())
view.WRITE_HEADER = False
self.assertFalse(view.should_write_header())
def test_get_file_name_default(self):
view = BaseCSVGeneratorView()
self.assertEqual(view.get_file_name(), 'myreport.csv')
def test_get_file_name_non_default(self):
view = BaseCSVGeneratorView()
view.file_name = 'kinginthenorth.csv'
self.assertEqual(view.get_file_name(), 'kinginthenorth.csv')
def test_get_should_generate_csv(self):
view = BaseCSVGeneratorView()
view.write_csv = Mock()
request = RequestFactory().get('/')
with patch('reportato.views.HttpResponse') as http_response_patch:
http_response_patch.return_value = {}
response = view.get(request)
self.assertEqual(
response,
{'Content-Disposition': 'attachment; filename="myreport.csv"'}
)
view.write_csv.assert_called_once()
def test_write_csv_with_header(self):
view = BaseCSVGeneratorView()
writer_mock = Mock()
reporter_mock = Mock()
view.get_writer_class = lambda: writer_mock
view.get_reporter = lambda: reporter_mock
view.write_csv(Mock())
# check we rendered the headers and the rows
reporter_mock.get_header_row.assert_called_once()
reporter_mock.get_rows.assert_called_once()
# and that we wrote those things
writer_mock.return_value.writerow.assert_called_once_with(
reporter_mock.get_header_row())
writer_mock.return_value.writerows.assert_called_once_with(
reporter_mock.get_rows())
def test_write_csv_without_header(self):
view = BaseCSVGeneratorView()
writer_mock = Mock()
reporter_mock = Mock()
view.get_writer_class = lambda: writer_mock
view.get_reporter = lambda: reporter_mock
view.WRITE_HEADER = False
view.write_csv(Mock())
# check we rendered the headers and the rows
self.assertFalse(reporter_mock.get_header_row.called)
reporter_mock.get_rows.assert_called_once()
# and that we wrote those things
self.assertFalse(writer_mock.return_value.writerow.called)
writer_mock.return_value.writerows.assert_called_once_with(
reporter_mock.get_rows())
| bsd-2-clause | -3,281,002,157,787,489,000 | 31.694313 | 154 | 0.613322 | false |
kyrsjo/AcdOpti | src/acdOptiGui/infoFrames/GeometryInstance.py | 1 | 19652 | # -*- coding: utf8 -*-
#
# Copyright 2011 Kyrre Ness Sjøbæk
# This file is part of AcdOpti.
#
# AcdOpti is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AcdOpti is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AcdOpti. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gtk
import os
from InfoFrameComponent import InfoFrameComponent
from acdOpti.AcdOptiMeshInstance import AcdOptiMeshInstance
from acdOpti.AcdOptiExceptions import AcdOptiException_cubitTemplateFile_CUBITerror,\
AcdOptiException_geomInstance_nameError
#TODO: Persistent ordering of keys, same as in GeometryCollection
class GeometryInstance(InfoFrameComponent):
"""
Allows for manipulation of variables for one specific geometryInstance
"""
#Class object fields
geomInstance = None
__topLabels = None
__tableWidget = None
__labelCollection = None
__entryCollection = None
__checkCollection = None
__clearLockdownButton = None
__cloneButton = None
__exportButton = None
__generateButton = None
__meshButton = None
__scrolledWindow = None
def __init__(self,frameManager,geomInstance):
InfoFrameComponent.__init__(self, frameManager)
self.geomInstance = geomInstance
#Create GUI
self.baseWidget = gtk.VBox()
self.__topLabels = []
self.__topLabels.append(gtk.Label("Tag name"))
self.__topLabels.append(gtk.Label("Value"))
self.__topLabels.append(gtk.Label("Use default"))
self.__clearLockdownButton = gtk.Button(label="Clear lockdown")
self.__clearLockdownButton.connect("clicked", self.event_button_clearLockdown, None)
self.__cloneButton = gtk.Button(label="Clone this geometry instance (deep copy)")
self.__cloneButton.connect("clicked", self.event_button_clone, None)
self.__exportButton = gtk.Button(label="Export CUBIT journal to file...")
self.__exportButton.connect("clicked", self.event_button_export, None)
self.__generateButton = gtk.Button(label="Run CUBIT to generate solid")
self.__generateButton.connect("clicked", self.event_button_generate, None)
self.__meshButton = gtk.Button(label="Attach a mesh...")
self.__meshButton.connect("clicked", self.event_button_mesh)
self.updateTable()
self.__scrolledWindow = gtk.ScrolledWindow()
self.__scrolledWindow.set_policy(gtk.POLICY_NEVER,gtk.POLICY_AUTOMATIC)
self.__scrolledWindow.add_with_viewport(self.__tableWidget)
self.__scrolledWindow.set_shadow_type(gtk.SHADOW_NONE)
self.baseWidget.pack_start(self.__scrolledWindow, expand=True)
self.baseWidget.pack_start(self.__clearLockdownButton, expand=False)
self.baseWidget.pack_start(self.__cloneButton, expand=False)
self.baseWidget.pack_start(self.__exportButton, expand=False)
self.baseWidget.pack_start(self.__generateButton, expand=False)
self.baseWidget.pack_start(self.__meshButton, expand=False)
self.baseWidget.show_all()
def updateTable(self):
"""
Creates and/or (re-)fills self.__tableWidget
"""
numEntries = self.geomInstance.template.paramDefaults_len()
lockdown = self.geomInstance.lockdown
#Initialize __tableWidget
if not self.__tableWidget:
self.__tableWidget=gtk.Table(numEntries+1, 3, False)
self.__tableWidget.set_row_spacings(3)
self.__tableWidget.set_col_spacings(3)
self.__tableWidget.attach(self.__topLabels[0],
0,1,0,1,
xoptions=gtk.FILL,yoptions=gtk.FILL)
self.__tableWidget.attach(self.__topLabels[1],
1,2,0,1,
xoptions=gtk.FILL|gtk.EXPAND,yoptions=gtk.FILL)
self.__tableWidget.attach(self.__topLabels[2],
2,3,0,1,
xoptions=gtk.FILL,yoptions=gtk.FILL)
self.__labelCollection = {}
self.__entryCollection = {}
self.__checkCollection = {}
else:
#Clear anything that might be there from before
for k in self.geomInstance.template.paramDefaults_getKeys():
self.__tableWidget.remove(self.__labelCollection[k])
self.__tableWidget.remove(self.__entryCollection[k])
self.__tableWidget.remove(self.__checkCollection[k])
self.__labelCollection.clear()
self.__entryCollection.clear()
self.__checkCollection.clear()
#Create and attach the table entries
for (k,i) in zip(sorted(self.geomInstance.template.paramDefaults_getKeys()),
xrange(numEntries)):
self.__labelCollection[k]=lab=gtk.Label(k)
self.__tableWidget.attach(lab,0,1,i+1,i+2, xoptions=gtk.FILL, yoptions=gtk.FILL)
self.__entryCollection[k]=ent=gtk.Entry()
if k in self.geomInstance.templateOverrides_getKeys():
ent.set_text(self.geomInstance.templateOverrides_get(k))
if lockdown:
ent.set_sensitive(False)
else:
ent.set_text(self.geomInstance.template.paramDefaults_get(k))
ent.set_sensitive(False)
self.__tableWidget.attach(ent,1,2,i+1,i+2, xoptions=gtk.FILL|gtk.EXPAND, yoptions=gtk.FILL)
self.__checkCollection[k]=check=gtk.CheckButton()
if k in self.geomInstance.templateOverrides_getKeys():
check.set_active(False)
else:
check.set_active(True)
if lockdown:
check.set_sensitive(False)
check.connect("toggled", self.event_check_toggled, k) #Toggle first, then message handler
self.__tableWidget.attach(check,2,3,i+1,i+2, xoptions=gtk.FILL, yoptions=gtk.FILL)
#Update the lockdown button
if lockdown:
self.__clearLockdownButton.set_sensitive(True)
self.__generateButton.set_sensitive(False)
else:
self.__clearLockdownButton.set_sensitive(False)
self.__generateButton.set_sensitive(True)
self.__tableWidget.show_all()
self.frameManager.mainWindow.updateProjectExplorer()
def updateGeomInstance(self):
"""
Copies information from the on-screen form into the geomInstance.
Does NOT ask the geomInstance to write itself to file.
If geomInstance is in lockdown, do nothing.
"""
if self.geomInstance.lockdown:
return
for k in self.geomInstance.templateOverrides_getKeys():
self.geomInstance.templateOverrides_insert(k, self.__entryCollection[k].get_text())
def event_check_toggled(self, widget, data):
print "GeometryInstance::event_check_toggled(), data =", data
if widget.get_active():
#Checked
dia = gtk.Dialog("Entry unchecked", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_NO, gtk.RESPONSE_NO,
gtk.STOCK_YES, gtk.RESPONSE_YES))
dia.set_default_response(gtk.RESPONSE_YES)
dia.vbox.pack_start(gtk.image_new_from_stock(
gtk.STOCK_DIALOG_QUESTION,
gtk.ICON_SIZE_DIALOG))
dia.vbox.pack_start(gtk.Label("Delete override \"" + data + "\" ?"))
dia.show_all()
response = dia.run()
if response == gtk.RESPONSE_YES:
#Delete
dia.destroy()
self.geomInstance.templateOverrides_del(data)
self.__entryCollection[data].set_sensitive(False)
self.__entryCollection[data].set_text(self.geomInstance.template.paramDefaults_get(data))
else:
#Abort
dia.destroy()
self.__checkCollection[data].set_active(False)
else:
#Unchecked
self.geomInstance.templateOverrides_insert(data, self.geomInstance.template.paramDefaults_get(data))
self.__entryCollection[data].set_sensitive(True)
def event_delete(self):
print "GeometryInstance::event_delete"
#Save to the geomInstance
self.updateGeomInstance()
#Ask the geomInstance to write itself to disk
self.geomInstance.write()
def event_button_clearLockdown(self,widget, data=None):
print "GeometryInstance::event_button_clearLockdown()"
self.geomInstance.clearLockdown()
self.updateTable()
def event_button_clone(self, widget,data=None):
print "GeometryInstance::event_button_clone()"
#Ask for the new geomInstance name
dia = gtk.Dialog("Please enter name of new geometry instance:", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dia.set_default_response(gtk.RESPONSE_OK)
nameBox = gtk.Entry()
nameBox.set_text(self.geomInstance.instName + "_clone")
dia.vbox.pack_start(nameBox)
dia.show_all()
response = dia.run()
cloneName = nameBox.get_text()
dia.destroy()
if response == gtk.RESPONSE_OK:
#Check for whitespace
if " " in cloneName:
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name cannot contain whitespace")
mDia.run()
mDia.destroy()
elif cloneName == "":
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name cannot be empty")
mDia.run()
mDia.destroy()
elif cloneName in self.geomInstance.template.geomInstances:
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name already in use")
mDia.run()
mDia.destroy()
#Everything OK: Try to attach the MeshInstance!
else:
self.geomInstance.template.cloneGeomInstance(self.geomInstance.instName, cloneName)
self.frameManager.mainWindow.updateProjectExplorer()
def event_button_export(self, widget, data=None):
print "GeometryInstance::event_button_export()"
self.updateGeomInstance()
(journal, extraKeys) = self.geomInstance.generateCubitJou()
#Check for extra keys
if len(extraKeys):
dia = gtk.Dialog("Extra keys in template", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_NO, gtk.RESPONSE_NO,
gtk.STOCK_YES, gtk.RESPONSE_YES))
dia.set_default_response(gtk.RESPONSE_YES)
dia.vbox.pack_start(gtk.image_new_from_stock(
gtk.STOCK_DIALOG_QUESTION,
gtk.ICON_SIZE_DIALOG))
dia.vbox.pack_start(gtk.Label("Extra keys found in template, continue?\n" + str(extraKeys) ))
dia.show_all()
response = dia.run()
dia.destroy()
if not response == gtk.RESPONSE_YES:
#Stop now
return
#Ask where to save
chooser = gtk.FileChooserDialog(title="Export file",
parent=self.getBaseWindow(),
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("CUBIT journal file .jou")
filter.add_mime_type("text/plain")
filter.add_pattern("*.jou")
chooser.add_filter(filter)
response = chooser.run()
if response == gtk.RESPONSE_OK:
fname = chooser.get_filename()
if not fname.endswith(".jou"):
fname += ".jou"
if os.path.isfile(fname):
dia = gtk.Dialog("File already exists", chooser,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_NO, gtk.RESPONSE_NO,
gtk.STOCK_YES, gtk.RESPONSE_YES))
dia.set_default_response(gtk.RESPONSE_YES)
dia.vbox.pack_start(gtk.image_new_from_stock\
(gtk.STOCK_DIALOG_QUESTION,
gtk.ICON_SIZE_DIALOG))
dia.vbox.pack_start(gtk.Label("File already exists, overwrite?"))
dia.show_all()
response2 = dia.run()
dia.destroy()
if not response2 == gtk.RESPONSE_YES:
#Stop now!
print "GeometryInstance::event_button_export()::AbortOverwrite"
                    chooser.destroy() # I'm too lazy to implement a proper event loop
return
#File name free OR user clicked YES to overwrite
chooser.destroy()
print "GeometryInstance::event_button_export()::write"
ofile = open(fname,'w')
ofile.write(journal)
ofile.close()
else:
chooser.destroy()
def event_button_generate(self, widget, data=None):
print "GeometryInstance::event_button_generate()"
self.updateGeomInstance()
try:
self.geomInstance.generateGeometry()
except AcdOptiException_cubitTemplateFile_CUBITerror as e:
self.makePing()
md = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR,
gtk.BUTTONS_CLOSE, "Error during execution of CUBIT script, offending command:\n" + str(e.args[2]))
md.run()
md.destroy()
self.updateTable()
def event_button_mesh(self,widget,data=None):
print "GeometryInstance::event_button_mesh()"
dia = gtk.Dialog("Select mesh template", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dia.set_default_response(gtk.RESPONSE_OK)
#Setup the listModel
diaListModel = gtk.ListStore(str)
diaListDict = {}
for mt in self.geomInstance.template.project.meshTemplateCollection.meshTemplates:
mti = diaListModel.append([mt,])
diaListDict[mt] = gtk.TreeRowReference(diaListModel,diaListModel.get_path(mti))
diaTreeView = gtk.TreeView(diaListModel)
diaTreeViewCol = gtk.TreeViewColumn()
diaCellRender = gtk.CellRendererText()
diaTreeView.append_column(diaTreeViewCol)
diaTreeViewCol.pack_start(diaCellRender)
diaTreeViewCol.add_attribute(diaCellRender, 'text', 0)
diaTreeView.set_headers_visible(False)
dia.vbox.pack_start(diaTreeView)
dia.show_all()
response = dia.run()
#Get the answer
selected = diaTreeView.get_selection()
selIter = selected.get_selected()[1]
selPath = diaListModel.get_path(selIter)
meshTemplateName = None
for (mt, ref) in diaListDict.iteritems():
if selPath == ref.get_path():
meshTemplateName = mt
assert meshTemplateName
#Delete the dialog
dia.destroy()
if response == gtk.RESPONSE_OK:
#Ask for the mesh instance name
dia2 = gtk.Dialog("Please enter name of new mesh instance:", self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OK, gtk.RESPONSE_OK))
dia2.set_default_response(gtk.RESPONSE_OK)
nameBox = gtk.Entry()
nameBox.set_text(meshTemplateName)
nameBox.show()
dia2.vbox.pack_start(nameBox)
dia2.show_all()
response2 = dia2.run()
meshInstanceName = nameBox.get_text()
dia2.destroy()
if response2 == gtk.RESPONSE_OK:
#Check for whitespace
if " " in meshInstanceName:
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name cannot contain whitespace")
mDia.run()
mDia.destroy()
#Everything OK: Try to attach the MeshInstance!
else:
try:
self.geomInstance.addMeshInstance(meshTemplateName, meshInstanceName)
#self.frameManager.mainWindow.addMeshInstanceToGUI(self.geomInstance.instName, meshInstanceName)
self.frameManager.mainWindow.updateProjectExplorer()
except AcdOptiException_geomInstance_nameError:
#Nope
mDia = gtk.MessageDialog(self.getBaseWindow(),
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
"Name already in use")
mDia.run()
mDia.destroy()
| gpl-3.0 | -5,338,039,519,075,308,000 | 42.379691 | 134 | 0.555623 | false |
ssorgatem/video4fuze | GUI/AboutDiag.py | 6 | 1135 | # -*- coding: utf-8 -*-
"""
Module implementing About Dialog.
"""
import codecs
from PyQt4.QtGui import QDialog
from PyQt4.QtCore import QCoreApplication, SIGNAL, QString
from Ui_AboutDiag import Ui_Dialog
class AboutV4F(QDialog, Ui_Dialog):
"""
V4F about dialog
"""
def __init__(self, parent = None):
"""
Constructor
"""
QDialog.__init__(self, parent)
self.setupUi(self)
self.Appinfo.setText(self.Appinfo.text().replace("Info",QCoreApplication.applicationName()+" "+QCoreApplication.applicationVersion()))
try:
READMEfile = codecs.open("README.txt","rb", "utf-8")
README = READMEfile.read()
READMEfile.close()
except Exception, e:
README = unicode(e)
try:
LICENSEfile = open("LICENSE.html","rb")
LICENSE= LICENSEfile.read()
LICENSEfile.close()
except Exception, e:
LICENSE = unicode(e)
self.ReadmeText.setText(README)
self.LicenseText.setText(LICENSE)
self.connect(self.okButton, SIGNAL("clicked()"), self.accept)
| gpl-3.0 | 4,480,029,232,956,183,000 | 28.868421 | 142 | 0.6 | false |
Verizon/libcloud | libcloud/storage/base.py | 26 | 28320 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides base classes for working with storage
"""
# Backward compatibility for Python 2.5
from __future__ import with_statement
import os.path # pylint: disable-msg=W0404
import hashlib
from os.path import join as pjoin
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import b
import libcloud.utils.files
from libcloud.common.types import LibcloudError
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
from libcloud.storage.types import ObjectDoesNotExistError
__all__ = [
'Object',
'Container',
'StorageDriver',
'CHUNK_SIZE',
'DEFAULT_CONTENT_TYPE'
]
CHUNK_SIZE = 8096
# Default Content-Type which is sent when uploading an object if one is not
# supplied and can't be detected when using non-strict mode.
DEFAULT_CONTENT_TYPE = 'application/octet-stream'
class Object(object):
"""
Represents an object (BLOB).
"""
def __init__(self, name, size, hash, extra, meta_data, container,
driver):
"""
:param name: Object name (must be unique per container).
:type name: ``str``
:param size: Object size in bytes.
:type size: ``int``
:param hash: Object hash.
:type hash: ``str``
:param container: Object container.
:type container: :class:`Container`
:param extra: Extra attributes.
:type extra: ``dict``
:param meta_data: Optional object meta data.
:type meta_data: ``dict``
:param driver: StorageDriver instance.
:type driver: :class:`StorageDriver`
"""
self.name = name
self.size = size
self.hash = hash
self.container = container
self.extra = extra or {}
self.meta_data = meta_data or {}
self.driver = driver
def get_cdn_url(self):
return self.driver.get_object_cdn_url(obj=self)
def enable_cdn(self, **kwargs):
return self.driver.enable_object_cdn(obj=self, **kwargs)
def download(self, destination_path, overwrite_existing=False,
delete_on_failure=True):
return self.driver.download_object(self, destination_path,
overwrite_existing,
delete_on_failure)
def as_stream(self, chunk_size=None):
return self.driver.download_object_as_stream(self, chunk_size)
def delete(self):
return self.driver.delete_object(self)
def __repr__(self):
return ('<Object: name=%s, size=%s, hash=%s, provider=%s ...>' %
(self.name, self.size, self.hash, self.driver.name))
class Container(object):
"""
Represents a container (bucket) which can hold multiple objects.
"""
def __init__(self, name, extra, driver):
"""
:param name: Container name (must be unique).
:type name: ``str``
:param extra: Extra attributes.
:type extra: ``dict``
:param driver: StorageDriver instance.
:type driver: :class:`StorageDriver`
"""
self.name = name
self.extra = extra or {}
self.driver = driver
def iterate_objects(self):
return self.driver.iterate_container_objects(container=self)
def list_objects(self):
return self.driver.list_container_objects(container=self)
def get_cdn_url(self):
return self.driver.get_container_cdn_url(container=self)
def enable_cdn(self, **kwargs):
return self.driver.enable_container_cdn(container=self, **kwargs)
def get_object(self, object_name):
return self.driver.get_object(container_name=self.name,
object_name=object_name)
def upload_object(self, file_path, object_name, extra=None, **kwargs):
return self.driver.upload_object(
file_path, self, object_name, extra=extra, **kwargs)
def upload_object_via_stream(self, iterator, object_name, extra=None,
**kwargs):
return self.driver.upload_object_via_stream(
iterator, self, object_name, extra=extra, **kwargs)
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
return self.driver.download_object(
obj, destination_path, overwrite_existing=overwrite_existing,
delete_on_failure=delete_on_failure)
def download_object_as_stream(self, obj, chunk_size=None):
return self.driver.download_object_as_stream(obj, chunk_size)
def delete_object(self, obj):
return self.driver.delete_object(obj)
def delete(self):
return self.driver.delete_container(self)
def __repr__(self):
return ('<Container: name=%s, provider=%s>'
% (self.name, self.driver.name))
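# Illustrative sketch (not part of the original module): the typical
# Container/Object round trip once a concrete StorageDriver subclass has been
# instantiated. The driver argument, container name and paths below are
# hypothetical.
def _example_container_workflow(driver):
    container = driver.create_container(container_name='backups')
    obj = container.upload_object(file_path='/tmp/db.dump',
                                  object_name='db.dump')
    for entry in container.iterate_objects():
        print(entry.name, entry.size)
    obj.download(destination_path='/tmp/restore/', overwrite_existing=True)
    obj.delete()
    container.delete()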
class StorageDriver(BaseDriver):
"""
A base StorageDriver to derive from.
"""
connectionCls = ConnectionUserAndKey
name = None
hash_type = 'md5'
supports_chunked_encoding = False
# When strict mode is used, exception will be thrown if no content type is
# provided and none can be detected when uploading an object
strict_mode = False
def __init__(self, key, secret=None, secure=True, host=None, port=None,
**kwargs):
super(StorageDriver, self).__init__(key=key, secret=secret,
secure=secure, host=host,
port=port, **kwargs)
def iterate_containers(self):
"""
Return a generator of containers for the given account
:return: A generator of Container instances.
:rtype: ``generator`` of :class:`Container`
"""
raise NotImplementedError(
'iterate_containers not implemented for this driver')
def list_containers(self):
"""
Return a list of containers.
:return: A list of Container instances.
:rtype: ``list`` of :class:`Container`
"""
return list(self.iterate_containers())
def iterate_container_objects(self, container):
"""
Return a generator of objects for the given container.
:param container: Container instance
:type container: :class:`Container`
:return: A generator of Object instances.
:rtype: ``generator`` of :class:`Object`
"""
raise NotImplementedError(
'iterate_container_objects not implemented for this driver')
def list_container_objects(self, container):
"""
Return a list of objects for the given container.
:param container: Container instance.
:type container: :class:`Container`
:return: A list of Object instances.
:rtype: ``list`` of :class:`Object`
"""
return list(self.iterate_container_objects(container))
def get_container(self, container_name):
"""
Return a container instance.
:param container_name: Container name.
:type container_name: ``str``
:return: :class:`Container` instance.
:rtype: :class:`Container`
"""
raise NotImplementedError(
'get_object not implemented for this driver')
def get_container_cdn_url(self, container):
"""
Return a container CDN URL.
:param container: Container instance
:type container: :class:`Container`
:return: A CDN URL for this container.
:rtype: ``str``
"""
raise NotImplementedError(
'get_container_cdn_url not implemented for this driver')
def get_object(self, container_name, object_name):
"""
Return an object instance.
:param container_name: Container name.
:type container_name: ``str``
:param object_name: Object name.
:type object_name: ``str``
:return: :class:`Object` instance.
:rtype: :class:`Object`
"""
raise NotImplementedError(
'get_object not implemented for this driver')
def get_object_cdn_url(self, obj):
"""
Return an object CDN URL.
:param obj: Object instance
:type obj: :class:`Object`
:return: A CDN URL for this object.
:rtype: ``str``
"""
raise NotImplementedError(
'get_object_cdn_url not implemented for this driver')
def enable_container_cdn(self, container):
"""
Enable container CDN.
:param container: Container instance
:type container: :class:`Container`
:rtype: ``bool``
"""
raise NotImplementedError(
'enable_container_cdn not implemented for this driver')
def enable_object_cdn(self, obj):
"""
Enable object CDN.
:param obj: Object instance
:type obj: :class:`Object`
:rtype: ``bool``
"""
raise NotImplementedError(
'enable_object_cdn not implemented for this driver')
def download_object(self, obj, destination_path, overwrite_existing=False,
delete_on_failure=True):
"""
Download an object to the specified destination path.
:param obj: Object instance.
:type obj: :class:`Object`
:param destination_path: Full path to a file or a directory where the
incoming file will be saved.
:type destination_path: ``str``
:param overwrite_existing: True to overwrite an existing file,
defaults to False.
:type overwrite_existing: ``bool``
:param delete_on_failure: True to delete a partially downloaded file if
the download was not successful (hash
mismatch / file size).
:type delete_on_failure: ``bool``
:return: True if an object has been successfully downloaded, False
otherwise.
:rtype: ``bool``
"""
raise NotImplementedError(
'download_object not implemented for this driver')
def download_object_as_stream(self, obj, chunk_size=None):
"""
Return a generator which yields object data.
:param obj: Object instance
:type obj: :class:`Object`
:param chunk_size: Optional chunk size (in bytes).
:type chunk_size: ``int``
"""
raise NotImplementedError(
'download_object_as_stream not implemented for this driver')
def upload_object(self, file_path, container, object_name, extra=None,
verify_hash=True, headers=None):
"""
Upload an object currently located on a disk.
:param file_path: Path to the object on disk.
:type file_path: ``str``
:param container: Destination container.
:type container: :class:`Container`
:param object_name: Object name.
:type object_name: ``str``
:param verify_hash: Verify hash
:type verify_hash: ``bool``
:param extra: Extra attributes (driver specific). (optional)
:type extra: ``dict``
:param headers: (optional) Additional request headers,
such as CORS headers. For example:
headers = {'Access-Control-Allow-Origin': 'http://mozilla.com'}
:type headers: ``dict``
:rtype: :class:`Object`
"""
raise NotImplementedError(
'upload_object not implemented for this driver')
def upload_object_via_stream(self, iterator, container,
object_name,
extra=None,
headers=None):
"""
Upload an object using an iterator.
If a provider supports it, chunked transfer encoding is used and you
don't need to know in advance the amount of data to be uploaded.
Otherwise if a provider doesn't support it, iterator will be exhausted
so a total size for data to be uploaded can be determined.
        Note: Exhausting the iterator means that all of the data must be
        buffered in memory, which might result in memory exhaustion when
        uploading a very large object.
        If a file is located on disk you are advised to use the upload_object
        function, which determines the file size with a filesystem stat call
        and doesn't need to buffer the whole object in memory.
:param iterator: An object which implements the iterator interface.
:type iterator: :class:`object`
:param container: Destination container.
:type container: :class:`Container`
:param object_name: Object name.
:type object_name: ``str``
:param extra: (optional) Extra attributes (driver specific). Note:
This dictionary must contain a 'content_type' key which represents
a content type of the stored object.
:type extra: ``dict``
:param headers: (optional) Additional request headers,
such as CORS headers. For example:
headers = {'Access-Control-Allow-Origin': 'http://mozilla.com'}
:type headers: ``dict``
:rtype: ``object``
"""
raise NotImplementedError(
'upload_object_via_stream not implemented for this driver')
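    # Illustrative sketch (not part of the original module): uploading from a
    # generator through upload_object_via_stream(); whether chunked transfer
    # encoding is used depends on supports_chunked_encoding, as described
    # above. The container variable is hypothetical.
    #
    #     def line_iterator():
    #         for i in range(3):
    #             yield b('line %d\n' % (i,))
    #     obj = container.upload_object_via_stream(
    #         iterator=line_iterator(),
    #         object_name='lines.txt',
    #         extra={'content_type': 'text/plain'})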
def delete_object(self, obj):
"""
Delete an object.
:param obj: Object instance.
:type obj: :class:`Object`
:return: ``bool`` True on success.
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_object not implemented for this driver')
def create_container(self, container_name):
"""
Create a new container.
:param container_name: Container name.
:type container_name: ``str``
:return: Container instance on success.
:rtype: :class:`Container`
"""
raise NotImplementedError(
'create_container not implemented for this driver')
def delete_container(self, container):
"""
Delete a container.
:param container: Container instance
:type container: :class:`Container`
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
raise NotImplementedError(
'delete_container not implemented for this driver')
def _get_object(self, obj, callback, callback_kwargs, response,
success_status_code=None):
"""
        Call the passed callback and start the transfer of the object.
:param obj: Object instance.
:type obj: :class:`Object`
:param callback: Function which is called with the passed
callback_kwargs
:type callback: :class:`function`
:param callback_kwargs: Keyword arguments which are passed to the
callback.
:type callback_kwargs: ``dict``
:param response: Response instance.
:type response: :class:`Response`
:param success_status_code: Status code which represents a successful
transfer (defaults to httplib.OK)
:type success_status_code: ``int``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
success_status_code = success_status_code or httplib.OK
if response.status == success_status_code:
return callback(**callback_kwargs)
elif response.status == httplib.NOT_FOUND:
raise ObjectDoesNotExistError(object_name=obj.name,
value='', driver=self)
raise LibcloudError(value='Unexpected status code: %s' %
(response.status),
driver=self)
def _save_object(self, response, obj, destination_path,
overwrite_existing=False, delete_on_failure=True,
chunk_size=None):
"""
Save object to the provided path.
:param response: RawResponse instance.
:type response: :class:`RawResponse`
:param obj: Object instance.
:type obj: :class:`Object`
:param destination_path: Destination directory.
:type destination_path: ``str``
:param delete_on_failure: True to delete partially downloaded object if
the download fails.
:type delete_on_failure: ``bool``
:param overwrite_existing: True to overwrite a local path if it already
exists.
:type overwrite_existing: ``bool``
:param chunk_size: Optional chunk size
(defaults to ``libcloud.storage.base.CHUNK_SIZE``, 8kb)
:type chunk_size: ``int``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
chunk_size = chunk_size or CHUNK_SIZE
base_name = os.path.basename(destination_path)
if not base_name and not os.path.exists(destination_path):
raise LibcloudError(
value='Path %s does not exist' % (destination_path),
driver=self)
if not base_name:
file_path = pjoin(destination_path, obj.name)
else:
file_path = destination_path
if os.path.exists(file_path) and not overwrite_existing:
raise LibcloudError(
value='File %s already exists, but ' % (file_path) +
'overwrite_existing=False',
driver=self)
stream = libcloud.utils.files.read_in_chunks(response, chunk_size)
try:
data_read = next(stream)
except StopIteration:
# Empty response?
return False
bytes_transferred = 0
with open(file_path, 'wb') as file_handle:
while len(data_read) > 0:
file_handle.write(b(data_read))
bytes_transferred += len(data_read)
try:
data_read = next(stream)
except StopIteration:
data_read = ''
if int(obj.size) != int(bytes_transferred):
# Transfer failed, support retry?
if delete_on_failure:
try:
os.unlink(file_path)
except Exception:
pass
return False
return True
def _upload_object(self, object_name, content_type, upload_func,
upload_func_kwargs, request_path, request_method='PUT',
headers=None, file_path=None, iterator=None):
"""
Helper function for setting common request headers and calling the
passed in callback which uploads an object.
"""
headers = headers or {}
if file_path and not os.path.exists(file_path):
raise OSError('File %s does not exist' % (file_path))
if iterator is not None and not hasattr(iterator, 'next') and not \
hasattr(iterator, '__next__'):
raise AttributeError('iterator object must implement next() ' +
'method.')
if not content_type:
if file_path:
name = file_path
else:
name = object_name
content_type, _ = libcloud.utils.files.guess_file_mime_type(name)
if not content_type:
if self.strict_mode:
raise AttributeError('File content-type could not be '
'guessed and no content_type value '
'is provided')
else:
# Fallback to a content-type
content_type = DEFAULT_CONTENT_TYPE
file_size = None
if iterator:
if self.supports_chunked_encoding:
headers['Transfer-Encoding'] = 'chunked'
upload_func_kwargs['chunked'] = True
else:
# Chunked transfer encoding is not supported. Need to buffer
# all the data in memory so we can determine file size.
iterator = libcloud.utils.files.read_in_chunks(
iterator=iterator)
data = libcloud.utils.files.exhaust_iterator(iterator=iterator)
file_size = len(data)
upload_func_kwargs['data'] = data
else:
file_size = os.path.getsize(file_path)
upload_func_kwargs['chunked'] = False
if file_size is not None and 'Content-Length' not in headers:
headers['Content-Length'] = file_size
headers['Content-Type'] = content_type
response = self.connection.request(request_path,
method=request_method, data=None,
headers=headers, raw=True)
upload_func_kwargs['response'] = response
success, data_hash, bytes_transferred = upload_func(
**upload_func_kwargs)
if not success:
raise LibcloudError(
value='Object upload failed, Perhaps a timeout?', driver=self)
result_dict = {'response': response, 'data_hash': data_hash,
'bytes_transferred': bytes_transferred}
return result_dict
def _upload_data(self, response, data, calculate_hash=True):
"""
Upload data stored in a string.
:param response: RawResponse object.
:type response: :class:`RawResponse`
:param data: Data to upload.
:type data: ``str``
:param calculate_hash: True to calculate hash of the transferred data.
(defaults to True).
:type calculate_hash: ``bool``
:return: First item is a boolean indicator of success, second
one is the uploaded data MD5 hash and the third one
is the number of transferred bytes.
:rtype: ``tuple``
"""
bytes_transferred = 0
data_hash = None
if calculate_hash:
data_hash = self._get_hash_function()
data_hash.update(b(data))
try:
response.connection.connection.send(b(data))
except Exception:
# TODO: let this exception propagate
# Timeout, etc.
return False, None, bytes_transferred
bytes_transferred = len(data)
if calculate_hash:
data_hash = data_hash.hexdigest()
return True, data_hash, bytes_transferred
def _stream_data(self, response, iterator, chunked=False,
calculate_hash=True, chunk_size=None, data=None):
"""
        Stream data over an HTTP connection.
:param response: RawResponse object.
:type response: :class:`RawResponse`
        :param iterator: An object which implements an iterator interface
                         or a File-like object with a read method.
:type iterator: :class:`object`
:param chunked: True if the chunked transfer encoding should be used
                        (defaults to False).
:type chunked: ``bool``
:param calculate_hash: True to calculate hash of the transferred data.
                               (defaults to True).
:type calculate_hash: ``bool``
:param chunk_size: Optional chunk size (defaults to ``CHUNK_SIZE``)
:type chunk_size: ``int``
:rtype: ``tuple``
:return: First item is a boolean indicator of success, second
one is the uploaded data MD5 hash and the third one
is the number of transferred bytes.
"""
chunk_size = chunk_size or CHUNK_SIZE
data_hash = None
if calculate_hash:
data_hash = self._get_hash_function()
generator = libcloud.utils.files.read_in_chunks(iterator, chunk_size,
fill_size=True)
bytes_transferred = 0
try:
chunk = next(generator)
except StopIteration:
# Special case when StopIteration is thrown on the first iteration
# create a 0-byte long object
chunk = ''
if chunked:
response.connection.connection.send(b('%X\r\n' %
(len(chunk))))
response.connection.connection.send(chunk)
response.connection.connection.send(b('\r\n'))
response.connection.connection.send(b('0\r\n\r\n'))
else:
response.connection.connection.send(chunk)
return True, data_hash.hexdigest(), bytes_transferred
while len(chunk) > 0:
try:
if chunked:
response.connection.connection.send(b('%X\r\n' %
(len(chunk))))
response.connection.connection.send(b(chunk))
response.connection.connection.send(b('\r\n'))
else:
response.connection.connection.send(b(chunk))
except Exception:
# TODO: let this exception propagate
# Timeout, etc.
return False, None, bytes_transferred
bytes_transferred += len(chunk)
if calculate_hash:
data_hash.update(b(chunk))
try:
chunk = next(generator)
except StopIteration:
chunk = ''
if chunked:
response.connection.connection.send(b('0\r\n\r\n'))
if calculate_hash:
data_hash = data_hash.hexdigest()
return True, data_hash, bytes_transferred
def _upload_file(self, response, file_path, chunked=False,
calculate_hash=True):
"""
Upload a file to the server.
:type response: :class:`RawResponse`
:param response: RawResponse object.
:type file_path: ``str``
:param file_path: Path to a local file.
        :type chunked: ``bool``
        :param chunked: True if chunked transfer encoding should be used.
        :type calculate_hash: ``bool``
        :param calculate_hash: True to calculate a hash of the uploaded data.
:rtype: ``tuple``
:return: First item is a boolean indicator of success, second
one is the uploaded data MD5 hash and the third one
is the number of transferred bytes.
"""
with open(file_path, 'rb') as file_handle:
success, data_hash, bytes_transferred = (
self._stream_data(
response=response,
iterator=iter(file_handle),
chunked=chunked,
calculate_hash=calculate_hash))
return success, data_hash, bytes_transferred
def _get_hash_function(self):
"""
Return instantiated hash function for the hash type supported by
the provider.
"""
try:
func = getattr(hashlib, self.hash_type)()
except AttributeError:
raise RuntimeError('Invalid or unsupported hash type: %s' %
(self.hash_type))
return func
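# Illustrative sketch (not part of the original module): how the hash helper
# above is typically used when verifying a transfer, mirroring what
# _upload_data() and _stream_data() do internally. The driver and chunks
# arguments are hypothetical.
def _example_hash_usage(driver, chunks):
    digest = driver._get_hash_function()
    for chunk in chunks:
        digest.update(b(chunk))
    return digest.hexdigest()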
| apache-2.0 | -1,782,538,234,636,170,000 | 32.835125 | 79 | 0.573129 | false |
google/mozc | src/data_manager/gen_data_version.py | 2 | 2665 | # -*- coding: utf-8 -*-
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate the Mozc data version string.
Data version consists of three components:
<ENGINE_VERSION>.<DATA_VERSION>.<TAG>
Here, <TAG> is any string to distinguish data set.
"""
import optparse
import re
def _ParseOption():
"""Parse command line options."""
parser = optparse.OptionParser()
parser.add_option('--tag', dest='tag')
parser.add_option('--mozc_version_template', dest='mozc_version_template')
parser.add_option('--data_version_override', dest='data_version_override')
parser.add_option('--output', dest='output')
return parser.parse_args()[0]
def main():
opts = _ParseOption()
data = {}
with open(opts.mozc_version_template, 'r') as f:
for line in f:
matchobj = re.match(r'(\w+) *= *(.*)', line.strip())
if matchobj:
key = matchobj.group(1)
value = matchobj.group(2)
data[key] = value
if opts.data_version_override:
data['DATA_VERSION'] = opts.data_version_override
with open(opts.output, 'w') as f:
f.write('.'.join((data['ENGINE_VERSION'], data['DATA_VERSION'], opts.tag)))
if __name__ == '__main__':
main()
| bsd-3-clause | 2,972,118,535,395,036,000 | 36.013889 | 79 | 0.721576 | false |
tic-ull/gedea | src/miprimeraplicacion_django/django_cas/views.py | 1 | 3479 | """CAS login/logout replacement views"""
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponseForbidden
from urllib.parse import urlencode
from urllib.parse import urljoin
__all__ = ['login', 'logout']
def _service_url(request, redirect_to=None):
"""Generates application service URL for CAS"""
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
service = protocol + host + request.path
if redirect_to:
if '?' in service:
service += '&'
else:
service += '?'
service += urlencode({REDIRECT_FIELD_NAME: redirect_to})
return service
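# Illustrative example (assumed values, not from the original source): for a
# secure request to example.org with path "/accounts/login/" and
# redirect_to="/dashboard/", _service_url() returns
# "https://example.org/accounts/login/?next=%2Fdashboard%2F".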
def _redirect_url(request):
"""Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
set.
"""
next = request.GET.get(REDIRECT_FIELD_NAME)
if not next:
if settings.CAS_IGNORE_REFERER:
next = settings.CAS_REDIRECT_URL
else:
next = request.META.get('HTTP_REFERER', settings.CAS_REDIRECT_URL)
prefix = (('http://', 'https://')[request.is_secure()] +
request.get_host())
if next.startswith(prefix):
next = next[len(prefix):]
return next
def _login_url(service):
"""Generates CAS login URL"""
params = {'service': service}
if settings.CAS_EXTRA_LOGIN_PARAMS:
params.update(settings.CAS_EXTRA_LOGIN_PARAMS)
return urljoin(settings.CAS_SERVER_URL, 'login') + '?' + urlencode(params)
def _logout_url(request, next_page=None):
"""Generates CAS logout URL"""
url = urljoin(settings.CAS_SERVER_URL, 'logout')
if next_page:
protocol = ('http://', 'https://')[request.is_secure()]
host = request.get_host()
url += '?' + urlencode({'url': protocol + host + next_page})
return url
def login(request, next_page=None, required=False):
"""Forwards to CAS login URL or verifies CAS ticket"""
if not next_page:
next_page = _redirect_url(request)
if request.user.is_authenticated():
message = "You are logged in as %s." % request.user.username
messages.success(request, message)
return HttpResponseRedirect(next_page)
ticket = request.GET.get('ticket')
service = _service_url(request, next_page)
if ticket:
from django.contrib import auth
user = auth.authenticate(ticket=ticket, service=service, request=request)
if user is not None:
auth.login(request, user)
name = user.first_name or user.username
message = "Login succeeded. Welcome, %s." % name
messages.success(request, message)
return HttpResponseRedirect(next_page)
elif settings.CAS_RETRY_LOGIN or required:
return HttpResponseRedirect(_login_url(service))
else:
error = "<h1>Forbidden</h1><p>Login failed.</p>"
return HttpResponseForbidden(error)
else:
return HttpResponseRedirect(_login_url(service))
def logout(request, next_page=None):
"""Redirects to CAS logout page"""
from django.contrib.auth import logout
logout(request)
if not next_page:
next_page = _redirect_url(request)
if settings.CAS_LOGOUT_COMPLETELY:
return HttpResponseRedirect(_logout_url(request, next_page))
else:
return HttpResponseRedirect(next_page)
| gpl-3.0 | 7,880,676,389,655,834,000 | 31.820755 | 81 | 0.63639 | false |
bionoid/kivy | kivy/tests/test_animations.py | 17 | 3847 | '''
Animations tests
================
'''
import unittest
import gc
from time import time, sleep
from kivy.animation import Animation, AnimationTransition
from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.graphics import Scale
from kivy.weakproxy import WeakProxy
class AnimationTestCase(unittest.TestCase):
def sleep(self, t):
start = time()
while time() < start + t:
sleep(.01)
Clock.tick()
def setUp(self):
self.assertEqual(len(Animation._instances), 0)
self.a = Animation(x=100, d=1, t='out_bounce')
self.w = Widget()
def tearDown(self):
self.assertEqual(len(Animation._instances), 0)
def test_start_animation(self):
self.a.start(self.w)
self.sleep(1.5)
self.assertAlmostEqual(self.w.x, 100)
def test_animation_duration_0(self):
a = Animation(x=100, d=0)
a.start(self.w)
self.sleep(.5)
def test_stop_animation(self):
self.a.start(self.w)
self.sleep(.5)
self.a.stop(self.w)
self.assertNotAlmostEqual(self.w.x, 100)
self.assertNotAlmostEqual(self.w.x, 0)
def test_stop_all(self):
self.a.start(self.w)
self.sleep(.5)
Animation.stop_all(self.w)
def test_stop_all_2(self):
self.a.start(self.w)
self.sleep(.5)
Animation.stop_all(self.w, 'x')
def test_duration(self):
self.assertEqual(self.a.duration, 1)
def test_transition(self):
self.assertEqual(self.a.transition, AnimationTransition.out_bounce)
def test_animated_properties(self):
self.assertEqual(self.a.animated_properties['x'], 100)
def test_animated_instruction(self):
instruction = Scale(3)
self.a.start(instruction)
self.assertEqual(self.a.animated_properties['x'], 100)
self.assertAlmostEqual(instruction.x, 3)
self.sleep(1.5)
self.assertAlmostEqual(instruction.x, 100)
def test_weakref(self):
widget = Widget()
anim = Animation(x=100)
anim.start(widget.proxy_ref)
del widget
gc.collect()
try:
self.sleep(1.)
except ReferenceError:
pass
class SequentialAnimationTestCase(unittest.TestCase):
def sleep(self, t):
start = time()
while time() < start + t:
sleep(.01)
Clock.tick()
def setUp(self):
self.assertEqual(len(Animation._instances), 0)
self.a = Animation(x=100, d=1, t='out_bounce')
self.a += Animation(x=0, d=1, t='out_bounce')
self.w = Widget()
def tearDown(self):
self.assertEqual(len(Animation._instances), 0)
def test_cancel_all(self):
self.a.start(self.w)
self.sleep(.5)
Animation.cancel_all(self.w)
def test_cancel_all_2(self):
self.a.start(self.w)
self.sleep(.5)
Animation.cancel_all(self.w, 'x')
def test_stop_all(self):
self.a.start(self.w)
self.sleep(.5)
Animation.stop_all(self.w)
def test_stop_all_2(self):
self.a.start(self.w)
self.sleep(.5)
Animation.stop_all(self.w, 'x')
def _test_on_progress(self, anim, widget, progress):
self._on_progress_called = True
def _test_on_complete(self, anim, widget):
self._on_complete_called = True
def test_events(self):
self._on_progress_called = False
self._on_complete_called = False
self.a.bind(on_progress=self._test_on_progress,
on_complete=self._test_on_complete)
self.a.start(self.w)
self.sleep(.5)
self.assertTrue(self._on_progress_called)
self.sleep(2)
self.assertTrue(self._on_progress_called)
self.assertTrue(self._on_complete_called)
| mit | -7,741,140,415,466,226,000 | 26.478571 | 75 | 0.597868 | false |
bros-bioinfo/bros-bioinfo.github.io | COURS/M1/SEMESTRE1/ALGO_PROG/ALGO/tableau.py | 1 | 2790 | #!/usr/bin/env python
# coding: utf-8
def creer_pile():
    return []
def creer_tableau():
indice = -1
tableau1 = creer_pile()
empiler(tableau1, indice)
return tableau1
def empiler(pile, element):
pile.append(element)
def depiler(pile):
return pile.pop()
def inserer_element(T, id, e):
sommet = depiler(T)
empiler(T, sommet)
diff = id - sommet
if diff <= 0:
print("Votre indice est deja existant")
return
i = 1
while i < diff:
        # we insert the missing slots between the bounds (e.g. with an existing id of 4 and a target id of 7, slots 5 and 6 are inserted)
empiler(T, "vide")
        empiler(T, sommet + i)  # TODO: increment the new list by 1
i += 1
empiler(T, e)
empiler(T, id)
def supprimer_element(T, id):
P2 = creer_pile()
sommet = depiler(T)
compteur = 0
while sommet != id:
element = depiler(T)
empiler(P2, sommet)
empiler(P2, element)
sommet = depiler(T)
compteur = compteur + 1
    depiler(T)  # drop the element; its id has already been popped
i = 0
if P2 != []:
while i < compteur:
element = depiler(P2)
sommet = depiler(P2) - 1
empiler(T, element)
empiler(T, sommet)
i += 1
def remplacer_element(T, id, e):
P2 = creer_pile()
sommet = depiler(T)
compteur = 0
while sommet != id:
element = depiler(T)
empiler(P2, sommet)
empiler(P2, element)
sommet = depiler(T)
compteur = compteur + 1
    depiler(T)  # drop the old element; its id has already been popped
empiler(T, e)
empiler(T, sommet)
i = 0
if P2 != []:
while i < compteur:
element = depiler(P2)
sommet = depiler(P2)
empiler(T, element)
empiler(T, sommet)
i += 1
def obtenir_element(T, id):
P2 = creer_pile()
sommet = depiler(T)
compteur = 0
while sommet != id:
element = depiler(T)
empiler(P2, sommet)
empiler(P2, element)
sommet = depiler(T)
compteur = compteur + 1
    elementcherche = depiler(T)  # pop the searched element; its id has already been popped
empiler(T, elementcherche)
empiler(T, sommet)
i = 0
if P2 != []:
while i < compteur:
element = depiler(P2)
sommet = depiler(P2)
empiler(T, element)
empiler(T, sommet)
i += 1
return elementcherche
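# Clarifying note (added, not in the original file): the "array" is stored as a
# stack of alternating (element, index) entries on top of a -1 sentinel, with
# the highest index always on top. After inserer_element(T1, 4, "BATEAU") below
# the stack is, conceptually:
#   [-1, 'vide', 0, 'vide', 1, 'vide', 2, 'vide', 3, 'BATEAU', 4]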
T1 = creer_tableau()
e = "COUCOU"
inserer_element(T1, 4, "BATEAU")
inserer_element(T1, 6, "TEST")
inserer_element(T1, 8, e)
print T1
supprimer_element(T1, 4)
print T1
remplacer_element(T1, 7, "BLABLA")
print T1
test = obtenir_element(T1, 7)
print test
| mit | -4,218,748,943,243,527,700 | 21.119048 | 143 | 0.556154 | false |
mlperf/training_results_v0.6 | Google/benchmarks/transformer/implementations/tpu-v3-256-transformer/dataset_preproc/data_generators/cifar.py | 7 | 16305 | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CIFAR."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import numpy as np
import six
from six.moves import cPickle
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import image_utils
from tensor2tensor.data_generators import mnist
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
# URLs and filenames for CIFAR data.
_CIFAR10_URL = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
_CIFAR10_PREFIX = "cifar-10-batches-py/"
_CIFAR10_TRAIN_FILES = [
"data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4",
"data_batch_5"
]
_CIFAR10_TEST_FILES = ["test_batch"]
_CIFAR10_IMAGE_SIZE = _CIFAR100_IMAGE_SIZE = 32
_CIFAR100_URL = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
_CIFAR100_PREFIX = "cifar-100-python/"
_CIFAR100_TRAIN_FILES = ["train"]
_CIFAR100_TEST_FILES = ["test"]
def _get_cifar(directory, url):
"""Download and extract CIFAR to directory unless it is there."""
filename = os.path.basename(url)
path = generator_utils.maybe_download(directory, filename, url)
tarfile.open(path, "r:gz").extractall(directory)
def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0):
"""Image generator for CIFAR-10 and 100.
Args:
cifar_version: string; one of "cifar10" or "cifar100"
tmp_dir: path to temporary storage directory.
training: a Boolean; if true, we use the train set, otherwise the test set.
how_many: how many images and labels to generate.
start_from: from which image to start.
Returns:
An instance of image_generator that produces CIFAR-10 images and labels.
"""
if cifar_version == "cifar10":
url = _CIFAR10_URL
train_files = _CIFAR10_TRAIN_FILES
test_files = _CIFAR10_TEST_FILES
prefix = _CIFAR10_PREFIX
image_size = _CIFAR10_IMAGE_SIZE
label_key = "labels"
elif cifar_version == "cifar100" or cifar_version == "cifar20":
url = _CIFAR100_URL
train_files = _CIFAR100_TRAIN_FILES
test_files = _CIFAR100_TEST_FILES
prefix = _CIFAR100_PREFIX
image_size = _CIFAR100_IMAGE_SIZE
if cifar_version == "cifar100":
label_key = "fine_labels"
else:
label_key = "coarse_labels"
_get_cifar(tmp_dir, url)
data_files = train_files if training else test_files
all_images, all_labels = [], []
for filename in data_files:
path = os.path.join(tmp_dir, prefix, filename)
with tf.gfile.Open(path, "rb") as f:
if six.PY2:
data = cPickle.load(f)
else:
data = cPickle.load(f, encoding="latin1")
images = data["data"]
num_images = images.shape[0]
images = images.reshape((num_images, 3, image_size, image_size))
all_images.extend([
np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)
])
labels = data[label_key]
all_labels.extend([labels[j] for j in range(num_images)])
return image_utils.image_generator(
all_images[start_from:start_from + how_many],
all_labels[start_from:start_from + how_many])
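# Usage note (added for illustration, values taken from the generator() methods
# below): cifar_generator("cifar10", tmp_dir, True, 48000) yields the first
# 48000 training examples, while the *Tune problems reserve the remaining 2000
# (start_from=48000) as a held-out split.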
@registry.register_problem
class ImageCifar10Tune(mnist.ImageMnistTune):
"""Cifar-10 Tune."""
@property
def num_channels(self):
return 3
@property
def class_labels(self):
return [
"airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse",
"ship", "truck"
]
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image.set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
if mode == tf.estimator.ModeKeys.TRAIN:
image = image_utils.cifar_image_augmentation(image)
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
def generator(self, data_dir, tmp_dir, is_training):
if is_training:
return cifar_generator("cifar10", tmp_dir, True, 48000)
else:
return cifar_generator("cifar10", tmp_dir, True, 2000, 48000)
@registry.register_problem
class ImageCifar10(ImageCifar10Tune):
def generator(self, data_dir, tmp_dir, is_training):
if is_training:
return cifar_generator("cifar10", tmp_dir, True, 50000)
else:
return cifar_generator("cifar10", tmp_dir, False, 10000)
@registry.register_problem
class ImageCifar10Plain(ImageCifar10):
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image.set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
@registry.register_problem
class ImageCifar10PlainGen(ImageCifar10Plain):
"""CIFAR-10 32x32 for image generation without standardization preprep."""
def dataset_filename(self):
return "image_cifar10_plain" # Reuse CIFAR-10 plain data.
def preprocess_example(self, example, mode, unused_hparams):
example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
example["inputs"] = tf.to_int64(example["inputs"])
return example
@registry.register_problem
class ImageCifar10PlainRandomShift(ImageCifar10Plain):
"""CIFAR-10 32x32 for image generation with random shift data-augmentation."""
def dataset_filename(self):
return "image_cifar10_plain" # Reuse CIFAR-10 plain data.
def preprocess_example(self, example, mode, unused_hparams):
example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
example["inputs"] = tf.to_int64(example["inputs"])
if mode == tf.estimator.ModeKeys.TRAIN:
example["inputs"] = image_utils.random_shift(
example["inputs"], wsr=0.1, hsr=0.1)
return example
@registry.register_problem
class ImageCifar10PlainGenDmol(ImageCifar10PlainGen):
"""Discretized mixture of logistics problem."""
def dataset_filename(self):
return "image_cifar10_plain" # Reuse CIFAR-10 plain data.
def eval_metrics(self):
return [
metrics.Metrics.DMOL_PERPLEXITY
]
@registry.register_problem
class ImageCifar10Plain8(ImageCifar10):
"""CIFAR-10 rescaled to 8x8 for output: Conditional image generation."""
def dataset_filename(self):
return "image_cifar10_plain" # Reuse CIFAR-10 plain data.
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image = image_utils.resize_by_area(image, 8)
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
@registry.register_problem
class Img2imgCifar10(ImageCifar10):
"""CIFAR-10 rescaled to 8x8 for input and 32x32 for output."""
def dataset_filename(self):
return "image_cifar10_plain" # Reuse CIFAR-10 plain data.
def preprocess_example(self, example, unused_mode, unused_hparams):
inputs = example["inputs"]
# For Img2Img resize input and output images as desired.
example["inputs"] = image_utils.resize_by_area(inputs, 8)
example["targets"] = image_utils.resize_by_area(inputs, 32)
return example
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.input_modality = {"inputs": ("image:identity", 256)}
p.target_modality = ("image:identity", 256)
p.batch_size_multiplier = 256
p.input_space_id = 1
p.target_space_id = 1
@registry.register_problem
class ImageCifar100Tune(mnist.ImageMnistTune):
"""Cifar-100 Tune."""
@property
def num_classes(self):
return 100
@property
def num_channels(self):
return 3
@property
def class_labels(self):
return [
"beaver",
"dolphin",
"otter",
"seal",
"whale",
"aquarium fish",
"flatfish",
"ray",
"shark",
"trout",
"orchids",
"poppies",
"roses",
"sunflowers",
"tulips",
"bottles",
"bowls",
"cans",
"cups",
"plates",
"apples",
"mushrooms",
"oranges",
"pears",
"sweet peppers",
"clock",
"computer keyboard",
"lamp",
"telephone",
"television",
"bed",
"chair",
"couch",
"table",
"wardrobe",
"bee",
"beetle",
"butterfly",
"caterpillar",
"cockroach",
"bear",
"leopard",
"lion",
"tiger",
"wolf",
"bridge",
"castle",
"house",
"road",
"skyscraper",
"cloud",
"forest",
"mountain",
"plain",
"sea",
"camel",
"cattle",
"chimpanzee",
"elephant",
"kangaroo",
"fox",
"porcupine",
"possum",
"raccoon",
"skunk",
"crab",
"lobster",
"snail",
"spider",
"worm",
"baby",
"boy",
"girl",
"man",
"woman",
"crocodile",
"dinosaur",
"lizard",
"snake",
"turtle",
"hamster",
"mouse",
"rabbit",
"shrew",
"squirrel",
"maple",
"oak",
"palm",
"pine",
"willow",
"bicycle",
"bus",
"motorcycle",
"pickup truck",
"train",
"lawn-mower",
"rocket",
"streetcar",
"tank",
"tractor",
]
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image.set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3])
if mode == tf.estimator.ModeKeys.TRAIN:
image = image_utils.cifar_image_augmentation(image)
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
def generator(self, data_dir, tmp_dir, is_training):
if is_training:
return cifar_generator("cifar100", tmp_dir, True, 48000)
else:
return cifar_generator("cifar100", tmp_dir, True, 2000, 48000)
@registry.register_problem
class ImageCifar100(ImageCifar100Tune):
def generator(self, data_dir, tmp_dir, is_training):
if is_training:
return cifar_generator("cifar100", tmp_dir, True, 50000)
else:
return cifar_generator("cifar100", tmp_dir, False, 10000)
@registry.register_problem
class ImageCifar100Plain(ImageCifar100):
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image.set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3])
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
@registry.register_problem
class ImageCifar100PlainGen(ImageCifar100Plain):
"""CIFAR-100 32x32 for image generation without standardization preprep."""
def dataset_filename(self):
return "image_cifar100_plain" # Reuse CIFAR-100 plain data.
def preprocess_example(self, example, mode, unused_hparams):
example["inputs"].set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3])
example["inputs"] = tf.to_int64(example["inputs"])
return example
@registry.register_problem
class ImageCifar100Plain8(ImageCifar100):
"""CIFAR-100 rescaled to 8x8 for output: Conditional image generation."""
def dataset_filename(self):
return "image_cifar100_plain" # Reuse CIFAR-100 plain data.
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image = image_utils.resize_by_area(image, 8)
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
@registry.register_problem
class Img2imgCifar100(ImageCifar100):
"""CIFAR-100 rescaled to 8x8 for input and 32x32 for output."""
def dataset_filename(self):
return "image_cifar100_plain" # Reuse CIFAR-100 plain data.
def preprocess_example(self, example, unused_mode, unused_hparams):
inputs = example["inputs"]
# For Img2Img resize input and output images as desired.
example["inputs"] = image_utils.resize_by_area(inputs, 8)
example["targets"] = image_utils.resize_by_area(inputs, 32)
return example
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.input_modality = {"inputs": ("image:identity", 256)}
p.target_modality = ("image:identity", 256)
p.batch_size_multiplier = 256
p.max_expected_batch_size_per_shard = 4
p.input_space_id = 1
p.target_space_id = 1
@registry.register_problem
class ImageCifar20Tune(mnist.ImageMnistTune):
"""Cifar-20 Tune."""
@property
def num_classes(self):
return 20
@property
def num_channels(self):
return 3
@property
def class_labels(self):
return [
"aquatic mammals",
"fish",
"flowers",
"food containers",
"fruit and vegetables",
"household electrical devices",
"household furniture",
"insects",
"large carnivores",
"large man-made outdoor things",
"large natural outdoor scenes",
"large omnivores and herbivores",
"medium-sized mammals",
"non-insect invertebrates",
"people",
"reptiles",
"small mammals",
"trees",
"vehicles 1",
"vehicles 2",
]
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image.set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3])
if mode == tf.estimator.ModeKeys.TRAIN:
image = image_utils.cifar_image_augmentation(image)
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
def generator(self, data_dir, tmp_dir, is_training):
if is_training:
return cifar_generator("cifar20", tmp_dir, True, 48000)
else:
return cifar_generator("cifar20", tmp_dir, True, 2000, 48000)
@registry.register_problem
class ImageCifar20(ImageCifar20Tune):
def generator(self, data_dir, tmp_dir, is_training):
if is_training:
return cifar_generator("cifar20", tmp_dir, True, 50000)
else:
return cifar_generator("cifar20", tmp_dir, False, 10000)
@registry.register_problem
class ImageCifar20Plain(ImageCifar20):
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image.set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3])
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
@registry.register_problem
class ImageCifar20PlainGen(ImageCifar20Plain):
"""CIFAR-20 32x32 for image generation without standardization preprep."""
def dataset_filename(self):
return "image_cifar20_plain" # Reuse CIFAR-20 plain data.
def preprocess_example(self, example, mode, unused_hparams):
example["inputs"].set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3])
example["inputs"] = tf.to_int64(example["inputs"])
return example
@registry.register_problem
class ImageCifar20Plain8(ImageCifar20):
"""CIFAR-20 rescaled to 8x8 for output: Conditional image generation."""
def dataset_filename(self):
return "image_cifar20_plain" # Reuse CIFAR-20 plain data.
def preprocess_example(self, example, mode, unused_hparams):
image = example["inputs"]
image = image_utils.resize_by_area(image, 8)
if not self._was_reversed:
image = tf.image.per_image_standardization(image)
example["inputs"] = image
return example
| apache-2.0 | 8,912,669,523,249,537,000 | 28.168157 | 80 | 0.654339 | false |
TRox1972/youtube-dl | youtube_dl/extractor/bilibili.py | 10 | 4425 | # coding: utf-8
from __future__ import unicode_literals
import hashlib
import re
from .common import InfoExtractor
from ..compat import compat_parse_qs
from ..utils import (
int_or_none,
float_or_none,
unified_timestamp,
urlencode_postdata,
)
class BiliBiliIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.|bangumi\.|)bilibili\.(?:tv|com)/(?:video/av|anime/v/)(?P<id>\d+)'
_TEST = {
'url': 'http://www.bilibili.tv/video/av1074402/',
'md5': '9fa226fe2b8a9a4d5a69b4c6a183417e',
'info_dict': {
'id': '1074402',
'ext': 'mp4',
'title': '【金坷垃】金泡沫',
'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923',
'duration': 308.315,
'timestamp': 1398012660,
'upload_date': '20140420',
'thumbnail': 're:^https?://.+\.jpg',
'uploader': '菊子桑',
'uploader_id': '156160',
},
}
_APP_KEY = '6f90a59ac58a4123'
_BILIBILI_KEY = '0bfd84cc3940035173f35e6777508326'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
if 'anime/v' not in url:
cid = compat_parse_qs(self._search_regex(
[r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)',
r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'],
webpage, 'player parameters'))['cid'][0]
else:
js = self._download_json(
'http://bangumi.bilibili.com/web_api/get_source', video_id,
data=urlencode_postdata({'episode_id': video_id}),
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'})
cid = js['result']['cid']
payload = 'appkey=%s&cid=%s&otype=json&quality=2&type=mp4' % (self._APP_KEY, cid)
sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest()
video_info = self._download_json(
'http://interface.bilibili.com/playurl?%s&sign=%s' % (payload, sign),
video_id, note='Downloading video info page')
entries = []
for idx, durl in enumerate(video_info['durl']):
formats = [{
'url': durl['url'],
'filesize': int_or_none(durl['size']),
}]
for backup_url in durl.get('backup_url', []):
formats.append({
'url': backup_url,
# backup URLs have lower priorities
'preference': -2 if 'hd.mp4' in backup_url else -3,
})
self._sort_formats(formats)
entries.append({
'id': '%s_part%s' % (video_id, idx),
'duration': float_or_none(durl.get('length'), 1000),
'formats': formats,
})
title = self._html_search_regex('<h1[^>]+title="([^"]+)">', webpage, 'title')
description = self._html_search_meta('description', webpage)
timestamp = unified_timestamp(self._html_search_regex(
r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', fatal=False))
thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
# TODO 'view_count' requires deobfuscating Javascript
info = {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
'thumbnail': thumbnail,
'duration': float_or_none(video_info.get('timelength'), scale=1000),
}
uploader_mobj = re.search(
r'<a[^>]+href="https?://space\.bilibili\.com/(?P<id>\d+)"[^>]+title="(?P<name>[^"]+)"',
webpage)
if uploader_mobj:
info.update({
'uploader': uploader_mobj.group('name'),
'uploader_id': uploader_mobj.group('id'),
})
for entry in entries:
entry.update(info)
if len(entries) == 1:
return entries[0]
else:
for idx, entry in enumerate(entries):
entry['id'] = '%s_part%d' % (video_id, (idx + 1))
return {
'_type': 'multi_video',
'id': video_id,
'title': title,
'description': description,
'entries': entries,
}
| unlicense | 942,922,727,783,032,300 | 34.224 | 102 | 0.506019 | false |
spark-test/spark | dev/pip-sanity-check.py | 12 | 1309 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import SparkSession
from pyspark.mllib.linalg import *
import sys
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("PipSanityCheck")\
.getOrCreate()
sc = spark.sparkContext
rdd = sc.parallelize(range(100), 10)
value = rdd.reduce(lambda x, y: x + y)
if (value != 4950):
print("Value {0} did not match expected value.".format(value), file=sys.stderr)
sys.exit(-1)
print("Successfully ran pip sanity check")
spark.stop()
| apache-2.0 | 5,066,430,126,901,376,000 | 36.4 | 87 | 0.711994 | false |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/IPython/extensions/octavemagic.py | 7 | 10627 | # -*- coding: utf-8 -*-
"""
===========
octavemagic
===========
Magics for interacting with Octave via oct2py.
.. note::
The ``oct2py`` module needs to be installed separately and
can be obtained using ``easy_install`` or ``pip``.
You will also need a working copy of GNU Octave.
Usage
=====
To enable the magics below, execute ``%load_ext octavemagic``.
``%octave``
{OCTAVE_DOC}
``%octave_push``
{OCTAVE_PUSH_DOC}
``%octave_pull``
{OCTAVE_PULL_DOC}
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import tempfile
from glob import glob
from shutil import rmtree
import sys
import numpy as np
import oct2py
from xml.dom import minidom
from IPython.core.displaypub import publish_display_data
from IPython.core.magic import (Magics, magics_class, line_magic,
line_cell_magic, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.core.magic_arguments import (
argument, magic_arguments, parse_argstring
)
from IPython.utils.py3compat import unicode_to_str
from IPython.utils.text import dedent
class OctaveMagicError(oct2py.Oct2PyError):
pass
_mimetypes = {'png' : 'image/png',
'svg' : 'image/svg+xml',
'jpg' : 'image/jpeg',
'jpeg': 'image/jpeg'}
@magics_class
class OctaveMagics(Magics):
"""A set of magics useful for interactive work with Octave via oct2py.
"""
def __init__(self, shell):
"""
Parameters
----------
shell : IPython shell
"""
super(OctaveMagics, self).__init__(shell)
self._oct = oct2py.Oct2Py()
if sys.platform == 'win32':
# Use svg by default due to lack of Ghostscript on Windows Octave
self._plot_format = 'svg'
else:
self._plot_format = 'png'
# Allow publish_display_data to be overridden for
# testing purposes.
self._publish_display_data = publish_display_data
def _fix_gnuplot_svg_size(self, image, size=None):
"""
GnuPlot SVGs do not have height/width attributes. Set
these to be the same as the viewBox, so that the browser
scales the image correctly.
Parameters
----------
image : str
SVG data.
size : tuple of int
Image width, height.
"""
(svg,) = minidom.parseString(image).getElementsByTagName('svg')
viewbox = svg.getAttribute('viewBox').split(' ')
if size is not None:
width, height = size
else:
width, height = viewbox[2:]
svg.setAttribute('width', '%dpx' % width)
svg.setAttribute('height', '%dpx' % height)
return svg.toxml()
@skip_doctest
@line_magic
def octave_push(self, line):
'''
Line-level magic that pushes a variable to Octave.
`line` should be made up of whitespace separated variable names in the
IPython namespace::
In [7]: import numpy as np
In [8]: X = np.arange(5)
In [9]: X.mean()
Out[9]: 2.0
In [10]: %octave_push X
In [11]: %octave mean(X)
Out[11]: 2.0
'''
inputs = line.split(' ')
for input in inputs:
input = unicode_to_str(input)
self._oct.put(input, self.shell.user_ns[input])
@skip_doctest
@line_magic
def octave_pull(self, line):
'''
Line-level magic that pulls a variable from Octave.
::
In [18]: _ = %octave x = [1 2; 3 4]; y = 'hello'
In [19]: %octave_pull x y
In [20]: x
Out[20]:
array([[ 1., 2.],
[ 3., 4.]])
In [21]: y
Out[21]: 'hello'
'''
outputs = line.split(' ')
for output in outputs:
output = unicode_to_str(output)
self.shell.push({output: self._oct.get(output)})
@skip_doctest
@magic_arguments()
@argument(
'-i', '--input', action='append',
help='Names of input variables to be pushed to Octave. Multiple names '
'can be passed, separated by commas with no whitespace.'
)
@argument(
'-o', '--output', action='append',
help='Names of variables to be pulled from Octave after executing cell '
'body. Multiple names can be passed, separated by commas with no '
'whitespace.'
)
@argument(
'-s', '--size', action='store',
help='Pixel size of plots, "width,height". Default is "-s 400,250".'
)
@argument(
'-f', '--format', action='store',
help='Plot format (png, svg or jpg).'
)
@needs_local_scope
@argument(
'code',
nargs='*',
)
@line_cell_magic
def octave(self, line, cell=None, local_ns=None):
'''
Execute code in Octave, and pull some of the results back into the
Python namespace::
In [9]: %octave X = [1 2; 3 4]; mean(X)
Out[9]: array([[ 2., 3.]])
As a cell, this will run a block of Octave code, without returning any
value::
In [10]: %%octave
....: p = [-2, -1, 0, 1, 2]
....: polyout(p, 'x')
-2*x^4 - 1*x^3 + 0*x^2 + 1*x^1 + 2
In the notebook, plots are published as the output of the cell, e.g.::
%octave plot([1 2 3], [4 5 6])
will create a line plot.
Objects can be passed back and forth between Octave and IPython via the
-i and -o flags in line::
In [14]: Z = np.array([1, 4, 5, 10])
In [15]: %octave -i Z mean(Z)
Out[15]: array([ 5.])
In [16]: %octave -o W W = Z * mean(Z)
Out[16]: array([ 5., 20., 25., 50.])
In [17]: W
Out[17]: array([ 5., 20., 25., 50.])
The size and format of output plots can be specified::
In [18]: %%octave -s 600,800 -f svg
...: plot([1, 2, 3]);
'''
args = parse_argstring(self.octave, line)
# arguments 'code' in line are prepended to the cell lines
if cell is None:
code = ''
return_output = True
else:
code = cell
return_output = False
code = ' '.join(args.code) + code
# if there is no local namespace then default to an empty dict
if local_ns is None:
local_ns = {}
if args.input:
for input in ','.join(args.input).split(','):
input = unicode_to_str(input)
try:
val = local_ns[input]
except KeyError:
val = self.shell.user_ns[input]
self._oct.put(input, val)
# generate plots in a temporary directory
plot_dir = tempfile.mkdtemp().replace('\\', '/')
if args.size is not None:
size = args.size
else:
size = '400,240'
if args.format is not None:
plot_format = args.format
elif sys.platform == 'win32':
# Use svg by default due to lack of Ghostscript on Windows Octave
plot_format = 'svg'
else:
plot_format = 'png'
pre_call = '''
global __ipy_figures = [];
page_screen_output(0);
function fig_create(src, event)
global __ipy_figures;
__ipy_figures(size(__ipy_figures) + 1) = src;
set(src, "visible", "off");
end
set(0, 'DefaultFigureCreateFcn', @fig_create);
close all;
clear ans;
# ___<end_pre_call>___ #
'''
post_call = '''
# ___<start_post_call>___ #
# Save output of the last execution
if exist("ans") == 1
_ = ans;
else
_ = nan;
end
for f = __ipy_figures
outfile = sprintf('%(plot_dir)s/__ipy_oct_fig_%%03d.png', f);
try
print(f, outfile, '-d%(plot_format)s', '-tight', '-S%(size)s');
end
end
''' % locals()
code = ' '.join((pre_call, code, post_call))
try:
text_output = self._oct.run(code, verbose=False)
except (oct2py.Oct2PyError) as exception:
msg = exception.message
msg = msg.split('# ___<end_pre_call>___ #')[1]
msg = msg.split('# ___<start_post_call>___ #')[0]
raise OctaveMagicError('Octave could not complete execution. '
'Traceback (currently broken in oct2py): %s'
% msg)
key = 'OctaveMagic.Octave'
display_data = []
# Publish text output
if text_output:
display_data.append((key, {'text/plain': text_output}))
# Publish images
images = [open(imgfile, 'rb').read() for imgfile in \
glob("%s/*" % plot_dir)]
rmtree(plot_dir)
plot_mime_type = _mimetypes.get(plot_format, 'image/png')
width, height = [int(s) for s in size.split(',')]
for image in images:
if plot_format == 'svg':
image = self._fix_gnuplot_svg_size(image, size=(width, height))
display_data.append((key, {plot_mime_type: image}))
if args.output:
for output in ','.join(args.output).split(','):
output = unicode_to_str(output)
self.shell.push({output: self._oct.get(output)})
for source, data in display_data:
self._publish_display_data(source, data)
if return_output:
ans = self._oct.get('_')
# Unfortunately, Octave doesn't have a "None" object,
# so we can't return any NaN outputs
if np.isscalar(ans) and np.isnan(ans):
ans = None
return ans
__doc__ = __doc__.format(
OCTAVE_DOC = dedent(OctaveMagics.octave.__doc__),
OCTAVE_PUSH_DOC = dedent(OctaveMagics.octave_push.__doc__),
OCTAVE_PULL_DOC = dedent(OctaveMagics.octave_pull.__doc__)
)
def load_ipython_extension(ip):
"""Load the extension in IPython."""
ip.register_magics(OctaveMagics)
| gpl-3.0 | 3,071,073,813,817,427,500 | 26.819372 | 80 | 0.514915 | false |
sacrefies/csc643_bigdata | project1/src/mongodb_connector.py | 1 | 3597 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 team1@course_bigdata, Saint Joseph's University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__doc__ = """mongodb_connector.py provides a conventient way to get a MongoDB
database object.
"""
from settings import DB_NAME, DB_PROTOCOL, DB_HOST, DB_PORT, COLLECTION
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, InvalidName
class MongoDB(object):
"""A class to connect to a MongoDB according to the settings.py"""
def __init__(self):
"""Create and initialize an instance of class MongoDB"""
self.__conn_str = None
self.__client = None
self.__db = None
    def simple_connection_string(self):
"""Get the connection string.
:return: A legal MongoDB connection string from the settings.
"""
self.__conn_str = "%s%s:%s/" % (DB_PROTOCOL, DB_HOST, DB_PORT)
return self.__conn_str
def get_client(self):
"""Connect to the MongoDB and return a client object.
Refer to: http://api.mongodb.com/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient
:return: An instance of class MongoClient
"""
if self.__client:
return self.__client
        self.__client = MongoClient(self.simple_connection_string())
try:
# The ismaster command is cheap and does not require auth.
self.__client.admin.command('ismaster')
except ConnectionFailure as cf:
print 'Connecting to MongoDB failed: ', cf
self.__client = None
return self.__client
def get_database(self, name=None):
"""Get the database object with the specified name.
:param name: The name of the database. If given None or omitted,
this method use the name set in the settings file.
:return: An instance of Database.
"""
if not self.__client:
self.get_client()
dbname = name if name else DB_NAME
try:
self.__db = self.__client[dbname]
except InvalidName as ine:
self.__db = None
print 'No such database: %s. %s' % (dbname, ine)
return self.__db
def close(self):
if self.__client:
self.__client.close()
self.__db = None
self.__client = None
self.__conn_str = None
# unit tests
if __name__ == '__main__':
mongo = MongoDB()
cli = mongo.get_client()
if cli and cli.database_names():
print 'connect successful'
print 'databases: ',
for n in cli.database_names():
print '%s, ' % n,
print ''
db = mongo.get_database()
if db:
print 'database connected'
print 'database test collections: ',
for n in db.collection_names():
print '%s, ' % n,
print ''
print 'database test get document count: ',
collection = db[db.collection_names()[0]]
print collection.count()
mongo.close();
print 'Test finished'
| apache-2.0 | -8,888,494,421,145,992,000 | 31.116071 | 118 | 0.613845 | false |
cesarmarinhorj/cubes | cubes/providers.py | 7 | 15552 | # -*- coding: utf-8 -*-
"""Logical model model providers."""
import copy
import json
import re
from .logging import get_logger
from .errors import *
from .model import *
from .metadata import *
__all__ = [
"ModelProvider",
"StaticModelProvider",
"link_cube",
]
# Proposed Provider API:
# Provider.cube() – in abstract class
# Provider.provide_cube() – in concrete class, provides a Cube object that
# might be modified later
# Provider.provide_dimension()
# Provider.link_cube(cube,locale)
# Provider.find_dimension(cube, locale)
#
# Provider is bound to namespace
# TODO: add tests
# TODO: needs to be reviewed
def link_cube(cube, locale, provider=None, namespace=None,
ignore_missing=False):
"""Links dimensions to the `cube` in the `context` object. The `context`
object should implement a function `dimension(name, locale, namespace,
provider)`. Modifies cube in place, returns the cube.
"""
# TODO: change this to: link_cube(cube, locale, namespace, provider)
# Assumption: empty cube
linked = set()
for dim_name in cube.dimension_links.keys():
if dim_name in linked:
raise ModelError("Dimension '{}' linked twice"
.format(dim_name))
try:
dim = find_dimension(dim_name, locale,
provider=provider,
namespace=namespace)
except TemplateRequired as e:
raise ModelError("Dimension template '%s' missing" % dim_name)
if not dim and not ignore_missing:
raise CubesError("Dimension '{}' not found.".format(dim_name))
cube.link_dimension(dim)
return cube
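# Minimal usage sketch (assumed metadata and objects, not part of the original
# module):
#
#   cube = Cube.from_metadata({"name": "sales",
#                              "dimensions": ["date", "store"]})
#   link_cube(cube, "en", provider=some_provider, namespace=some_namespace)
#
# After the call, every name in `cube.dimension_links` has been resolved via
# find_dimension() and the resulting Dimension objects are attached to the cube.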
# TODO: add tests
def find_dimension(name, locale=None, provider=None, namespace=None):
"""Returns a localized dimension with `name`. Raises
`NoSuchDimensionError` when no model published the dimension. Raises
    `TemplateRequired` error when the model provider requires a template to be
able to provide the dimension, but such template is not a public
dimension.
The standard lookup when linking a cube is:
1. look in the provider
2. look in the namespace – all providers within that namespace
"""
# Collected dimensions – to be used as templates
templates = {}
# Assumption: all dimensions that are to be used as templates should
# be public dimensions. If it is a private dimension, then the
# provider should handle the case by itself.
missing = [name]
while missing:
dimension = None
deferred = set()
name = missing.pop()
# First give a chance to provider, then to namespace
dimension = None
required_template = None
try:
dimension = _lookup_dimension(name, templates,
namespace, provider)
except TemplateRequired as e:
required_template = e.template
if required_template in templates:
raise BackendError("Some model provider didn't make use of "
"dimension template '%s' for '%s'"
% (required_template, name))
if required_template:
missing.append(name)
if required_template in missing:
raise ModelError("Dimension templates cycle in '%s'" %
required_template)
missing.append(required_template)
# Store the created dimension to be used as template
if dimension:
templates[name] = dimension
if namespace:
lookup = namespace.translation_lookup(locale)
if lookup:
            # TODO: pass lookup instead of just the first found translation
context = LocalizationContext(lookup[0])
trans = context.object_localization("dimensions", "inner")
dimension = dimension.localized(trans)
return dimension
# TODO: add tests
def _lookup_dimension(name, templates, namespace, provider):
"""Look-up a dimension `name` in `provider` and then in `namespace`.
`templates` is a dictionary with already instantiated dimensions that
can be used as templates.
"""
dimension = None
required_template = None
    # 1. Look in the provider
if provider:
try:
dimension = provider.dimension(name, templates=templates)
except NoSuchDimensionError:
pass
else:
return dimension
# 2. Look in the namespace
if namespace:
return namespace.dimension(name, templates=templates)
raise NoSuchDimensionError("Dimension '%s' not found" % name,
name=name)
class ModelProvider(object):
"""Abstract class – factory for model object. Currently empty and used
only to find other model providers."""
# TODO: Don't get metadata, but arbitrary arguments.
def __init__(self, metadata=None):
"""Base class for model providers. Initializes a model provider and
sets `metadata` – a model metadata dictionary.
Subclasses should call this method at the beginning of the custom
`__init__()`.
        If a model provider subclass has metadata that should be pre-pended
        to the user-provided metadata, it should return it in
`default_metadata()`.
Subclasses should implement at least: :meth:`cubes.ModelProvider.cube`,
:meth:`cubes.ModelProvider.dimension` and
:meth:`cubes.ModelProvider.list_cubes` methods.
"""
self.store = None
        # Get the provider's defaults and pre-pend them to the user-provided
        # metadata.
defaults = self.default_metadata()
self.metadata = self._merge_metadata(defaults, metadata)
# TODO: check for duplicates
self.dimensions_metadata = {}
for dim in self.metadata.get("dimensions", []):
self.dimensions_metadata[dim["name"]] = dim
self.cubes_metadata = {}
for cube in self.metadata.get("cubes", []):
self.cubes_metadata[cube["name"]] = cube
# TODO: decide which one to use
self.options = self.metadata.get("options", {})
self.options.update(self.metadata.get("browser_options", {}))
def _merge_metadata(self, metadata, other):
"""See `default_metadata()` for more information."""
metadata = dict(metadata)
other = dict(other)
cubes = metadata.pop("cubes", []) + other.pop("cubes", [])
if cubes:
metadata["cubes"] = cubes
dims = metadata.pop("dimensions", []) + other.pop("dimensions", [])
if dims:
metadata["dimensions"] = dims
joins = metadata.pop("joins", []) + other.pop("joins",[])
if joins:
metadata["joins"] = joins
mappings = metadata.pop("mappings", {})
mappings.update(other.pop("mappings", {}))
if mappings:
metadata["mappings"] = mappings
metadata.update(other)
return metadata
def default_metadata(self, metadata=None):
"""Returns metadata that are prepended to the provided model metadata.
`metadata` is user-provided metadata and might be used to decide what
kind of default metadata are returned.
The metadata are merged as follows:
* cube lists are concatenated (no duplicity checking)
* dimension lists are concatenated (no duplicity checking)
* joins are concatenated
* default mappings are updated with the model's mappings
Default implementation returns empty metadata.
"""
return {}
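    # Illustrative sketch of the merge rules above (assumed values, not part of
    # the original source):
    #
    #   defaults = {"cubes": [{"name": "sales"}],
    #               "mappings": {"amount": "fact.amount"}}
    #   metadata = {"cubes": [{"name": "costs"}],
    #               "mappings": {"amount": "fact.total"}}
    #
    #   _merge_metadata(defaults, metadata) returns
    #       {"cubes": [{"name": "sales"}, {"name": "costs"}],
    #        "mappings": {"amount": "fact.total"}}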
# TODO: remove this in favor of provider configuration: store=
def requires_store(self):
"""Return `True` if the provider requires a store. Subclasses might
override this method. Default implementation returns `False`"""
return False
# TODO: bind this automatically on provider configuration: store (see
# requires_store() function)
def bind(self, store):
"""Set's the provider's `store`. """
self.store = store
self.initialize_from_store()
def initialize_from_store(self):
"""This method is called after the provider's `store` was set.
Override this method if you would like to perform post-initialization
from the store."""
pass
def cube_options(self, cube_name):
"""Returns an options dictionary for cube `name`. The options
dictoinary is merged model `options` metadata with cube's `options`
metadata if exists. Cube overrides model's global (default)
options."""
options = dict(self.options)
if cube_name in self.cubes_metadata:
cube = self.cubes_metadata[cube_name]
# TODO: decide which one to use
options.update(cube.get("options", {}))
options.update(cube.get("browser_options", {}))
return options
def dimension_metadata(self, name, locale=None):
"""Returns a metadata dictionary for dimension `name` and optional
`locale`.
Subclasses should override this method and call the super if they
would like to merge metadata provided in a model file."""
try:
return self.dimensions_metadata[name]
except KeyError:
raise NoSuchDimensionError("No such dimension '%s'" % name, name)
def cube_metadata(self, name, locale=None):
"""Returns a cube metadata by combining model's global metadata and
cube's metadata. Merged metadata dictionaries: `browser_options`,
`mappings`, `joins`.
Subclasses should override this method and call the super if they
would like to merge metadata provided in a model file.
.. note:
If provider is caching a cube metadata, it should store a cache
for localized version of the cube metadata.
"""
if name in self.cubes_metadata:
metadata = dict(self.cubes_metadata[name])
else:
raise NoSuchCubeError("No such cube '%s'" % name, name)
# merge browser_options
browser_options = self.metadata.get('browser_options', {})
if metadata.get('browser_options'):
browser_options.update(metadata.get('browser_options'))
metadata['browser_options'] = browser_options
# Merge model and cube mappings
#
model_mappings = self.metadata.get("mappings")
cube_mappings = metadata.pop("mappings", {})
if model_mappings:
mappings = copy.deepcopy(model_mappings)
mappings.update(cube_mappings)
else:
mappings = cube_mappings
metadata["mappings"] = mappings
# Merge model and cube joins
#
model_joins = self.metadata.get("joins", [])
cube_joins = metadata.pop("joins", [])
# model joins, if present, should be merged with cube's overrides.
# joins are matched by the "name" key.
if cube_joins and model_joins:
model_join_map = {}
for join in model_joins:
try:
jname = join['name']
except KeyError:
raise ModelError("Missing required 'name' key in "
"model-level joins.")
if jname in model_join_map:
raise ModelError("Duplicate model-level join 'name': %s" %
jname)
model_join_map[jname] = copy.deepcopy(join)
# Merge cube's joins with model joins by their names.
merged_joins = []
for join in cube_joins:
name = join.get('name')
if name and name in model_join_map:
model_join = dict(model_join_map[name])
else:
model_join = {}
model_join.update(join)
merged_joins.append(model_join)
else:
merged_joins = cube_joins
# Validate joins:
for join in merged_joins:
if "master" not in join:
raise ModelError("No master in join for cube '%s' "
"(join name: %s)" % (name, join.get("name")))
if "detail" not in join:
raise ModelError("No detail in join for cube '%s' "
"(join name: %s)" % (name, join.get("name")))
metadata["joins"] = merged_joins
return metadata
def list_cubes(self):
"""Get a list of metadata for cubes in the workspace. Result is a list
of dictionaries with keys: `name`, `label`, `category`, `info`.
The list is fetched from the model providers on the call of this
method.
Subclassees should implement this method.
"""
raise NotImplementedError("Subclasses should implement list_cubes()")
def has_cube(self, name):
"""Returns `True` if the provider has cube `name`. Otherwise returns
`False`."""
return name in self.cubes_metadata
def cube(self, name, locale=None, namespace=None):
"""Returns a cube with `name` provided by the receiver. If receiver
does not have the cube `NoSuchCube` exception is raised.
Note: The returned cube will not have the dimensions assigned.
It is up to the caller's responsibility to assign appropriate
dimensions based on the cube's `dimension_links`.
Subclasses of `ModelProvider` might override this method if they would
like to create the `Cube` object directly.
.. note:
If provider is caching a cube, it should store a cache for
localized version of the cube.
"""
metadata = self.cube_metadata(name, locale)
cube = Cube.from_metadata(metadata)
link_cube(cube, locale, provider=self, namespace=namespace)
return cube
def dimension(self, name, templates=[], locale=None):
"""Returns a dimension with `name` provided by the receiver.
`dimensions` is a dictionary of dimension objects where the receiver
can look for templates. If the dimension requires a template and the
template is missing, the subclasses should raise
`TemplateRequired(template)` error with a template name as an
argument.
If the receiver does not provide the dimension `NoSuchDimension`
exception is raised.
"""
metadata = self.dimension_metadata(name, locale)
return Dimension.from_metadata(metadata, templates=templates)
# TODO: make this FileModelProvider
class StaticModelProvider(ModelProvider):
__extension_aliases__ = ["default"]
def __init__(self, *args, **kwargs):
super(StaticModelProvider, self).__init__(*args, **kwargs)
# Initialization code goes here...
def list_cubes(self):
"""Returns a list of cubes from the metadata."""
cubes = []
for cube in self.metadata.get("cubes", []):
info = {
"name": cube["name"],
"label": cube.get("label", cube["name"]),
"category": (cube.get("category") or cube.get("info", {}).get("category")),
"info": cube.get("info", {})
}
cubes.append(info)
return cubes
| mit | 3,176,609,446,727,866,000 | 33.078947 | 95 | 0.601802 | false |
craigem/effectivepython | item_09.py | 1 | 1455 | #!/usr/bin/env python3
'''Item 9 from Effective Python'''
# Example 1
''' If the file is absolutely enormous or perhaps a never-ending network
socket, list comprehensions are problematic. Here, I use a list comprehension
in a way that can only handle small input values '''
print('Example 1:\n==========')
import random
with open('my_file.txt', 'w') as f:
for _ in range(10):
f.write('a' * random.randint(0, 100))
f.write('\n')
value = [len(x) for x in open('my_file.txt')]
print(value)
# Example 2
''' the generator expression immediately evaluates to an iterator and doesn't
make any forward progress '''
print('\nExample 2:\n==========')
it = (len(x) for x in open('my_file.txt'))
print(it)
# Example 3
''' The returned iterator can be advanced one step at a time to produce the
next output from the generator expression as needed (using the next built-in
function) '''
print('\nExample 3:\n==========')
print(next(it))
print(next(it))
# Example 4
''' take the iterator returned by the generator expression above and use it
as the input for another generator expression '''
print('\nExample 4:\n==========')
roots = ((x, x**0.5) for x in it)
print(roots)
# Example 5
''' Each time I advance this iterator, it will also advance the interior
iterator, creating a domino effect of looping, evaluating conditional
expressions, and passing around inputs and outputs '''
print('\nExample 5:\n==========')
print(next(roots))
| gpl-3.0 | 1,900,474,966,403,767,800 | 27.529412 | 77 | 0.683162 | false |
nuagenetworks/vspk-python | vspk/v5_0/fetchers/nugroups_fetcher.py | 2 | 2086 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NUGroupsFetcher(NURESTFetcher):
""" Represents a NUGroups fetcher
Notes:
This fetcher enables to fetch NUGroup objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NUGroup class that is managed.
Returns:
.NUGroup: the managed class
"""
from .. import NUGroup
return NUGroup
| bsd-3-clause | -7,684,816,828,351,298,000 | 38.377358 | 86 | 0.721956 | false |
Theer108/invenio | invenio/base/i18n.py | 10 | 4183 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio international messages functions, to be used by all
I18N interfaces. Typical usage in the caller code is:
from messages import gettext_set_language
[...]
def square(x, ln=CFG_SITE_LANG):
_ = gettext_set_language(ln)
print _("Hello there!")
print _("The square of %(x_num)s is %(x_value)s.", x_num=x, x_value=x*x)
In the caller code, all output strings should be made translatable via
the _() convention.
For more information, see ABOUT-NLS file.
"""
import babel
import gettext
from flask_babel import gettext, lazy_gettext
from six import iteritems
# Placemark for the i18n function
_ = lazy_gettext
def gettext_set_language(ln, use_unicode=False):
"""Set the _ gettext function in every caller function
Usage::
_ = gettext_set_language(ln)
"""
from invenio.ext.babel import set_locale
with set_locale(ln):
if not use_unicode:
def unicode_gettext_wrapper(text, **kwargs):
from invenio.base.helpers import unicodifier
from invenio.utils.text import wash_for_utf8
return wash_for_utf8(gettext(unicodifier(text),
**unicodifier(kwargs)))
return unicode_gettext_wrapper
return gettext
def wash_language(ln):
"""Look at language LN and check if it is one of allowed languages
for the interface. Return it in case of success, return the
default language otherwise."""
from invenio.config import CFG_SITE_LANG, CFG_SITE_LANGS
if not ln:
return CFG_SITE_LANG
if isinstance(ln, list):
ln = ln[0]
ln = ln.replace('-', '_')
if ln in CFG_SITE_LANGS:
return ln
elif ln[:2] in CFG_SITE_LANGS:
return ln[:2]
else:
return CFG_SITE_LANG
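# For illustration only (assuming CFG_SITE_LANGS == ['en', 'fr'] and
# CFG_SITE_LANG == 'en'):
#
#     wash_language('fr')    -> 'fr'
#     wash_language('fr-FR') -> 'fr'   (normalised, then matched on 'fr')
#     wash_language('xx')    -> 'en'   (falls back to the default)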
def wash_languages(lns):
"""Look at list of languages LNS and check if there's at least one
of the allowed languages for the interface. Return it in case
of success, return the default language otherwise."""
from invenio.config import CFG_SITE_LANG, CFG_SITE_LANGS
for ln in lns:
if ln:
ln = ln.replace('-', '_')
if ln in CFG_SITE_LANGS:
return ln
elif ln[:2] in CFG_SITE_LANGS:
return ln[:2]
return CFG_SITE_LANG
def language_list_long(enabled_langs_only=True):
"""
Return list of [short name, long name] for all enabled languages,
in the same language order as they appear in CFG_SITE_LANG.
If 'enabled_langs_only' is set to False, then return all possibly
existing Invenio languages, even if they were not enabled on the
site by the local administrator. Useful for recognizing all I18N
translations in webdoc sources or bibformat templates.
"""
if enabled_langs_only:
from invenio.config import CFG_SITE_LANGS
else:
from invenio.base.config import CFG_SITE_LANGS
return map(lambda ln: [ln, babel.Locale.parse(ln).get_language_name()],
CFG_SITE_LANGS)
def is_language_rtl(ln):
"""
Returns True or False depending on whether language is
right-to-left direction.
@param ln: language
@type ln: str
@return: is language right-to-left direction?
@rtype: bool
"""
if ln in ('ar', 'fa'):
return True
return False
| gpl-2.0 | -6,845,603,090,411,437,000 | 31.679688 | 79 | 0.656706 | false |
SmokinCaterpillar/pypet | examples/example_05_custom_parameter.py | 2 | 10056 | __author__ = 'Robert Meyer'
import numpy as np
import inspect
import os # For path names being viable under Windows and Linux
from pypet import Environment, Parameter, ArrayParameter, Trajectory
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Here we will see how we can write our own custom parameters and how we can use
# it with a trajectory.
# Now we want to do a more sophisticated simulation: we will integrate a differential equation
# with an Euler scheme
# Let's first define our job to do
def euler_scheme(traj, diff_func):
"""Simulation function for Euler integration.
:param traj:
Container for parameters and results
:param diff_func:
The differential equation we want to integrate
"""
steps = traj.steps
initial_conditions = traj.initial_conditions
dimension = len(initial_conditions)
# This array will collect the results
result_array = np.zeros((steps,dimension))
# Get the function parameters stored into `traj` as a dictionary
# with the (short) names as keys :
func_params_dict = traj.func_params.f_to_dict(short_names=True, fast_access=True)
# Take initial conditions as first result
result_array[0] = initial_conditions
# Now we compute the Euler Scheme steps-1 times
for idx in range(1,steps):
result_array[idx] = diff_func(result_array[idx-1], **func_params_dict) * traj.dt + \
result_array[idx-1]
# Note the **func_params_dict unzips the dictionary, it's the reverse of **kwargs in function
# definitions!
#Finally we want to keep the results
traj.f_add_result('euler_evolution', data=result_array, comment='Our time series data!')
# Ok, now we want to make our own (derived) parameter that stores source code of python functions.
# We do NOT want a parameter that stores an executable function. This would complicate
# the problem a lot. If you have something like that in mind, you might wanna take a look
# at the marshal (http://docs.python.org/2/library/marshal) module
# or dill (https://pypi.python.org/pypi/dill) package.
# Our intention here is to define a parameter that we later on use as a derived parameter
# to simply keep track of the source code we use ('git' would be, of course, the better solution
# but this is just an illustrative example)
class FunctionParameter(Parameter):
    # We need to override the `f_set` function and simply extract the source code if our
# item is callable and store this instead.
def f_set(self, data):
if callable(data):
data = inspect.getsource(data)
return super(FunctionParameter, self).f_set(data)
# For more complicated parameters you might consider implementing:
# `f_supports` (we do not need it since we convert the data to stuff the parameter already
# supports, and that is strings!)
#
# and
# the private functions
#
# `_values_of_same_type` (to tell whether data is similar, i.e. if two data items agree in their
# type, this is important to only allow exploration within the same dimension.
# For instance, a parameter that stores integers, should only explore integers etc.)
#
# and
#
# `_equal_values` (to tell if two data items are equal. This is important for merging if you
# want to erase duplicate parameter points. The trajectory needs to know when a
# parameter space point was visited before.)
#
# and
#
# `_store` (to be able to turn complex data into basic types understood by the storage service)
#
# and
#
# `_load` (to be able to recover your complex data from the basic types understood by the storage
# service)
#
# But for now we will rely on the parent functions and hope for the best!
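# As a purely illustrative sketch (not used below, and the exact signatures may
# differ between pypet versions), such overrides could look roughly like this:
#
#     class CheckedFunctionParameter(FunctionParameter):
#
#         def _values_of_same_type(self, val1, val2):
#             # We only ever store source code, i.e. plain strings
#             return isinstance(val1, str) and isinstance(val2, str)
#
#         def _equal_values(self, val1, val2):
#             # Two functions count as "equal" if their source code matches
#             return val1 == val2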
# Ok now let's follow the ideas in the final section of the cookbook and let's
# have a part in our simulation that only defines the parameters.
def add_parameters(traj):
"""Adds all necessary parameters to the `traj` container"""
traj.f_add_parameter('steps', 10000, comment='Number of time steps to simulate')
traj.f_add_parameter('dt', 0.01, comment='Step size')
# Here we want to add the initial conditions as an array parameter. We will simulate
# a 3-D differential equation, the Lorenz attractor.
traj.f_add_parameter(ArrayParameter,'initial_conditions', np.array([0.0,0.0,0.0]),
comment = 'Our initial conditions, as default we will start from'
' origin!')
# We will group all parameters of the Lorenz differential equation into the group 'func_params'
traj.f_add_parameter('func_params.sigma', 10.0)
traj.f_add_parameter('func_params.beta', 8.0/3.0)
traj.f_add_parameter('func_params.rho', 28.0)
#For the fun of it we will annotate the group
traj.func_params.v_annotations.info='This group contains as default the original values chosen ' \
'by Edward Lorenz in 1963. Check it out on wikipedia ' \
'(https://en.wikipedia.org/wiki/Lorenz_attractor)!'
# We need to define the lorenz function, we will assume that the value array is 3 dimensional,
# First dimension contains the x-component, second y-component, and third the z-component
def diff_lorenz(value_array, sigma, beta, rho):
"""The Lorenz attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param sigma: Constant attractor parameter
    :param beta: Constant attractor parameter
:param rho: Constant attractor parameter
:return: 3d array of the Lorenz system evaluated at `value_array`
"""
diff_array = np.zeros(3)
diff_array[0] = sigma * (value_array[1]-value_array[0])
diff_array[1] = value_array[0] * (rho - value_array[2]) - value_array[1]
diff_array[2] = value_array[0] * value_array[1] - beta * value_array[2]
return diff_array
# And here goes our main function
def main():
filename = os.path.join('hdf5', 'example_05.hdf5')
env = Environment(trajectory='Example_05_Euler_Integration',
filename=filename,
file_title='Example_05_Euler_Integration',
overwrite_file=True,
comment='Go for Euler!')
traj = env.trajectory
trajectory_name = traj.v_name
# 1st a) phase parameter addition
add_parameters(traj)
# 1st b) phase preparation
# We will add the differential equation (well, its source code only) as a derived parameter
traj.f_add_derived_parameter(FunctionParameter,'diff_eq', diff_lorenz,
comment='Source code of our equation!')
# We want to explore some initial conditions
traj.f_explore({'initial_conditions' : [
np.array([0.01,0.01,0.01]),
np.array([2.02,0.02,0.02]),
np.array([42.0,4.2,0.42])
]})
# 3 different conditions are enough for an illustrative example
# 2nd phase let's run the experiment
# We pass `euler_scheme` as our top-level simulation function and
# the Lorenz equation 'diff_lorenz' as an additional argument
env.run(euler_scheme, diff_lorenz)
# We don't have a 3rd phase of post-processing here
# 4th phase analysis.
# I would recommend to do post-processing completely independent from the simulation,
# but for simplicity let's do it here.
# Let's assume that we start all over again and load the entire trajectory new.
# Yet, there is an error within this approach, do you spot it?
del traj
traj = Trajectory(filename=filename)
# We will only fully load parameters and derived parameters.
# Results will be loaded manually later on.
try:
# However, this will fail because our trajectory does not know how to
# build the FunctionParameter. You have seen this coming, right?
traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
load_results=1)
except ImportError as e:
        print('That didn\'t work, I am sorry: %s ' % str(e))
# Ok, let's try again but this time with adding our parameter to the imports
traj = Trajectory(filename=filename,
dynamically_imported_classes=FunctionParameter)
# Now it works:
traj.f_load(name=trajectory_name, load_parameters=2, load_derived_parameters=2,
load_results=1)
#For the fun of it, let's print the source code
print('\n ---------- The source code of your function ---------- \n %s' % traj.diff_eq)
# Let's get the exploration array:
initial_conditions_exploration_array = traj.f_get('initial_conditions').f_get_range()
# Now let's plot our simulated equations for the different initial conditions:
# We will iterate through the run names
for idx, run_name in enumerate(traj.f_get_run_names()):
#Get the result of run idx from the trajectory
euler_result = traj.results.f_get(run_name).euler_evolution
# Now we manually need to load the result. Actually the results are not so large and we
# could load them all at once. But for demonstration we do as if they were huge:
traj.f_load_item(euler_result)
euler_data = euler_result.data
#Plot fancy 3d plot
fig = plt.figure(idx)
ax = fig.gca(projection='3d')
x = euler_data[:,0]
y = euler_data[:,1]
z = euler_data[:,2]
ax.plot(x, y, z, label='Initial Conditions: %s' % str(initial_conditions_exploration_array[idx]))
plt.legend()
plt.show()
        # Now we free the data again (because we assume it's huuuuuuge):
del euler_data
euler_result.f_empty()
# You have to click through the images to stop the example_05 module!
# Finally disable logging and close all log-files
env.disable_logging()
if __name__ == '__main__':
main()
| bsd-3-clause | -4,568,353,429,156,449,300 | 39.063745 | 105 | 0.667363 | false |
yadayada/acd_cli | acdcli/api/oauth.py | 1 | 11368 | import os
import json
import requests
import time
import logging
import webbrowser
import datetime
import random
import string
from requests.auth import AuthBase
from urllib.parse import urlparse, parse_qs
from threading import Lock
logger = logging.getLogger(__name__)
TOKEN_INFO_URL = 'https://api.amazon.com/auth/o2/tokeninfo'
def create_handler(path: str):
from .common import RequestError
try:
return LocalOAuthHandler(path)
except (KeyError, RequestError, KeyboardInterrupt, EOFError, SystemExit):
raise
except:
pass
return AppspotOAuthHandler(path)
class OAuthHandler(AuthBase):
OAUTH_DATA_FILE = 'oauth.json'
class KEYS(object):
EXP_IN = 'expires_in'
ACC_TOKEN = 'access_token'
REFR_TOKEN = 'refresh_token'
EXP_TIME = 'exp_time' # manually added
REDIRECT_URI = 'redirect_uri' # only for local
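    # For illustration only: the oauth data file is plain JSON, so a minimal
    # document that passes validate() would look roughly like (fake values):
    #
    #     {
    #         "access_token": "Atza|...",
    #         "refresh_token": "Atzr|...",
    #         "expires_in": 3600
    #     }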
def __init__(self, path):
self.path = path
self.oauth_data = {}
self.oauth_data_path = os.path.join(path, self.OAUTH_DATA_FILE)
self.init_time = time.time()
self.lock = Lock()
def __call__(self, r: requests.Request):
with self.lock:
r.headers['Authorization'] = self.get_auth_token()
return r
@property
def exp_time(self):
return self.oauth_data[self.KEYS.EXP_TIME]
@classmethod
def validate(cls, oauth: str) -> dict:
"""Deserialize and validate an OAuth string
:raises: RequestError"""
from .common import RequestError
try:
o = json.loads(oauth)
o[cls.KEYS.ACC_TOKEN]
o[cls.KEYS.EXP_IN]
o[cls.KEYS.REFR_TOKEN]
return o
except (ValueError, KeyError) as e:
logger.critical('Invalid authentication token: Invalid JSON or missing key.'
'Token:\n%s' % oauth)
raise RequestError(RequestError.CODE.INVALID_TOKEN, e.__str__())
def treat_auth_token(self, time_: float):
"""Adds expiration time to member OAuth dict using specified begin time."""
exp_time = time_ + self.oauth_data[self.KEYS.EXP_IN] - 120
self.oauth_data[self.KEYS.EXP_TIME] = exp_time
logger.info('New token expires at %s.'
% datetime.datetime.fromtimestamp(exp_time).isoformat(' '))
def load_oauth_data(self):
"""Loads oauth data file, validate and add expiration time if necessary"""
self.check_oauth_file_exists()
with open(self.oauth_data_path) as oa:
o = oa.read()
try:
self.oauth_data = self.validate(o)
except:
logger.critical('Local OAuth data file "%s" is invalid. '
'Please fix or delete it.' % self.oauth_data_path)
raise
if self.KEYS.EXP_TIME not in self.oauth_data:
self.treat_auth_token(self.init_time)
self.write_oauth_data()
else:
self.get_auth_token(reload=False)
def get_auth_token(self, reload=True) -> str:
"""Gets current access token, refreshes if necessary.
:param reload: whether the oauth token file should be reloaded (external update)"""
if time.time() > self.exp_time:
logger.info('Token expired at %s.'
% datetime.datetime.fromtimestamp(self.exp_time).isoformat(' '))
# if multiple instances are running, check for updated file
if reload:
with open(self.oauth_data_path) as oa:
o = oa.read()
self.oauth_data = self.validate(o)
if time.time() > self.exp_time:
self.refresh_auth_token()
else:
logger.info('Externally updated token found in oauth file.')
return "Bearer " + self.oauth_data[self.KEYS.ACC_TOKEN]
def write_oauth_data(self):
"""Dumps (treated) OAuth dict to file as JSON."""
new_nm = self.oauth_data_path + ''.join(random.choice(string.hexdigits) for _ in range(8))
rm_nm = self.oauth_data_path + ''.join(random.choice(string.hexdigits) for _ in range(8))
f = open(new_nm, 'w')
json.dump(self.oauth_data, f, indent=4, sort_keys=True)
f.flush()
os.fsync(f.fileno())
f.close()
if os.path.isfile(self.oauth_data_path):
os.rename(self.oauth_data_path, rm_nm)
os.rename(new_nm, self.oauth_data_path)
try:
os.remove(rm_nm)
except OSError:
pass
def refresh_auth_token(self):
"""Fetches a new access token using the refresh token."""
raise NotImplementedError
def check_oauth_file_exists(self):
"""Checks for OAuth file existence and one-time initialize if necessary. Throws on error."""
raise NotImplementedError
def get_access_token_info(self) -> dict:
"""
:returns:
int exp: expiration time in sec,
str aud: client id
user_id, app_id, iat (exp time)"""
r = requests.get(TOKEN_INFO_URL,
params={'access_token': self.oauth_data['access_token']})
return r.json()
class AppspotOAuthHandler(OAuthHandler):
APPSPOT_URL = 'https://acd-api-oa.appspot.com/'
def __init__(self, path):
super().__init__(path)
self.load_oauth_data()
logger.info('%s initialized' % self.__class__.__name__)
def check_oauth_file_exists(self):
"""Checks for existence of oauth token file and instructs user to visit
the Appspot page if it was not found.
:raises: FileNotFoundError if oauth file was not placed into cache directory"""
if os.path.isfile(self.oauth_data_path):
return
input('For the one-time authentication a browser (tab) will be opened at %s.\n'
% AppspotOAuthHandler.APPSPOT_URL + 'Please accept the request and ' +
'save the plaintext response data into a file called "%s" ' % self.OAUTH_DATA_FILE +
'in the directory "%s".\nPress a key to open a browser.\n' % self.path)
webbrowser.open_new_tab(AppspotOAuthHandler.APPSPOT_URL)
input('Press a key if you have saved the "%s" file into "%s".\n'
% (self.OAUTH_DATA_FILE, self.path))
with open(self.oauth_data_path):
pass
def refresh_auth_token(self):
""":raises: RequestError"""
logger.info('Refreshing authentication token.')
ref = {self.KEYS.REFR_TOKEN: self.oauth_data[self.KEYS.REFR_TOKEN]}
t = time.time()
from .common import RequestError, ConnectionError
try:
response = requests.post(self.APPSPOT_URL, data=ref)
except ConnectionError as e:
logger.critical('Error refreshing authentication token.')
raise RequestError(RequestError.CODE.CONN_EXCEPTION, e.__str__())
if response.status_code != requests.codes.ok:
raise RequestError(RequestError.CODE.REFRESH_FAILED,
'Error refreshing authentication token: %s' % response.text)
r = self.validate(response.text)
self.oauth_data = r
self.treat_auth_token(t)
self.write_oauth_data()
class LocalOAuthHandler(OAuthHandler):
"""A local OAuth handler that works with a whitelisted security profile.
The profile must not be created prior to June 2015. Profiles created prior to this month
are not able to use the new scope "clouddrive:read_all" that replaces "clouddrive:read".
https://developer.amazon.com/public/apis/experience/cloud-drive/content/getting-started"""
CLIENT_DATA_FILE = 'client_data'
AMAZON_OA_LOGIN_URL = 'https://amazon.com/ap/oa'
AMAZON_OA_TOKEN_URL = 'https://api.amazon.com/auth/o2/token'
REDIRECT_URI = 'http://localhost'
def __init__(self, path):
super().__init__(path)
self.client_data = {}
self.client_id = lambda: self.client_data.get('CLIENT_ID')
self.client_secret = lambda: self.client_data.get('CLIENT_SECRET')
self.OAUTH_ST1 = lambda: {'client_id': self.client_id(),
'response_type': 'code',
'scope': 'clouddrive:read_all clouddrive:write',
'redirect_uri': self.REDIRECT_URI}
self.OAUTH_ST2 = lambda: {'grant_type': 'authorization_code',
'code': None,
'client_id': self.client_id(),
'client_secret': self.client_secret(),
'redirect_uri': self.REDIRECT_URI}
self.OAUTH_REF = lambda: {'grant_type': 'refresh_token',
'refresh_token': None,
'client_id': self.client_id(),
'client_secret': self.client_secret(),
'redirect_uri': self.REDIRECT_URI}
self.load_client_data()
self.load_oauth_data()
logger.info('%s initialized.' % self.__class__.__name__)
def load_client_data(self):
""":raises: IOError if client data file was not found
:raises: KeyError if client data file has missing key(s)"""
cdp = os.path.join(self.path, self.CLIENT_DATA_FILE)
with open(cdp) as cd:
self.client_data = json.load(cd)
if self.client_id() == '' or self.client_secret() == '':
logger.critical('Client ID or client secret empty or key absent.')
raise KeyError
def check_oauth_file_exists(self):
""":raises: Exception"""
if not os.path.isfile(self.oauth_data_path):
from urllib.parse import urlencode
url = self.AMAZON_OA_LOGIN_URL + '?' + urlencode(self.OAUTH_ST1())
webbrowser.open_new_tab(url)
print('A window will have opened at %s' % url)
ret_url = input('Please log in or accept '
'and enter the URL you have been redirected to: ')
ret_q = parse_qs(urlparse(ret_url).query)
st2 = self.OAUTH_ST2()
st2['code'] = ret_q['code'][0]
response = requests.post(self.AMAZON_OA_TOKEN_URL, data=st2)
self.oauth_data = self.validate(response.text)
self.write_oauth_data()
def refresh_auth_token(self):
""":raises: RequestError"""
logger.info('Refreshing authentication token.')
ref = self.OAUTH_REF()
ref[self.KEYS.REFR_TOKEN] = self.oauth_data[self.KEYS.REFR_TOKEN]
        from .common import RequestError, ConnectionError
t = time.time()
try:
response = requests.post(self.AMAZON_OA_TOKEN_URL, data=ref)
except ConnectionError as e:
logger.critical('Error refreshing authentication token.')
raise RequestError(RequestError.CODE.CONN_EXCEPTION, e.__str__())
if response.status_code != requests.codes.ok:
raise RequestError(RequestError.CODE.REFRESH_FAILED,
'Error refreshing authentication token: %s' % response.text)
self.oauth_data = self.validate(response.text)
self.treat_auth_token(t)
self.write_oauth_data()
| gpl-2.0 | 3,480,279,032,755,513,000 | 35.203822 | 100 | 0.584887 | false |
googleapis/googleapis-gen | google/cloud/sql/v1beta4/sql-v1beta4-py/google/cloud/sql_v1beta4/services/sql_flags_service/async_client.py | 1 | 8796 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.sql_v1beta4.types import cloud_sql
from google.cloud.sql_v1beta4.types import cloud_sql_resources
from .transports.base import SqlFlagsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import SqlFlagsServiceGrpcAsyncIOTransport
from .client import SqlFlagsServiceClient
class SqlFlagsServiceAsyncClient:
""""""
_client: SqlFlagsServiceClient
DEFAULT_ENDPOINT = SqlFlagsServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = SqlFlagsServiceClient.DEFAULT_MTLS_ENDPOINT
common_billing_account_path = staticmethod(SqlFlagsServiceClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(SqlFlagsServiceClient.parse_common_billing_account_path)
common_folder_path = staticmethod(SqlFlagsServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(SqlFlagsServiceClient.parse_common_folder_path)
common_organization_path = staticmethod(SqlFlagsServiceClient.common_organization_path)
parse_common_organization_path = staticmethod(SqlFlagsServiceClient.parse_common_organization_path)
common_project_path = staticmethod(SqlFlagsServiceClient.common_project_path)
parse_common_project_path = staticmethod(SqlFlagsServiceClient.parse_common_project_path)
common_location_path = staticmethod(SqlFlagsServiceClient.common_location_path)
parse_common_location_path = staticmethod(SqlFlagsServiceClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SqlFlagsServiceAsyncClient: The constructed client.
"""
return SqlFlagsServiceClient.from_service_account_info.__func__(SqlFlagsServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
SqlFlagsServiceAsyncClient: The constructed client.
"""
return SqlFlagsServiceClient.from_service_account_file.__func__(SqlFlagsServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> SqlFlagsServiceTransport:
"""Returns the transport used by the client instance.
Returns:
SqlFlagsServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(SqlFlagsServiceClient).get_transport_class, type(SqlFlagsServiceClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, SqlFlagsServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the sql flags service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.SqlFlagsServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = SqlFlagsServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def list(self,
request: cloud_sql.SqlFlagsListRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cloud_sql_resources.FlagsListResponse:
r"""List all available database flags for Cloud SQL
instances.
Args:
request (:class:`google.cloud.sql_v1beta4.types.SqlFlagsListRequest`):
The request object.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.sql_v1beta4.types.FlagsListResponse:
Flags list response.
"""
# Create or coerce a protobuf request object.
request = cloud_sql.SqlFlagsListRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
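# Rough usage sketch (illustrative only; credential and endpoint setup omitted):
#
#     client = SqlFlagsServiceAsyncClient()
#     response = await client.list(cloud_sql.SqlFlagsListRequest())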
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-sql",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"SqlFlagsServiceAsyncClient",
)
| apache-2.0 | 7,890,718,213,699,969,000 | 41.699029 | 142 | 0.668827 | false |
CYBAI/servo | tests/wpt/web-platform-tests/tools/third_party/hyper/hyper/h2/frame_buffer.py | 37 | 7300 | # -*- coding: utf-8 -*-
"""
h2/frame_buffer
~~~~~~~~~~~~~~~
A data structure that provides a way to iterate over a byte buffer in terms of
frames.
"""
from hyperframe.exceptions import UnknownFrameError, InvalidFrameError
from hyperframe.frame import (
Frame, HeadersFrame, ContinuationFrame, PushPromiseFrame
)
from .exceptions import (
ProtocolError, FrameTooLargeError, FrameDataMissingError
)
# To avoid a DOS attack based on sending loads of continuation frames, we limit
# the maximum number we're prepared to receive. In this case, we'll set the
# limit to 64, which means the largest encoded header block we can receive by
# default is 262144 bytes long, and the largest possible *at all* is 1073741760
# bytes long.
#
# This value seems reasonable for now, but in future we may want to evaluate
# making it configurable.
CONTINUATION_BACKLOG = 64
class FrameBuffer(object):
"""
This is a data structure that expects to act as a buffer for HTTP/2 data
    that allows iteration in terms of H2 frames.
"""
def __init__(self, server=False):
self.data = b''
self.max_frame_size = 0
self._preamble = b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n' if server else b''
self._preamble_len = len(self._preamble)
self._headers_buffer = []
def add_data(self, data):
"""
Add more data to the frame buffer.
:param data: A bytestring containing the byte buffer.
"""
if self._preamble_len:
data_len = len(data)
of_which_preamble = min(self._preamble_len, data_len)
if self._preamble[:of_which_preamble] != data[:of_which_preamble]:
raise ProtocolError("Invalid HTTP/2 preamble.")
data = data[of_which_preamble:]
self._preamble_len -= of_which_preamble
self._preamble = self._preamble[of_which_preamble:]
self.data += data
def _parse_frame_header(self, data):
"""
Parses the frame header from the data. Either returns a tuple of
(frame, length), or throws an exception. The returned frame may be None
if the frame is of unknown type.
"""
try:
frame, length = Frame.parse_frame_header(data[:9])
except UnknownFrameError as e: # Platform-specific: Hyperframe < 5.0
# Here we do something a bit odd. We want to consume the frame data
# as consistently as possible, but we also don't ever want to yield
# None. Instead, we make sure that, if there is no frame, we
# recurse into ourselves.
# This can only happen now on older versions of hyperframe.
# TODO: Remove in 3.0
length = e.length
frame = None
except ValueError as e:
# The frame header is invalid. This is a ProtocolError
raise ProtocolError("Invalid frame header received: %s" % str(e))
return frame, length
def _validate_frame_length(self, length):
"""
Confirm that the frame is an appropriate length.
"""
if length > self.max_frame_size:
raise FrameTooLargeError(
"Received overlong frame: length %d, max %d" %
(length, self.max_frame_size)
)
def _update_header_buffer(self, f):
"""
Updates the internal header buffer. Returns a frame that should replace
the current one. May throw exceptions if this frame is invalid.
"""
# Check if we're in the middle of a headers block. If we are, this
# frame *must* be a CONTINUATION frame with the same stream ID as the
# leading HEADERS or PUSH_PROMISE frame. Anything else is a
# ProtocolError. If the frame *is* valid, append it to the header
# buffer.
if self._headers_buffer:
stream_id = self._headers_buffer[0].stream_id
valid_frame = (
f is not None and
isinstance(f, ContinuationFrame) and
f.stream_id == stream_id
)
if not valid_frame:
raise ProtocolError("Invalid frame during header block.")
# Append the frame to the buffer.
self._headers_buffer.append(f)
if len(self._headers_buffer) > CONTINUATION_BACKLOG:
raise ProtocolError("Too many continuation frames received.")
# If this is the end of the header block, then we want to build a
# mutant HEADERS frame that's massive. Use the original one we got,
            # then set END_HEADERS and set its data appropriately. If it's not
# the end of the block, lose the current frame: we can't yield it.
if 'END_HEADERS' in f.flags:
f = self._headers_buffer[0]
f.flags.add('END_HEADERS')
f.data = b''.join(x.data for x in self._headers_buffer)
self._headers_buffer = []
else:
f = None
elif (isinstance(f, (HeadersFrame, PushPromiseFrame)) and
'END_HEADERS' not in f.flags):
# This is the start of a headers block! Save the frame off and then
# act like we didn't receive one.
self._headers_buffer.append(f)
f = None
return f
# The methods below support the iterator protocol.
def __iter__(self):
return self
def next(self): # Python 2
# First, check that we have enough data to successfully parse the
# next frame header. If not, bail. Otherwise, parse it.
if len(self.data) < 9:
raise StopIteration()
try:
f, length = self._parse_frame_header(self.data)
except InvalidFrameError: # pragma: no cover
raise ProtocolError("Received frame with invalid frame header.")
# Next, check that we have enough length to parse the frame body. If
# not, bail, leaving the frame header data in the buffer for next time.
if len(self.data) < length + 9:
raise StopIteration()
# Confirm the frame has an appropriate length.
self._validate_frame_length(length)
# Don't try to parse the body if we didn't get a frame we know about:
# there's nothing we can do with it anyway.
if f is not None:
try:
f.parse_body(memoryview(self.data[9:9+length]))
except InvalidFrameError:
raise FrameDataMissingError("Frame data missing or invalid")
# At this point, as we know we'll use or discard the entire frame, we
# can update the data.
self.data = self.data[9+length:]
# Pass the frame through the header buffer.
f = self._update_header_buffer(f)
# If we got a frame we didn't understand or shouldn't yield, rather
# than return None it'd be better if we just tried to get the next
# frame in the sequence instead. Recurse back into ourselves to do
# that. This is safe because the amount of work we have to do here is
# strictly bounded by the length of the buffer.
return f if f is not None else self.next()
def __next__(self): # Python 3
return self.next()
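# Rough usage sketch (illustrative only): feed raw bytes in, pull complete
# frames out. `max_frame_size` must be set from the connection settings first.
#
#     buffer = FrameBuffer(server=True)
#     buffer.max_frame_size = 16384
#     buffer.add_data(data_from_socket)   # hypothetical byte string
#     for frame in buffer:
#         handle(frame)                   # hypothetical handler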
| mpl-2.0 | -1,467,161,729,018,229,000 | 38.673913 | 79 | 0.606712 | false |
pidah/st2contrib | packs/puppet/actions/lib/remote_actions.py | 12 | 1209 | import sys
import pipes
import subprocess
__all__ = [
'PuppetBaseAction'
]
class PuppetBaseAction(object):
PUPPET_BINARY = 'puppet'
def _run_command(self, cmd):
cmd_string = ' '.join(pipes.quote(s) for s in cmd)
sys.stderr.write('Running command "%s"\n' % (cmd_string))
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
exit_code = process.returncode
return self._handle_command_result(exit_code=exit_code, stdout=stdout,
stderr=stderr)
def _get_full_command(self, args):
cmd = [self.PUPPET_BINARY] + args
return cmd
def _handle_command_result(self, exit_code, stdout, stderr):
if exit_code == 0:
sys.stderr.write('Command successfully finished\n')
else:
error = []
if stdout:
error.append(stdout)
if stderr:
error.append(stderr)
error = '\n'.join(error)
sys.stderr.write('Command exited with an error: %s\n' % (error))
sys.exit(exit_code)
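# A minimal sketch of how a concrete action might build on this base class
# (the 'cert list' subcommand here is only an illustration):
#
#     class PuppetListCerts(PuppetBaseAction):
#         def run(self):
#             cmd = self._get_full_command(['cert', 'list', '--all'])
#             self._run_command(cmd=cmd)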
| apache-2.0 | 375,029,634,741,588,740 | 27.785714 | 78 | 0.555004 | false |
theAprel/Archive | python/resize.py | 1 | 2142 | from xml.dom import minidom
import os
import sys
from glob import glob
import codecs
def resize(filename, directory, md5sumFile):
glob_files = glob(os.path.join(directory, u'**'), recursive=True)
with codecs.open(filename, 'r', encoding='utf-8') as f:
metadata_file_contents = f.read()
xmldoc = minidom.parseString(metadata_file_contents)
xml_files = xmldoc.getElementsByTagName('FILE')
md5 = {}
with codecs.open(md5sumFile, 'r', encoding='utf-8') as f:
content = [x.strip('\n') for x in f.readlines()]
for line in content:
parts = line.split(' ')
md5[parts[1]] = parts[0]
for f in xml_files:
path = f.attributes['path'].value
newPath = path[:-3] + 'mkv'
f.attributes['path'].value = newPath
f.getElementsByTagName('SIZE').item(0).childNodes[0].nodeValue = os.path.getsize(os.path.join(directory, newPath))
if f.getElementsByTagName('MD5').item(0) is None: # In case the METADATA file was generated w/o checksums
new_md5_element = xmldoc.createElement('MD5')
text_node = xmldoc.createTextNode(md5[newPath])
new_md5_element.appendChild(text_node)
f.appendChild(new_md5_element)
else:
f.getElementsByTagName('MD5').item(0).childNodes[0].nodeValue = md5[newPath]
found_it = False
for full_path in glob_files[:]:
if full_path.endswith(newPath):
glob_files.remove(full_path)
found_it = True
break #in case of dup file names over dirs, remove only one
if not found_it:
raise ValueError('No matching file was found on glob path', path)
if len(glob_files) != 0:
print("WARNING: The following files were expected by glob, but not found" + str(glob_files))
with codecs.open(filename[:-4] + '-NEW.xml', 'w', encoding='utf-8') as writer:
xmldoc.writexml(writer)
if __name__ == "__main__":
print("Usage: resize.py <METADATA.xml file> <directory of files> <converted MD5 checksum file>")
resize(sys.argv[1], sys.argv[2], sys.argv[3])
| gpl-3.0 | -726,337,320,388,052,500 | 40.192308 | 122 | 0.619981 | false |
southpawtech/TACTIC-DEV | src/tactic/ui/checkin/file_properties_wdg.py | 6 | 2336 | ###########################################################
#
# Copyright (c) 2011, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['FilePropertiesWdg']
from pyasm.common import Environment
from pyasm.search import Search
from pyasm.web import DivWdg
from tactic.ui.common import BaseRefreshWdg
import os
class FilePropertiesWdg(BaseRefreshWdg):
def get_display(my):
path = my.kwargs.get("path")
md5 = my.kwargs.get("md5")
snapshot_code = my.kwargs.get("snapshot_code")
top = my.top
top.add_style("padding: 10px")
top.add_color("background", "background", -5)
path_div = DivWdg()
top.add(path_div)
path_div.add("<b>Local Path: %s</b><br/>" % path)
path_div.add_style("font-size: 12px")
path_div.add_style("margin-bottom: 10px")
info_wdg = DivWdg()
info_wdg.add_color("background", "background3")
top.add(info_wdg)
info_wdg.add("md5: %s<br/>" % md5)
info_wdg.add("snapshot_code: %s<br/>" % snapshot_code)
info_wdg.add_style("padding: 5px")
search_key = my.kwargs.get("search_key")
sobject = Search.get_by_search_key(search_key)
        # bit of a hack to get the file system paths
#spath = Common.get_filesystem_name(path)
spath = path.replace(" ", "_")
search = Search("sthpw/file")
search.add_sobject_filter(sobject)
search.add_filter("source_path", spath)
search.add_order_by("timestamp desc")
files = search.get_sobjects()
'''
files_div = DivWdg()
files_div.add_style("margin: 5px")
files_div.add_style("padding: 5px")
files_div.add_border()
top.add(files_div)
'''
snapshots = []
for file in files:
snapshot = file.get_parent()
snapshots.append(snapshot)
from tactic.ui.panel import StaticTableLayoutWdg
table = StaticTableLayoutWdg(search_type="sthpw/snapshot", view="table", show_shelf=False)
table.set_sobjects(snapshots)
top.add(table)
return top
| epl-1.0 | -3,417,283,358,092,973,600 | 25.850575 | 98 | 0.589469 | false |
heromod/migrid | mig/cgi-bin/grep.py | 1 | 1096 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# grep - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import cgi
import cgitb
cgitb.enable()
from shared.functionality.grep import main
from shared.cgiscriptstub import run_cgi_script
run_cgi_script(main)
| gpl-2.0 | -8,289,459,494,071,087,000 | 30.314286 | 81 | 0.738139 | false |
diogo149/BooMLet | boomlet/storage.py | 1 | 1985 | try:
import cPickle as pickle
except ImportError:
import pickle
import joblib
import zlib
import os
import glob
import dill
def mkdir(filename):
""" try to make directory
"""
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
def exists(filename):
return os.path.exists(filename)
def writes(filename, s):
with open(filename, 'w') as outfile:
outfile.write(s)
def reads(filename):
with open(filename) as infile:
return infile.read()
def compress(s, level=9):
return zlib.compress(s, level)
def decompress(s):
return zlib.decompress(s)
def pickle_load(filename):
"""
if this fails with a core dump, one may still be able to load a
pickle by importing pickle instead of cPickle
"""
with open(filename) as infile:
return pickle.load(infile)
def pickle_dump(filename, obj):
with open(filename, 'w') as outfile:
pickle.dump(obj, outfile, pickle.HIGHEST_PROTOCOL)
def pickle_loads(s):
return pickle.loads(s)
def pickle_dumps(obj):
return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
def joblib_load(filename, mmap_mode='r'):
return joblib.load(filename, mmap_mode)
def joblib_dump(filename, obj, compress=0):
return joblib.dump(obj, filename, compress)
def dill_load(filename):
with open(filename) as infile:
return dill.load(infile)
def dill_dump(filename, obj):
with open(filename, 'w') as outfile:
dill.dump(obj, outfile)
def dill_loads(s):
return dill.loads(s)
def dill_dumps(obj):
return dill.dumps(obj)
def glob_one(*args):
if len(args) == 1:
dirname = "."
hint, = args
elif len(args) == 2:
dirname, hint = args
else:
raise Exception("improper argument count: {}".format(args))
globbed = glob.glob1(dirname, "*" + hint + "*")
assert len(globbed) == 1, (dirname, hint, globbed)
return os.path.join(dirname, globbed[0])
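# Illustrative round-trip using the helpers above (the path is made up):
#
#     pickle_dump('/tmp/obj.pkl', {'a': 1})
#     assert pickle_load('/tmp/obj.pkl') == {'a': 1}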
| gpl-3.0 | 3,464,592,704,807,118,300 | 18.271845 | 67 | 0.647859 | false |
angelapper/edx-platform | common/djangoapps/static_replace/admin.py | 24 | 1452 | """
Django admin page for AssetBaseUrlConfig, which allows you to set the base URL
that gets prepended to asset URLs in order to serve them from, say, a CDN.
"""
from config_models.admin import ConfigurationModelAdmin
from django.contrib import admin
from .models import AssetBaseUrlConfig, AssetExcludedExtensionsConfig
class AssetBaseUrlConfigAdmin(ConfigurationModelAdmin):
"""
Basic configuration for asset base URL.
"""
list_display = [
'base_url'
]
def get_list_display(self, request):
"""
Restore default list_display behavior.
ConfigurationModelAdmin overrides this, but in a way that doesn't
respect the ordering. This lets us customize it the usual Django admin
way.
"""
return self.list_display
class AssetExcludedExtensionsConfigAdmin(ConfigurationModelAdmin):
"""
Basic configuration for asset base URL.
"""
list_display = [
'excluded_extensions'
]
def get_list_display(self, request):
"""
Restore default list_display behavior.
ConfigurationModelAdmin overrides this, but in a way that doesn't
respect the ordering. This lets us customize it the usual Django admin
way.
"""
return self.list_display
admin.site.register(AssetBaseUrlConfig, AssetBaseUrlConfigAdmin)
admin.site.register(AssetExcludedExtensionsConfig, AssetExcludedExtensionsConfigAdmin)
| agpl-3.0 | -2,707,649,998,156,532,000 | 28.04 | 86 | 0.704545 | false |
EliteTK/qutebrowser | qutebrowser/browser/webkit/webkitinspector.py | 4 | 1310 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Customized QWebInspector for QtWebKit."""
from PyQt5.QtWebKitWidgets import QWebInspector
from qutebrowser.browser import inspector
class WebKitInspector(inspector.AbstractWebInspector):
"""A web inspector for QtWebKit."""
def __init__(self, parent=None):
super().__init__(parent)
qwebinspector = QWebInspector()
self._set_widget(qwebinspector)
def inspect(self, page):
self._check_developer_extras()
self._widget.setPage(page)
self.show()
| gpl-3.0 | -7,392,202,281,232,280,000 | 31.75 | 74 | 0.728244 | false |
google/certificate-transparency | python/ct/client/reporter_test.py | 3 | 4841 | #!/usr/bin/env python
import unittest
import sys
from collections import defaultdict
from absl import flags as gflags
from ct.client import reporter
from ct.client.db import cert_desc
from ct.client.db import sqlite_cert_db
from ct.client.db import sqlite_connection as sqlitecon
from ct.crypto import cert
from ct.proto import certificate_pb2
from ct.proto import client_pb2
from ct.test import test_config
STRICT_DER = cert.Certificate.from_der_file(
test_config.get_test_file_path('google_cert.der'), False).to_der()
NON_STRICT_DER = cert.Certificate.from_pem_file(
test_config.get_test_file_path('invalid_ip.pem'), False).to_der()
CHAIN_FILE = test_config.get_test_file_path('google_chain.pem')
CHAIN_DERS = [c.to_der() for c in cert.certs_from_pem_file(CHAIN_FILE)]
SELF_SIGNED_ROOT_DER = cert.Certificate.from_pem_file(
test_config.get_test_file_path('subrigo_net.pem'), False).to_der()
def readable_dn(dn_attribs):
return ",".join(["%s=%s" % (attr.type, attr.value) for attr in dn_attribs])
class CertificateReportTest(unittest.TestCase):
class CertificateReportBase(reporter.CertificateReport):
def __init__(self):
super(CertificateReportTest.CertificateReportBase, self).__init__()
def report(self):
super(CertificateReportTest.CertificateReportBase, self).report()
return self._certs
def reset(self):
self._certs = {}
def _batch_scanned_callback(self, result):
for desc, log_index in result:
self._certs[log_index] = desc
def test_scan_der_cert(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, STRICT_DER, [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
def test_scan_der_cert_broken_cert(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, "asdf", [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
self.assertFalse(results[0].subject)
def test_scan_der_cert_check_non_strict(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, NON_STRICT_DER, [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
self.assertTrue(results[0].subject)
def test_entry_type_propogated(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, STRICT_DER, [], client_pb2.PRECERT_ENTRY),
(1, STRICT_DER, [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 2)
self.assertEquals(results[0].entry_type, client_pb2.PRECERT_ENTRY)
self.assertEquals(results[1].entry_type, client_pb2.X509_ENTRY)
def test_issuer_and_root_issuer_populated_from_chain(self):
self.assertEqual(3, len(CHAIN_DERS))
report = self.CertificateReportBase()
report.scan_der_certs([(0, CHAIN_DERS[0], CHAIN_DERS[1:],
client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
issuer_cert = cert_desc.from_cert(cert.Certificate(CHAIN_DERS[1]))
root_cert = cert_desc.from_cert(cert.Certificate(CHAIN_DERS[2]))
self.assertEqual(readable_dn(results[0].issuer),
'C=US,O=Google Inc,CN=Google Internet Authority')
self.assertEqual(readable_dn(results[0].root_issuer),
'C=US,O=Equifax,OU=Equifax Secure Certificate Authority')
def test_chain_containing_only_root_handled(self):
report = self.CertificateReportBase()
report.scan_der_certs([(0, SELF_SIGNED_ROOT_DER, [], client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
self.assertEquals(results[0].entry_type, client_pb2.X509_ENTRY)
def test_issuer_public_key_populated_from_chain(self):
# Verify the test data is what is expected for this unit test.
self.assertEqual(3, len(CHAIN_DERS))
self.assertEqual(
cert.Certificate(CHAIN_DERS[1]).key_hash(hashfunc="sha256").encode('hex'),
'b6b95432abae57fe020cb2b74f4f9f9173c8c708afc9e732ace23279047c6d05')
report = self.CertificateReportBase()
report.scan_der_certs([(0, CHAIN_DERS[0], CHAIN_DERS[1:],
client_pb2.X509_ENTRY)])
results = report.report()
self.assertEqual(len(results), 1)
self.assertEqual(results[0].issuer_pk_sha256_hash.encode('hex'),
'b6b95432abae57fe020cb2b74f4f9f9173c8c708afc9e732ace23279047c6d05')
if __name__ == '__main__':
sys.argv = gflags.FLAGS(sys.argv)
unittest.main()
| apache-2.0 | 4,958,583,780,802,296,000 | 39.341667 | 86 | 0.650485 | false |
blueskycoco/zcl-y | rtthread-apps/tools/ua.py | 1 | 4190 | # File : ua.py
# Tool Script for building User Applications
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-02-07     Bernard      The first version
#
import os
import sys
from SCons.Script import *
Rtt_Root = ''
BSP_Root = ''
Env = None
def BuildEnv(BSP_ROOT, RTT_ROOT):
if BSP_ROOT == None:
if os.getenv('BSP_ROOT'):
BSP_ROOT = os.getenv('BSP_ROOT')
else:
print 'Please set BSP(board support package) directory!'
exit(-1)
if not os.path.exists(BSP_ROOT):
print 'No BSP(board support package) directory found!'
exit(-1)
if RTT_ROOT == None:
# get RTT_ROOT from BSP_ROOT
sys.path = sys.path + [BSP_ROOT]
try:
import rtconfig
RTT_ROOT = rtconfig.RTT_ROOT
except Exception as e:
print 'Import rtconfig.py in BSP(board support package) failed.'
print e
exit(-1)
global Rtt_Root
global BSP_Root
Rtt_Root = RTT_ROOT
BSP_Root = BSP_ROOT
def BuildHostApplication(TARGET, SConscriptFile):
sys.path = sys.path + [os.path.join(os.getcwd(), 'tools', 'host')]
from building import PrepareHostModuleBuilding
Env = Environment()
PrepareHostModuleBuilding(Env)
objs = SConscript(SConscriptFile)
target = Env.Program(TARGET, objs)
return
def BuildApplication(TARGET, SConscriptFile, BSP_ROOT = None, RTT_ROOT = None):
global Env
global Rtt_Root
global BSP_Root
# build application in host
if BSP_ROOT == None and RTT_ROOT == None and not os.getenv('BSP_ROOT'):
BuildHostApplication(TARGET, SConscriptFile)
return
# handle BSP_ROOT and RTT_ROOT
BuildEnv(BSP_ROOT, RTT_ROOT)
sys.path = sys.path + [os.path.join(Rtt_Root, 'tools'), BSP_Root]
# get configuration from BSP
import rtconfig
from rtua import GetCPPPATH
from building import PrepareModuleBuilding
linkflags = rtconfig.M_LFLAGS + ' -e main'
CPPPATH = GetCPPPATH(BSP_Root, Rtt_Root)
if rtconfig.PLATFORM == 'cl':
Env = Environment(TARGET_ARCH='x86')
Env.Append(CCFLAGS=rtconfig.M_CFLAGS)
Env.Append(LINKFLAGS=rtconfig.M_LFLAGS)
Env.Append(CPPPATH=CPPPATH)
Env.Append(LIBS='rtthread', LIBPATH=BSP_Root)
Env.Append(CPPDEFINES=['RTT_IN_MODULE'])
Env.PrependENVPath('PATH', rtconfig.EXEC_PATH)
else:
Env = Environment(tools = ['mingw'],
AS = rtconfig.AS, ASFLAGS = rtconfig.AFLAGS,
CC = rtconfig.CC, CCFLAGS = rtconfig.M_CFLAGS,
CXX = rtconfig.CXX, AR = rtconfig.AR, ARFLAGS = '-rc',
LINK = rtconfig.LINK, LINKFLAGS = linkflags,
CPPPATH = CPPPATH)
PrepareModuleBuilding(Env, Rtt_Root, BSP_Root)
objs = SConscript(SConscriptFile)
# build program
if rtconfig.PLATFORM == 'cl':
dll_target = TARGET.replace('.mo', '.dll')
target = Env.SharedLibrary(dll_target, objs)
target = Command("$TARGET", dll_target, [Move(TARGET, dll_target)])
# target = dll_target
else:
target = Env.Program(TARGET, objs)
if hasattr(rtconfig, 'M_POST_ACTION'):
Env.AddPostAction(target, rtconfig.POST_ACTION)
if hasattr(rtconfig, 'M_BIN_PATH'):
Env.AddPostAction(target, [Copy(rtconfig.M_BIN_PATH, TARGET)])
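# Typical use from an application's SConstruct (paths below are illustrative):
#
#     from ua import BuildApplication
#     BuildApplication('hello.mo', 'SConscript',
#                      BSP_ROOT='/path/to/bsp', RTT_ROOT='/path/to/rt-thread')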
| gpl-2.0 | 770,119,172,012,355,600 | 31.230769 | 79 | 0.644391 | false |
krintoxi/NoobSec-Toolkit | NoobSecToolkit /scripts/sshbackdoors/rpyc/experimental/splitbrain.py | 16 | 9095 | """
The Magnificent Splitbrain
.. versionadded:: 3.3
"""
import sys
import atexit
import threading
from contextlib import contextmanager
import functools
import gc
try:
import __builtin__ as builtins
except ImportError:
import builtins # python 3+
from types import ModuleType
router = threading.local()
routed_modules = set(["os", "os.path", "platform", "ntpath", "posixpath", "zipimport", "genericpath",
"posix", "nt", "signal", "time", "sysconfig", "_locale", "locale", "socket", "_socket", "ssl", "_ssl",
"struct", "_struct", "_symtable", "errno", "fcntl", "grp", "pwd", "select", "spwd", "syslog", "thread",
"_io", "io", "subprocess", "_subprocess", "datetime", "mmap", "msvcrt", "pdb", "bdb", "glob", "fnmatch",
#"_frozen_importlib", "imp", "exceptions"
])
class RoutedModule(ModuleType):
def __init__(self, realmod):
ModuleType.__init__(self, realmod.__name__, getattr(realmod, "__doc__", None))
object.__setattr__(self, "__realmod__", realmod)
object.__setattr__(self, "__file__", getattr(realmod, "__file__", None))
def __repr__(self):
if self.__file__:
return "<module %r from %r>" % (self.__name__, self.__file__)
else:
return "<module %r (built-in)>" % (self.__name__,)
def __dir__(self):
return dir(self.__currmod__)
def __getattribute__(self, name):
if name == "__realmod__":
return object.__getattribute__(self, "__realmod__")
elif name == "__name__":
return object.__getattribute__(self, "__name__")
elif name == "__currmod__":
modname = object.__getattribute__(self, "__name__")
if hasattr(router, "conn"):
return router.conn.modules[modname]
else:
return object.__getattribute__(self, "__realmod__")
else:
return getattr(self.__currmod__, name)
def __delattr__(self, name, val):
return setattr(self.__currmod__, name, val)
def __setattr__(self, name, val):
return setattr(self.__currmod__, name, val)
routed_sys_attrs = set(["byteorder", "platform", "getfilesystemencoding", "getdefaultencoding", "settrace",
"setprofile", "setrecursionlimit", "getprofile", "getrecursionlimit", "getsizeof", "gettrace",
"exc_clear", "exc_info", "exc_type", "last_type", "last_value", "last_traceback",
])
class RoutedSysModule(ModuleType):
def __init__(self):
ModuleType.__init__(self, "sys", sys.__doc__)
def __dir__(self):
return dir(sys)
def __getattribute__(self, name):
if name in routed_sys_attrs and hasattr(router, "conn"):
return getattr(router.conn.modules["sys"], name)
else:
return getattr(sys, name)
def __setattr__(self, name, value):
if name in routed_sys_attrs and hasattr(router, "conn"):
setattr(router.conn.modules["sys"], name, value)
else:
setattr(sys, name, value)
rsys = RoutedSysModule()
class RemoteModule(ModuleType):
def __init__(self, realmod):
ModuleType.__init__(self, realmod.__name__, getattr(realmod, "__doc__", None))
object.__setattr__(self, "__file__", getattr(realmod, "__file__", None))
def __repr__(self):
try:
self.__currmod__
except (AttributeError, ImportError):
return "<module %r (stale)>" % (self.__name__,)
if self.__file__:
return "<module %r from %r>" % (self.__name__, self.__file__)
else:
return "<module %r (built-in)>" % (self.__name__,)
def __dir__(self):
return dir(self.__currmod__)
def __getattribute__(self, name):
if name == "__name__":
return object.__getattribute__(self, "__name__")
elif name == "__currmod__":
modname = object.__getattribute__(self, "__name__")
if not hasattr(router, "conn"):
raise AttributeError("Module %r is not available in this context" % (modname,))
mod = router.conn.modules._ModuleNamespace__cache.get(modname)
if not mod:
raise AttributeError("Module %r is not available in this context" % (modname,))
return mod
else:
return getattr(self.__currmod__, name)
    def __delattr__(self, name):
        return delattr(self.__currmod__, name)
def __setattr__(self, name, val):
return setattr(self.__currmod__, name, val)
_orig_import = builtins.__import__
def _importer(modname, *args, **kwargs):
if not hasattr(router, "conn"):
return _orig_import(modname, *args, **kwargs)
existing = sys.modules.get(modname, None)
if type(existing) is RoutedModule:
return existing
mod = router.conn.modules[modname]
if existing and type(existing) is RemoteModule:
return existing
rmod = RemoteModule(mod)
sys.modules[modname] = rmod
return rmod
_enabled = False
_prev_builtins = {}
def enable_splitbrain():
"""Enables (activates) the Splitbrain machinery; must be called before entering
``splitbrain`` or ``localbrain`` contexts"""
global _enabled
if _enabled:
return
sys.modules["sys"] = rsys
for modname in routed_modules:
try:
realmod = _orig_import(modname, [], [], "*")
except ImportError:
continue
rmod = RoutedModule(realmod)
sys.modules[modname] = rmod
for ref in gc.get_referrers(realmod):
if not isinstance(ref, dict) or "__name__" not in ref or ref.get("__file__") is None:
continue
n = ref["__name__"]
if n in routed_modules or n.startswith("rpyc") or n.startswith("importlib") or n.startswith("imp"):
continue
for k, v in ref.items():
if v is realmod:
#print ("## %s.%s = %s" % (ref["__name__"], ref[k], modname))
ref[k] = rmod
builtins.__import__ = _importer
for funcname in ["open", "execfile", "file"]:
if not hasattr(builtins, funcname):
continue
def mkfunc(funcname, origfunc):
@functools.wraps(getattr(builtins, funcname))
def tlbuiltin(*args, **kwargs):
if hasattr(router, "conn"):
func = getattr(router.conn.builtins, funcname)
else:
func = origfunc
return func(*args, **kwargs)
return tlbuiltin
origfunc = getattr(builtins, funcname)
_prev_builtins[funcname] = origfunc
setattr(builtins, funcname, mkfunc(funcname, origfunc))
_enabled = True
def disable_splitbrain():
"""Disables (deactivates) the Splitbrain machinery"""
global _enabled
if not _enabled:
return
_enabled = False
for funcname, origfunc in _prev_builtins.items():
setattr(builtins, funcname, origfunc)
for modname, mod in sys.modules.items():
if isinstance(mod, RoutedModule):
sys.modules[modname] = mod.__realmod__
for ref in gc.get_referrers(mod):
if isinstance(ref, dict) and "__name__" in ref and ref.get("__file__") is not None:
for k, v in ref.items():
if v is mod:
ref[k] = mod.__realmod__
sys.modules["sys"] = sys
builtins.__import__ = _orig_import
atexit.register(disable_splitbrain)
@contextmanager
def splitbrain(conn):
"""Enter a splitbrain context in which imports take place over the given RPyC connection (expected to
be a SlaveService). You can enter this context only after calling ``enable()``"""
if not _enabled:
enable_splitbrain()
#raise ValueError("Splitbrain not enabled")
prev_conn = getattr(router, "conn", None)
prev_modules = sys.modules.copy()
router.conn = conn
prev_stdin = conn.modules.sys.stdin
prev_stdout = conn.modules.sys.stdout
prev_stderr = conn.modules.sys.stderr
conn.modules["sys"].stdin = sys.stdin
conn.modules["sys"].stdout = sys.stdout
conn.modules["sys"].stderr = sys.stderr
try:
yield
finally:
conn.modules["sys"].stdin = prev_stdin
conn.modules["sys"].stdout = prev_stdout
conn.modules["sys"].stderr = prev_stderr
sys.modules.clear()
sys.modules.update(prev_modules)
router.conn = prev_conn
if not router.conn:
del router.conn
@contextmanager
def localbrain():
"""Return to operate on the local machine. You can enter this context only after calling ``enable()``"""
if not _enabled:
raise ValueError("Splitbrain not enabled")
prev_conn = getattr(router, "conn", None)
prev_modules = sys.modules.copy()
if hasattr(router, "conn"):
del router.conn
try:
yield
finally:
sys.modules.clear()
sys.modules.update(prev_modules)
router.conn = prev_conn
if not router.conn:
del router.conn
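# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): a typical way to drive the machinery above. The host name is an
# assumption; rpyc.classic.connect() yields the SlaveService connection that
# splitbrain() expects.
#
#   import rpyc
#   from rpyc.experimental.splitbrain import (enable_splitbrain, splitbrain,
#                                             localbrain)
#
#   enable_splitbrain()
#   conn = rpyc.classic.connect("remote-host")
#   with splitbrain(conn):
#       import platform               # resolved over the RPyC connection
#       print(platform.node())        # remote hostname
#       with localbrain():
#           print(platform.node())    # local hostname again
# ---------------------------------------------------------------------------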
| gpl-2.0 | 6,479,792,155,188,995,000 | 35.821862 | 111 | 0.574491 | false |
trabacus-softapps/openerp-8.0-cc | openerp/addons/report_intrastat/report/__init__.py | 65 | 1068 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,898,483,556,413,244,000 | 47.545455 | 79 | 0.610487 | false |
hcs/mailman | src/mailman/model/tests/test_uid.py | 3 | 1472 | # Copyright (C) 2011-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Test the UID model class."""
from __future__ import absolute_import, unicode_literals
__metaclass__ = type
__all__ = [
]
import uuid
import unittest
from mailman.model.uid import UID
from mailman.testing.layers import ConfigLayer
class TestUID(unittest.TestCase):
layer = ConfigLayer
def test_record(self):
# Test that the .record() method works.
UID.record(uuid.UUID(int=11))
UID.record(uuid.UUID(int=99))
self.assertRaises(ValueError, UID.record, uuid.UUID(int=11))
def test_longs(self):
# In a non-test environment, the uuid will be a long int.
my_uuid = uuid.uuid4()
UID.record(my_uuid)
self.assertRaises(ValueError, UID.record, my_uuid)
| gpl-3.0 | -6,386,960,495,422,640,000 | 29.040816 | 78 | 0.707201 | false |
martinwicke/tensorflow | tensorflow/contrib/specs/python/summaries_test.py | 14 | 2900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for specs-related summarization functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.specs.python import specs
from tensorflow.contrib.specs.python import summaries
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SummariesTest(tf.test.TestCase):
def testStructure(self):
with self.test_session():
inputs_shape = (1, 18, 19, 5)
inputs = tf.constant(_rand(*inputs_shape))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
tf.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(summaries.tf_spec_structure(spec,
input_shape=inputs_shape),
"_ var conv var biasadd relu")
def testStructureFromTensor(self):
with self.test_session():
inputs = tf.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
tf.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
self.assertEqual(summaries.tf_spec_structure(spec, inputs),
"_ var conv var biasadd relu")
def testPrint(self):
with self.test_session():
inputs = tf.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
tf.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_print(spec, inputs)
def testSummary(self):
with self.test_session():
inputs = tf.constant(_rand(1, 18, 19, 5))
spec = "net = Cr(64, [5, 5])"
outputs = specs.create_net(spec, inputs)
tf.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
summaries.tf_spec_summary(spec, inputs)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -814,147,306,897,229,400 | 35.25 | 80 | 0.633793 | false |
PATechmasters/techmaster-ctf | api/api/logger.py | 8 | 7209 | """
Manage loggers for the api.
"""
import logging, logging.handlers, time
import api
from bson import json_util
from flask import request, has_request_context
from flask import logging as flask_logging
from sys import stdout
from datetime import datetime
critical_error_timeout = 600
log = logging.getLogger(__name__)
class StatsHandler(logging.StreamHandler):
"""
Logs statistical information into the mongodb.
"""
time_format = "%H:%M:%S %Y-%m-%d"
action_parsers = {
"api.user.create_user_request":
lambda params, result=None: {
"username": params["username"],
"new_team": params["create-new-team"]
},
"api.achievement.process_achievement":
lambda aid, data, result=None: {
"aid": aid,
"success": result[0]
},
"api.autogen.grade_problem_instance":
lambda pid, tid, key, result=None: {
"pid": pid,
"key": key,
"correct": result["correct"]
},
"api.group.create_group":
lambda uid, group_name, result=None: {
"name": group_name,
"owner": uid
},
"api.group.join_group":
lambda tid, gid, result=None: {
"gid": gid
},
"api.group.leave_group":
lambda tid, gid, result=None: {
"gid": gid
},
"api.group.delete_group":
lambda gid, result=None: {
"gid": gid
},
"api.problem.submit_key":
lambda tid, pid, key, uid=None, ip=None, result=None: {
"pid": pid,
"key": key,
"success": result["correct"]
},
"api.problem_feedback.add_problem_feedback":
lambda pid, uid, feedback, result=None: {
"pid": pid,
"feedback": feedback
},
"api.user.update_password_request":
lambda params, uid=None, check_current=False, result=None: {},
"api.utilities.request_password_reset":
lambda username, result=None: {},
"api.team.create_team":
lambda params, result=None: params,
"api.team.assign_shell_account":
lambda tid, result=None: {},
"api.app.hint":
lambda pid, source, result=None: {"pid": pid, "source": source}
}
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
"""
Store record into the db.
"""
information = get_request_information()
result = record.msg
if type(result) == dict:
information.update({
"event": result["name"],
"time": datetime.now()
})
information["pass"] = True
information["action"] = {}
if "exception" in result:
information["action"]["exception"] = result["exception"]
information["pass"] = False
elif result["name"] in self.action_parsers:
action_parser = self.action_parsers[result["name"]]
result["kwargs"]["result"] = result["result"]
action_result = action_parser(*result["args"], **result["kwargs"])
information["action"].update(action_result)
api.common.get_conn().statistics.insert(information)
class ExceptionHandler(logging.StreamHandler):
"""
Logs exceptions into mongodb.
"""
def __init__(self):
logging.StreamHandler.__init__(self)
def emit(self, record):
"""
Store record into the db.
"""
information = get_request_information()
information.update({
"event": "exception",
"time": datetime.now(),
"trace": record.msg
})
api.common.get_conn().exceptions.insert(information)
class SevereHandler(logging.handlers.SMTPHandler):
messages = {}
def __init__(self):
logging.handlers.SMTPHandler.__init__(
self,
mailhost=api.utilities.smtp_url,
fromaddr=api.utilities.from_addr,
toaddrs=admin_emails,
subject="Critical Error in {}".format(api.config.competition_name),
credentials=(api.utilities.email_username, api.utilities.email_password),
secure=()
)
def emit(self, record):
"""
Don't excessively emit the same message.
"""
last_time = self.messages.get(record.msg, None)
if last_time is None or time.time() - last_time > critical_error_timeout:
super(SevereHandler, self).emit(record)
self.messages[record.msg] = time.time()
def set_level(name, level):
"""
Get and set log level of a given logger.
Args:
name: name of logger
level: level to set
"""
logger = use(name)
if logger:
logger.setLevel(level)
def use(name):
"""
Alias for logging.getLogger(name)
Args:
name: The name of the logger.
Returns:
The logging object.
"""
return logging.getLogger(name)
def get_request_information():
"""
Returns a dictionary of contextual information about the user at the time of logging.
Returns:
The dictionary.
"""
information = {}
if has_request_context():
information["request"] = {
"api_endpoint_method": request.method,
"api_endpoint": request.path,
"ip": request.remote_addr,
"platform": request.user_agent.platform,
"browser": request.user_agent.browser,
"browser_version": request.user_agent.version,
"user_agent":request.user_agent.string
}
if api.auth.is_logged_in():
user = api.user.get_user()
team = api.user.get_team()
groups = api.team.get_groups()
information["user"] = {
"username": user["username"],
"email": user["email"],
"team_name": team["team_name"],
"school": team["school"],
"groups": [group["name"] for group in groups]
}
return information
def setup_logs(args):
"""
Initialize the api loggers.
Args:
args: dict containing the configuration options.
"""
flask_logging.create_logger = lambda app: use(app.logger_name)
if not args.get("debug", True):
set_level("werkzeug", logging.ERROR)
level = [logging.WARNING, logging.INFO, logging.DEBUG][
min(args.get("verbose", 1), 2)]
internal_error_log = ExceptionHandler()
internal_error_log.setLevel(logging.ERROR)
log.root.setLevel(level)
log.root.addHandler(internal_error_log)
if api.utilities.enable_email:
severe_error_log = SevereHandler()
severe_error_log.setLevel(logging.CRITICAL)
log.root.addHandler(severe_error_log)
stats_log = StatsHandler()
stats_log.setLevel(logging.INFO)
log.root.addHandler(stats_log)
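# Illustrative usage sketch (editor's addition, not part of the original
# module): wiring the loggers up at application start-up. The configuration
# keys mirror what setup_logs() reads above; the logger name is an assumption.
#
#   setup_logs({"debug": False, "verbose": 2})
#   log = use("api.problem")
#   log.info("problem module initialized")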
| mit | -1,053,602,422,361,827,800 | 26.515267 | 89 | 0.545013 | false |
jacobsenanaizabel/shoop | setup.py | 2 | 4305 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import setuptools
import shoop_setup_utils as utils
TOPDIR = os.path.abspath(os.path.dirname(__file__))
LONG_DESCRIPTION_FILE = os.path.join(TOPDIR, 'README.rst')
VERSION_FILE = os.path.join(TOPDIR, 'shoop', '_version.py')
# Release instructions
#
# 1. Update the Change Log (ChangeLog.rst)
# - Make sure all relevant changes since last release are listed
# - Remove the instruction bullet point ("List all changes after
# x.x.x here...")
# - Change the "Unreleased" header to appropriate version header.
# See header of the last release for example.
# 2. Update VERSION variable here: Increase and drop .post0.dev suffix
# 3. Update version and release variables in doc/conf.py
# 4. Commit changes of steps 1--3
# 5. Tag the commit (of step 4) with
# git tag -a -m "Shoop X.Y.Z" vX.Y.Z
# where X.Y.Z is the new version number (must be same as VERSION
# variable here)
# 6. Check the tag is OK and push it with
# git push origin refs/tags/vX.Y.Z
# 7. Do a post-release commit:
# - Add new "Unreleased" header and instruction bullet point to
# Change Log
# - Add ".post0.dev" suffix to VERSION variable here
NAME = 'shoop'
VERSION = '1.1.0.post0.dev'
DESCRIPTION = 'E-Commerce Platform'
AUTHOR = 'Shoop Ltd.'
AUTHOR_EMAIL = '[email protected]'
URL = 'http://shoop.io/'
LICENSE = 'AGPL-3.0' # https://spdx.org/licenses/
CLASSIFIERS = """
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: GNU Affero General Public License v3
Natural Language :: English
Programming Language :: JavaScript
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
Topic :: Internet :: WWW/HTTP :: Site Management
Topic :: Office/Business
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
""".strip().splitlines()
utils.set_exclude_patters([
'build', 'doc',
'node_modules', 'bower_components',
'var', '__pycache__', 'LC_MESSAGES',
'.tox', 'venv*',
'.git', '.gitignore',
'local_settings.py',
])
REQUIRES = [
'Babel==1.3',
'Django>=1.8,<1.9',
'django-bootstrap3==6.1.0',
'django-countries==3.3',
'django-enumfields==0.7.4',
'django-filer==0.9.12',
'django-jinja==1.4.1',
'django-mptt==0.7.4',
'django-parler==1.5',
'django-polymorphic==0.7.1',
'django-registration-redux==1.2',
'django-timezone-field==1.2',
'djangorestframework==3.1.3',
'factory-boy==2.5.2',
'fake-factory==0.5.2',
'jsonfield==1.0.3',
'Markdown==2.6.2',
'pytz==2015.4',
'requests==2.7.0',
'six==1.9.0',
'Jinja2==2.8'
]
REQUIRES_FOR_PYTHON2_ONLY = [
'enum34==1.0.4',
]
EXTRAS_REQUIRE = {
':python_version=="2.7"': REQUIRES_FOR_PYTHON2_ONLY,
'docs': [
'Sphinx==1.3.1',
],
'testing': utils.get_test_requirements_from_tox_ini(TOPDIR),
'coding-style': [
'flake8==2.4.1',
'mccabe==0.3.1',
'pep8==1.5.7',
'pep8-naming==0.2.2',
'pyflakes==0.8.1',
],
}
EXTRAS_REQUIRE['everything'] = list(set(sum(EXTRAS_REQUIRE.values(), [])))
if __name__ == '__main__':
if 'register' in sys.argv or 'upload' in sys.argv:
raise EnvironmentError('Registering and uploading is blacklisted')
version = utils.get_version(VERSION, TOPDIR, VERSION_FILE)
utils.write_version_to_file(version, VERSION_FILE)
setuptools.setup(
name=NAME,
version=version,
description=DESCRIPTION,
long_description=utils.get_long_description(LONG_DESCRIPTION_FILE),
url=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
classifiers=CLASSIFIERS,
install_requires=REQUIRES,
tests_require=EXTRAS_REQUIRE['testing'],
extras_require=EXTRAS_REQUIRE,
packages=utils.find_packages(),
include_package_data=True,
cmdclass=utils.COMMANDS,
)
| agpl-3.0 | -4,242,194,638,524,201,500 | 29.75 | 75 | 0.641347 | false |
sfanous/Pyecobee | pyecobee/objects/device.py | 1 | 2502 | """
This module is home to the Device class
"""
from pyecobee.ecobee_object import EcobeeObject
class Device(EcobeeObject):
"""
This class has been auto generated by scraping
https://www.ecobee.com/home/developer/api/documentation/v1/objects/Device.shtml
Attribute names have been generated by converting ecobee property
names from camelCase to snake_case.
A getter property has been generated for each attribute.
A setter property has been generated for each attribute whose value
of READONLY is "no".
An __init__ argument without a default value has been generated if
the value of REQUIRED is "yes".
An __init__ argument with a default value of None has been generated
if the value of REQUIRED is "no".
"""
__slots__ = ['_device_id', '_name', '_sensors', '_outputs']
attribute_name_map = {
'device_id': 'deviceId',
'deviceId': 'device_id',
'name': 'name',
'sensors': 'sensors',
'outputs': 'outputs',
}
attribute_type_map = {
'device_id': 'int',
'name': 'six.text_type',
'sensors': 'List[Sensor]',
'outputs': 'List[Output]',
}
def __init__(self, device_id=None, name=None, sensors=None, outputs=None):
"""
Construct a Device instance
"""
self._device_id = device_id
self._name = name
self._sensors = sensors
self._outputs = outputs
@property
def device_id(self):
"""
Gets the device_id attribute of this Device instance.
:return: The value of the device_id attribute of this Device
instance.
:rtype: int
"""
return self._device_id
@property
def name(self):
"""
Gets the name attribute of this Device instance.
:return: The value of the name attribute of this Device
instance.
:rtype: six.text_type
"""
return self._name
@property
def sensors(self):
"""
Gets the sensors attribute of this Device instance.
:return: The value of the sensors attribute of this Device
instance.
:rtype: List[Sensor]
"""
return self._sensors
@property
def outputs(self):
"""
Gets the outputs attribute of this Device instance.
:return: The value of the outputs attribute of this Device
instance.
:rtype: List[Output]
"""
return self._outputs
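# Illustrative usage sketch (editor's addition, not part of the original
# module): constructing a Device and reading its generated getter properties.
# The values below are made up.
if __name__ == '__main__':
    _demo_device = Device(device_id=1, name='Thermostat', sensors=[], outputs=[])
    print('{0}: {1}'.format(_demo_device.device_id, _demo_device.name))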
| mit | 2,657,890,437,476,544,000 | 24.793814 | 83 | 0.594724 | false |
aisipos/django | django/db/models/fields/files.py | 9 | 19297 | import datetime
import os
import posixpath
import warnings
from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import default_storage
from django.db.models import signals
from django.db.models.fields import Field
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_str, force_text
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, this class can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self, mode='rb'):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, mode)
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._get_file(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content, max_length=self.field.max_length)
setattr(self.instance, self.field.name, self.name)
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> with open('/path/to/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
if instance is None:
return self
# This is slightly complicated, so worth an explanation.
        # instance.file needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# Make sure that the instance is correct.
elif isinstance(file, FieldFile) and instance is not file.instance:
file.instance = instance
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self._unique_set_explicitly = 'unique' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FileField, self).check(**kwargs)
errors.extend(self._check_unique())
errors.extend(self._check_primary_key())
return errors
def _check_unique(self):
if self._unique_set_explicitly:
return [
checks.Error(
"'unique' is not a valid argument for a %s." % self.__class__.__name__,
obj=self,
id='fields.E200',
)
]
else:
return []
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
obj=self,
id='fields.E201',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super(FileField, self).get_prep_value(value)
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name, **kwargs):
super(FileField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
warnings.warn(
'FileField now delegates file name and folder processing to the '
'storage. get_directory_name() will be removed in Django 2.0.',
RemovedInDjango20Warning, stacklevel=2
)
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
warnings.warn(
'FileField now delegates file name and folder processing to the '
'storage. get_filename() will be removed in Django 2.0.',
RemovedInDjango20Warning, stacklevel=2
)
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
"""
Apply (if callable) or prepend (if a string) upload_to to the filename,
then delegate further processing of the name to the storage backend.
Until the storage layer, all file paths are expected to be Unix style
(with forward slashes).
"""
if callable(self.upload_to):
filename = self.upload_to(instance, filename)
else:
dirname = force_text(datetime.datetime.now().strftime(force_str(self.upload_to)))
filename = posixpath.join(dirname, filename)
return self.storage.generate_filename(filename)
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super(ImageField, self).contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field)) or
(self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
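# Illustrative usage sketch (editor's addition, not part of Django): a model
# definition that exercises the fields above. The model, its fields, and the
# upload_to callable are assumptions for the example only, and it presumes
# "from django.db import models" plus a configured Django project.
#
#   def user_directory_path(instance, filename):
#       return 'user_{0}/{1}'.format(instance.user.id, filename)
#
#   class Document(models.Model):
#       attachment = models.FileField(upload_to=user_directory_path)
#       photo = models.ImageField(upload_to='photos/%Y/%m/%d',
#                                 width_field='photo_width',
#                                 height_field='photo_height')
#       photo_width = models.PositiveIntegerField(null=True)
#       photo_height = models.PositiveIntegerField(null=True)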
| bsd-3-clause | -693,912,462,279,046,000 | 38.787629 | 104 | 0.627662 | false |
atilag/qiskit-sdk-py | qiskit/mapper/_coupling.py | 1 | 6637 | # -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Directed graph object for representing coupling between qubits.
The nodes of the graph correspond to named qubits and the directed edges
indicate which qubits are coupled and the permitted direction of CNOT gates.
The object has a distance function that can be used to map quantum circuits
onto a device with this coupling.
"""
from collections import OrderedDict
import networkx as nx
from ._couplingerror import CouplingError
def coupling_dict2list(couplingdict):
"""Convert coupling map dictionary into list.
Example dictionary format: {0: [1, 2], 1: [2]}
Example list format: [[0, 1], [0, 2], [1, 2]]
We do not do any checking of the input.
Return coupling map in list format.
"""
if not couplingdict:
return None
couplinglist = []
for ctl, tgtlist in couplingdict.items():
for tgt in tgtlist:
couplinglist.append([ctl, tgt])
return couplinglist
def coupling_list2dict(couplinglist):
"""Convert coupling map list into dictionary.
Example list format: [[0, 1], [0, 2], [1, 2]]
Example dictionary format: {0: [1, 2], 1: [2]}
We do not do any checking of the input.
Return coupling map in dict format.
"""
if not couplinglist:
return None
couplingdict = {}
for pair in couplinglist:
if pair[0] in couplingdict:
couplingdict[pair[0]].append(pair[1])
else:
couplingdict[pair[0]] = [pair[1]]
return couplingdict
class Coupling:
"""
Directed graph specifying fixed coupling.
Nodes correspond to qubits and directed edges correspond to permitted
CNOT gates
"""
# pylint: disable=invalid-name
def __init__(self, couplingdict=None):
"""
Create coupling graph.
By default, the coupling graph has no nodes. The optional couplingdict
specifies the graph as an adjacency list. For example,
couplingdict = {0: [1, 2], 1: [2]}.
"""
# self.qubits is dict from qubit (regname,idx) tuples to node indices
self.qubits = OrderedDict()
# self.index_to_qubit is a dict from node indices to qubits
self.index_to_qubit = {}
# self.node_counter is integer counter for labeling nodes
self.node_counter = 0
# self.G is the coupling digraph
self.G = nx.DiGraph()
# self.dist is a dict of dicts from node pairs to distances
# it must be computed, it is the distance on the digraph
self.dist = None
# Add edges to the graph if the couplingdict is present
if couplingdict is not None:
for v0, alist in couplingdict.items():
for v1 in alist:
regname = "q"
self.add_edge((regname, v0), (regname, v1))
self.compute_distance()
def size(self):
"""Return the number of qubits in this graph."""
return len(self.qubits)
def get_qubits(self):
"""Return the qubits in this graph as a sorted (qreg, index) tuples."""
return sorted(list(self.qubits.keys()))
def get_edges(self):
"""Return a list of edges in the coupling graph.
Each edge is a pair of qubits and each qubit is a tuple (qreg, index).
"""
return list(map(lambda x: (self.index_to_qubit[x[0]],
self.index_to_qubit[x[1]]), self.G.edges()))
def add_qubit(self, name):
"""
Add a qubit to the coupling graph.
name = tuple (regname, idx) for qubit
"""
if name in self.qubits:
raise CouplingError("%s already in coupling graph" % name)
self.node_counter += 1
self.G.add_node(self.node_counter)
self.G.node[self.node_counter]["name"] = name
self.qubits[name] = self.node_counter
self.index_to_qubit[self.node_counter] = name
def add_edge(self, s_name, d_name):
"""
Add directed edge to coupling graph.
s_name = source qubit tuple
d_name = destination qubit tuple
"""
if s_name not in self.qubits:
self.add_qubit(s_name)
if d_name not in self.qubits:
self.add_qubit(d_name)
self.G.add_edge(self.qubits[s_name], self.qubits[d_name])
def connected(self):
"""
Test if the graph is connected.
Return True if connected, False otherwise
"""
return nx.is_weakly_connected(self.G)
def compute_distance(self):
"""
Compute the undirected distance function on pairs of nodes.
The distance map self.dist is computed from the graph using
all_pairs_shortest_path_length.
"""
if not self.connected():
raise CouplingError("coupling graph not connected")
lengths = dict(nx.all_pairs_shortest_path_length(self.G.to_undirected()))
self.dist = {}
for i in self.qubits.keys():
self.dist[i] = {}
for j in self.qubits.keys():
self.dist[i][j] = lengths[self.qubits[i]][self.qubits[j]]
def distance(self, q1, q2):
"""Return the undirected distance between qubit q1 to qubit q2."""
if self.dist is None:
raise CouplingError("distance has not been computed")
if q1 not in self.qubits:
raise CouplingError("%s not in coupling graph" % q1)
if q2 not in self.qubits:
raise CouplingError("%s not in coupling graph" % q2)
return self.dist[q1][q2]
def __str__(self):
"""Return a string representation of the coupling graph."""
s = "qubits: "
s += ", ".join(["%s[%d] @ %d" % (k[0], k[1], v)
for k, v in self.qubits.items()])
s += "\nedges: "
s += ", ".join(["%s[%d]-%s[%d]" % (e[0][0], e[0][1], e[1][0], e[1][1])
for e in self.get_edges()])
return s
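# Illustrative usage sketch (editor's addition, not part of the original
# module): building a coupling graph from an adjacency dict and querying it.
if __name__ == '__main__':
    _demo_coupling = Coupling({0: [1, 2], 1: [2]})
    print(_demo_coupling.size())                          # 3
    print(_demo_coupling.distance(("q", 0), ("q", 2)))    # 1
    print(coupling_dict2list({0: [1, 2], 1: [2]}))        # [[0, 1], [0, 2], [1, 2]]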
| apache-2.0 | 4,709,168,070,103,641,000 | 33.388601 | 81 | 0.598614 | false |
LeeKamentsky/CellProfiler | cellprofiler/tests/test_workspace.py | 2 | 2460 | """test_workspace.py - test the workspace
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
import logging
logger = logging.getLogger(__name__)
import os
import h5py
import tempfile
import unittest
import cellprofiler.measurements as cpmeas
import cellprofiler.pipeline as cpp
import cellprofiler.workspace as cpw
from cellprofiler.utilities.hdf5_dict import \
FILE_LIST_GROUP, TOP_LEVEL_GROUP_NAME
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.workspace_files = []
def tearDown(self):
for path in self.workspace_files:
try:
os.remove(path)
except:
logger.warn("Failed to close file %s" % path,
exc_info=1)
def make_workspace_file(self):
'''Make a very basic workspace file'''
pipeline = cpp.Pipeline()
pipeline.init_modules()
m = cpmeas.Measurements()
workspace = cpw.Workspace(pipeline, None, m, None, m, None)
fd, path = tempfile.mkstemp(".cpproj")
file_list = workspace.get_file_list()
file_list.add_files_to_filelist(
["http://cellprofiler.org/ExampleFlyImages/01_POS002_D.TIF"])
workspace.save(path)
self.workspace_files.append(path)
os.close(fd)
return path
def test_01_01_is_workspace_file(self):
path = self.make_workspace_file()
self.assertTrue(cpw.is_workspace_file(path))
def test_01_02_is_not_workspace_file(self):
self.assertFalse(cpw.is_workspace_file(__file__))
for group in TOP_LEVEL_GROUP_NAME, FILE_LIST_GROUP:
path = self.make_workspace_file()
h5file = h5py.File(path)
del h5file[group]
h5file.close()
self.assertFalse(cpw.is_workspace_file(path))
def test_01_03_file_handle_closed(self):
# regression test of issue #1326
path = self.make_workspace_file()
self.assertTrue(cpw.is_workspace_file(path))
os.remove(path)
self.workspace_files.remove(path)
self.assertFalse(os.path.isfile(path))
| gpl-2.0 | -2,126,151,733,260,320,500 | 31.381579 | 73 | 0.629268 | false |
magnusax/ml-meta-wrapper | gazer/visualize.py | 1 | 1824 | import sys
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
class Visualizer():
def __init__(self, *args):
pass
def show_performance(self, list_of_tuples, fig_size=(9,9), font_scale=1.1, file=''):
"""
Parameters: list_of_tuples:
- list containing (clf_name, clf_performance) tuples for each
classifier we wish to visualize
fig_size:
- set figure size (default: (9,9))
font_scale:
- text scale in seaborn plots (default: 1.1)
file:
- string containing a valid filename (default: '')
Output: f: (matplotlib.pyplot.figure object)
"""
if not (isinstance(list_of_tuples, list) and isinstance(list_of_tuples[0], tuple)):
raise ValueError("Expecting a list of tuples")
sns.set(font_scale=font_scale)
sns.set_style("whitegrid")
data = list()
for name, value in list_of_tuples: data.append([name, value])
data = pd.DataFrame(data, columns=['classifier', 'performance'])
data.sort_values('performance', inplace=True, ascending=False)
"""
Close all figures (can close individual figure using plt.close(f)
where f is a matplotlib.pyplot.figure object)
"""
plt.close('all')
f = plt.figure(figsize=fig_size)
sns.barplot(x='performance', y='classifier', data=data)
plt.xlabel('performance')
if len(file)>1:
try:
plt.savefig(file)
except:
pass
return f
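# Illustrative usage sketch (editor's addition, not part of the original
# module): classifier names and scores below are made up.
#
#   viz = Visualizer()
#   fig = viz.show_performance([("logreg", 0.92), ("random_forest", 0.88)],
#                              fig_size=(6, 4), file="performance.png")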
if __name__ == '__main__':
sys.exit(-1) | mit | -1,814,512,967,789,070,800 | 34.784314 | 91 | 0.51864 | false |
cisco-openstack/neutron | neutron/policy.py | 5 | 17481 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy engine for neutron. Largely copied from nova.
"""
import collections
import logging as std_logging
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
from oslo_utils import importutils
import six
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
_ENFORCER = None
ADMIN_CTX_POLICY = 'context_is_admin'
ADVSVC_CTX_POLICY = 'context_is_advsvc'
def reset():
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def init(conf=cfg.CONF, policy_file=None):
"""Init an instance of the Enforcer class."""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer(conf, policy_file=policy_file)
_ENFORCER.load_rules(True)
def refresh(policy_file=None):
"""Reset policy and init a new instance of Enforcer."""
reset()
init(policy_file=policy_file)
def get_resource_and_action(action, pluralized=None):
"""Extract resource and action (write, read) from api operation."""
data = action.split(':', 1)[0].split('_', 1)
resource = pluralized or ("%ss" % data[-1])
return (resource, data[0] != 'get')
def set_rules(policies, overwrite=True):
"""Set rules based on the provided dict of rules.
:param policies: New policies to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
init()
_ENFORCER.set_rules(policies, overwrite)
def _is_attribute_explicitly_set(attribute_name, resource, target, action):
"""Verify that an attribute is present and is explicitly set."""
if 'update' in action:
# In the case of update, the function should not pay attention to a
# default value of an attribute, but check whether it was explicitly
# marked as being updated instead.
return (attribute_name in target[const.ATTRIBUTES_TO_UPDATE] and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
return ('default' in resource[attribute_name] and
attribute_name in target and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
target[attribute_name] != resource[attribute_name]['default'])
def _should_validate_sub_attributes(attribute, sub_attr):
"""Verify that sub-attributes are iterable and should be validated."""
validate = attribute.get('validate')
return (validate and isinstance(sub_attr, collections.Iterable) and
any([k.startswith('type:dict') and
v for (k, v) in six.iteritems(validate)]))
def _build_subattr_match_rule(attr_name, attr, action, target):
"""Create the rule to match for sub-attribute policy checks."""
# TODO(salv-orlando): Instead of relying on validator info, introduce
# typing for API attributes
# Expect a dict as type descriptor
validate = attr['validate']
key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
if not key:
LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
attr_name)
return
data = validate[key[0]]
if not isinstance(data, dict):
LOG.debug("Attribute type descriptor is not a dict. Unable to "
"generate any sub-attr policy rule for %s.",
attr_name)
return
sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
(action, attr_name,
sub_attr_name)) for
sub_attr_name in data if sub_attr_name in
target[attr_name]]
return policy.AndCheck(sub_attr_rules)
def _process_rules_list(rules, match_rule):
"""Recursively walk a policy rule to extract a list of match entries."""
if isinstance(match_rule, policy.RuleCheck):
rules.append(match_rule.match)
elif isinstance(match_rule, policy.AndCheck):
for rule in match_rule.rules:
_process_rules_list(rules, rule)
return rules
def _build_match_rule(action, target, pluralized):
"""Create the rule to match for a given action.
The policy rule to be matched is built in the following way:
1) add entries for matching permission on objects
2) add an entry for the specific action (e.g.: create_network)
3) add an entry for attributes of a resource for which the action
is being executed (e.g.: create_network:shared)
4) add an entry for sub-attributes of a resource for which the
action is being executed
(e.g.: create_router:external_gateway_info:network_id)
"""
match_rule = policy.RuleCheck('rule', action)
resource, is_write = get_resource_and_action(action, pluralized)
# Attribute-based checks shall not be enforced on GETs
if is_write:
# assigning to variable with short name for improving readability
res_map = attributes.RESOURCE_ATTRIBUTE_MAP
if resource in res_map:
for attribute_name in res_map[resource]:
if _is_attribute_explicitly_set(attribute_name,
res_map[resource],
target, action):
attribute = res_map[resource][attribute_name]
if 'enforce_policy' in attribute:
attr_rule = policy.RuleCheck('rule', '%s:%s' %
(action, attribute_name))
# Build match entries for sub-attributes
if _should_validate_sub_attributes(
attribute, target[attribute_name]):
attr_rule = policy.AndCheck(
[attr_rule, _build_subattr_match_rule(
attribute_name, attribute,
action, target)])
match_rule = policy.AndCheck([match_rule, attr_rule])
return match_rule
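# Illustrative sketch (editor's addition): for action "create_network" with a
# target of {"shared": True}, _build_match_rule returns approximately
# AndCheck([RuleCheck("rule", "create_network"),
#           RuleCheck("rule", "create_network:shared")]).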
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handling backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
"""Resource ownership check.
This check verifies the owner of the current resource, or of another
resource referenced by the one under analysis.
In the former case it falls back to a regular GenericCheck, whereas
in the latter case it leverages the plugin to load the referenced
resource and perform the check.
"""
def __init__(self, kind, match):
# Process the match
try:
self.target_field = re.findall(r'^\%\((.*)\)s$',
match)[0]
except IndexError:
err_reason = (_("Unable to identify a target field from:%s. "
"Match should be in the form %%(<field_name>)s") %
match)
LOG.exception(err_reason)
raise exceptions.PolicyInitError(
policy="%s:%s" % (kind, match),
reason=err_reason)
super(OwnerCheck, self).__init__(kind, match)
def __call__(self, target, creds, enforcer):
if self.target_field not in target:
# policy needs a plugin check
# target field is in the form resource:field
# however if they're not separated by a colon, use an underscore
# as a separator for backward compatibility
def do_split(separator):
parent_res, parent_field = self.target_field.split(
separator, 1)
return parent_res, parent_field
for separator in (':', '_'):
try:
parent_res, parent_field = do_split(separator)
break
except ValueError:
LOG.debug("Unable to find ':' as separator in %s.",
self.target_field)
else:
# If we are here split failed with both separators
err_reason = (_("Unable to find resource name in %s") %
self.target_field)
LOG.exception(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
"%ss" % parent_res, None)
if not parent_foreign_key:
err_reason = (_("Unable to verify match:%(match)s as the "
"parent resource: %(res)s was not found") %
{'match': self.match, 'res': parent_res})
LOG.exception(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
# NOTE(salv-orlando): This check currently assumes the parent
# resource is handled by the core plugin. It might be worth
# having a way to map resources to plugins so to make this
# check more general
# NOTE(ihrachys): if import is put in global, circular
# import failure occurs
manager = importutils.import_module('neutron.manager')
f = getattr(manager.NeutronManager.get_instance().plugin,
'get_%s' % parent_res)
# f *must* exist, if not found it is better to let neutron
# explode. Check will be performed with admin context
context = importutils.import_module('neutron.context')
try:
data = f(context.get_admin_context(),
target[parent_foreign_key],
fields=[parent_field])
target[self.target_field] = data[parent_field]
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Policy check error while calling %s!'),
f)
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False
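# Illustrative sketch, not part of the original module: in policy.json the
# 'tenant_id' check registered above appears either as a plain ownership test
# or with a parent-resource reference, e.g.
#
#   "admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
#   "admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s"
#
# In the second form the match is split into a parent resource ("network") and
# a field ("tenant_id"), and the owning tenant of that network is loaded
# through the core plugin before the comparison. Rule names here are examples
# only, not guaranteed to match any particular policy.json shipped with Neutron.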
@policy.register('field')
class FieldCheck(policy.Check):
def __init__(self, kind, match):
# Process the match
resource, field_value = match.split(':', 1)
field, value = field_value.split('=', 1)
super(FieldCheck, self).__init__(kind, '%s:%s:%s' %
(resource, field, value))
# Value might need conversion - we need help from the attribute map
try:
attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
conv_func = attr['convert_to']
except KeyError:
conv_func = lambda x: x
self.field = field
self.value = conv_func(value)
def __call__(self, target_dict, cred_dict, enforcer):
target_value = target_dict.get(self.field)
# target_value might be a boolean, explicitly compare with None
if target_value is None:
LOG.debug("Unable to find requested field: %(field)s in target: "
"%(target_dict)s",
{'field': self.field, 'target_dict': target_dict})
return False
return target_value == self.value
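# Illustrative sketch, not part of the original module: a 'field' check
# compares a literal against an attribute of the request target, e.g. a
# policy.json rule such as
#
#   "shared": "field:networks:shared=True"
#
# builds FieldCheck with resource "networks", field "shared" and value True,
# the value being converted through the resource attribute map when a
# 'convert_to' function is defined for that attribute.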
def _prepare_check(context, action, target, pluralized):
"""Prepare rule, target, and credentials for the policy engine."""
# Compare with None to distinguish case in which target is {}
if target is None:
target = {}
match_rule = _build_match_rule(action, target, pluralized)
credentials = context.to_dict()
return match_rule, target, credentials
def log_rule_list(match_rule):
if LOG.isEnabledFor(std_logging.DEBUG):
rules = _process_rules_list([], match_rule)
LOG.debug("Enforcing rules: %s", rules)
def check(context, action, target, plugin=None, might_not_exist=False,
pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param might_not_exist: If True the policy check is skipped (and the
function returns True) if the specified policy does not exist.
Defaults to false.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:return: Returns True if access is permitted else False.
"""
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
if context.is_admin:
return True
if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
return True
match_rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
result = _ENFORCER.enforce(match_rule,
target,
credentials,
pluralized=pluralized)
# logging applied rules in case of failure
if not result:
log_rule_list(match_rule)
return result
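# Minimal usage sketch (assumption, not from the original file): an API layer
# would typically call check() with the request context and a target built from
# the resource being accessed, e.g.
#
#   if not check(context, 'get_network', {'tenant_id': net['tenant_id']}):
#       raise exceptions.NotFound()
#
# The exception raised on failure is up to the caller; check() only returns a
# boolean.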
def enforce(context, action, target, plugin=None, pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: neutron context
:param action: string representing the action to be checked
this should be colon separated for clarity.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:raises oslo_policy.policy.PolicyNotAuthorized:
if verification fails.
"""
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
if context.is_admin:
return True
rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
try:
result = _ENFORCER.enforce(rule, target, credentials, action=action,
do_raise=True)
except policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception():
log_rule_list(rule)
LOG.debug("Failed policy check for '%s'", action)
return result
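# Minimal usage sketch (assumption, not from the original file): unlike
# check(), enforce() raises PolicyNotAuthorized on failure, so callers usually
# invoke it directly and let the exception propagate, e.g.
#
#   enforce(context, 'update_network', {'tenant_id': net['tenant_id'],
#                                       'shared': True})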
def check_is_admin(context):
"""Verify context has admin rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
if ADMIN_CTX_POLICY not in _ENFORCER.rules:
return False
return _ENFORCER.enforce(ADMIN_CTX_POLICY, credentials, credentials)
def check_is_advsvc(context):
"""Verify context has advsvc rights according to policy settings."""
init()
# the target is user-self
credentials = context.to_dict()
if ADVSVC_CTX_POLICY not in _ENFORCER.rules:
return False
return _ENFORCER.enforce(ADVSVC_CTX_POLICY, credentials, credentials)
def _extract_roles(rule, roles):
if isinstance(rule, policy.RoleCheck):
roles.append(rule.match.lower())
elif isinstance(rule, policy.RuleCheck):
_extract_roles(_ENFORCER.rules[rule.match], roles)
elif hasattr(rule, 'rules'):
for rule in rule.rules:
_extract_roles(rule, roles)
| apache-2.0 | 1,550,712,202,266,313,000 | 40.228774 | 79 | 0.603455 | false |
zhaochao/fuel-web | fuel_upgrade_system/fuel_upgrade/fuel_upgrade/tests/test_nailgun_client.py | 4 | 4592 | # -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
from fuel_upgrade.clients import NailgunClient
from fuel_upgrade.tests import base
class TestNailgunClient(base.BaseTestCase):
def setUp(self):
mock_keystone = mock.MagicMock()
self.mock_request = mock_keystone.request
with mock.patch(
'fuel_upgrade.clients.nailgun_client.KeystoneClient',
return_value=mock_keystone):
self.nailgun = NailgunClient('127.0.0.1', 8000)
def test_create_release(self):
        # test normal behavior
self.mock_request.post.return_value = self.mock_requests_response(
201, '{ "id": "42" }')
response = self.nailgun.create_release({
'name': 'Havana on Ubuntu 12.04'})
self.assertEqual(response, {'id': '42'})
# test failed result
self.mock_request.post.return_value.status_code = 409
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.create_release,
{'name': 'Havana on Ubuntu 12.04'})
def test_delete_release(self):
        # test normal behavior
for status in (200, 204):
self.mock_request.delete.return_value = \
self.mock_requests_response(status, 'No Content')
response = self.nailgun.remove_release(42)
self.assertEqual(response, 'No Content')
# test failed result
self.mock_request.delete.return_value = self.mock_requests_response(
409, 'Conflict')
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.remove_release,
42)
def test_create_notification(self):
        # test normal behavior
self.mock_request.post.return_value = self.mock_requests_response(
201,
'{ "id": "42" }')
response = self.nailgun.create_notification({
'topic': 'release',
'message': 'New release available!'})
self.assertEqual(response, {'id': '42'})
# test failed result
self.mock_request.post.return_value.status_code = 409
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.create_notification,
{'topic': 'release',
'message': 'New release available!'})
def test_delete_notification(self):
        # test normal behavior
for status in (200, 204):
self.mock_request.delete.return_value = \
self.mock_requests_response(status, 'No Content')
response = self.nailgun.remove_notification(42)
self.assertEqual(response, 'No Content')
# test failed result
self.mock_request.delete.return_value = self.mock_requests_response(
409, 'Conflict')
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.remove_notification,
42)
def test_get_tasks(self):
# test positive cases
self.mock_request.get.return_value = self.mock_requests_response(
200, '[1,2,3]')
response = self.nailgun.get_tasks()
self.assertEqual(response, [1, 2, 3])
# test negative cases
self.mock_request.get.return_value = self.mock_requests_response(
502, 'Bad gateway')
self.assertRaises(
requests.exceptions.HTTPError, self.nailgun.get_tasks)
def test_put_deployment_tasks(self):
release = {'id': '1'}
tasks = []
self.mock_request.put.return_value = self.mock_requests_response(
200, '[]')
response = self.nailgun.put_deployment_tasks(release, tasks)
self.assertEqual(response, tasks)
self.mock_request.put.return_value = self.mock_requests_response(
502, 'Bad gateway')
self.assertRaises(
requests.exceptions.HTTPError,
self.nailgun.put_deployment_tasks,
release, tasks)
| apache-2.0 | 7,770,245,066,555,240,000 | 33.526316 | 78 | 0.612152 | false |
CIFASIS/pylearn2 | pylearn2/cross_validation/train_cv_extensions.py | 30 | 4560 | """
Cross-validation training extensions.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
import os
from pylearn2.train import SerializationGuard
from pylearn2.train_extensions.best_params import MonitorBasedSaveBest
from pylearn2.utils import serial
class TrainCVExtension(object):
"""
TrainCV extension class. This class operates on the Train objects
corresponding to each fold of cross-validation, and therefore does not
implement an on_monitor method.
"""
def setup(self, trainers):
"""
Set up training extension.
Parameters
----------
trainers : list
List of Train objects belonging to the parent TrainCV object.
"""
def on_save(self, trainers):
"""
Called by TrainCV just before saving models.
Parameters
----------
trainers : list
List of Train objects belonging to the parent TrainCV object.
"""
class MonitorBasedSaveBestCV(TrainCVExtension):
"""
Save best model for each cross-validation fold. Based on
train_extensions.best_params.MonitorBasedSaveBest.
Parameters
----------
channel_name : str
Channel to monitor.
save_path : str or None, optional
Output filename. If None (the default), store_best_model must be
true.
store_best_model : bool, optional
Whether to store the best model in memory. If False (the default),
save_path must be defined. Note that the best model from each child
trainer must be accessed through the extensions for that trainer.
higher_is_better : bool, optional
Whether a higher channel value indicates a better model.
tag_key : str, optional
Unique key to associate with the best model. If provided, this key
will be modified to have a unique value for each child model.
save_folds : bool
Whether to write individual files for each cross-validation fold.
Only used if save_path is not None.
"""
def __init__(self, channel_name, save_path=None, store_best_model=False,
higher_is_better=False, tag_key=None, save_folds=False):
self.channel_name = channel_name
assert save_path is not None or store_best_model, (
"Either save_path must be defined or store_best_model must be " +
"True. (Or both.)")
self.save_path = save_path
self.store_best_model = store_best_model
self.higher_is_better = higher_is_better
self.best_cost = np.inf
self.best_model = None
self.tag_key = tag_key
self.save_folds = save_folds
def setup(self, trainers):
"""
Add tracking to all trainers.
Parameters
----------
trainers : list
List of Train objects belonging to the parent TrainCV object.
"""
for k, trainer in enumerate(trainers):
if self.save_path is not None and self.save_folds:
path, ext = os.path.splitext(self.save_path)
save_path = path + '-{}'.format(k) + ext
else:
save_path = None
if self.tag_key is not None:
tag_key = '{}-{}'.format(self.tag_key, k)
else:
tag_key = None
extension = MonitorBasedSaveBest(
self.channel_name, save_path=save_path, store_best_model=True,
higher_is_better=self.higher_is_better, tag_key=tag_key)
trainer.extensions.append(extension)
def on_save(self, trainers):
"""
Save best model from each cross-validation fold.
Parameters
----------
trainers : list
List of Train objects belonging to the parent TrainCV object.
"""
if self.save_path is None:
return
models = []
for trainer in trainers:
for extension in trainer.extensions:
if isinstance(extension, MonitorBasedSaveBest):
models.append(extension.best_model)
break
assert len(models) == len(trainers)
try:
for trainer in trainers:
trainer.dataset._serialization_guard = SerializationGuard()
serial.save(self.save_path, models, on_overwrite='backup')
finally:
for trainer in trainers:
trainer.dataset._serialization_guard = None
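# Illustrative sketch, not part of the original module: the extension is meant
# to be handed to a TrainCV object alongside the per-fold trainers, roughly
#
#   ext = MonitorBasedSaveBestCV(channel_name='valid_y_misclass',
#                                save_path='best_models.pkl',
#                                save_folds=True)
#   trainer = TrainCV(..., cv_extensions=[ext])
#
# The TrainCV constructor arguments (including the 'cv_extensions' keyword) are
# assumed here; see pylearn2.cross_validation.TrainCV for the exact signature.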
| bsd-3-clause | 729,259,858,154,374,000 | 33.80916 | 78 | 0.602851 | false |
biocore/pyqi | tests/test_util.py | 1 | 1154 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2013, The BiPy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import division
__credits__ = ["Greg Caporaso", "Daniel McDonald", "Doug Wendel",
"Jai Ram Rideout"]
import pyqi
from unittest import TestCase, main
from pyqi.util import get_version_string
from pyqi.core.exception import MissingVersionInfoError
class UtilTests(TestCase):
def test_get_version_string(self):
"""Test extracting a version string given a module string."""
exp = pyqi.__version__
obs = get_version_string('pyqi')
self.assertEqual(obs, exp)
obs = get_version_string('pyqi.interfaces.optparse.config')
self.assertEqual(obs, exp)
with self.assertRaises(ImportError):
_ = get_version_string('hopefully.bogus.python.module')
if __name__ == '__main__':
main()
| bsd-3-clause | -5,579,018,053,600,460,000 | 31.055556 | 78 | 0.582322 | false |
brigittebigi/proceed | proceed/bin/guimanager.py | 1 | 6375 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# Proceed is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Proceed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Proceed. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = """epytext"""
__authors___ = """Brigitte Bigi ([email protected])"""
__copyright__ = """Copyright (C) 2013-2015 Brigitte Bigi"""
"""
Graphical User Interface to manage documents of a conference.
"""
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
import sys
import os.path
from argparse import ArgumentParser
import traceback
import tkMessageBox
import logging
# ---------------------------------------------------------------------------
# VERIFY PYTHON
# -------------
if sys.version_info < (2, 7):
tkMessageBox.showwarning(
"Python Error...",
"Your python version is too old. Proceed requires 2.7\n. Verify your python installation and try again."
)
sys.exit(1)
if sys.version_info >= (3, 0):
tkMessageBox.showwarning(
"Python Error...",
"Your python version is not appropriate. Proceed requires 2.7\n. Verify your python installation and try again."
)
sys.exit(1)
# VERIFY WXPYTHON
# ----------------
try:
import wx
except ImportError,e:
import tkMessageBox
tkMessageBox.showwarning(
"WxPython Error...",
"WxPython is not installed on your system.\n. Verify your installation and try again."
)
sys.exit(1)
try:
wxv = wx.version().split()[0]
except Exception:
wxv = '2'
if int(wxv[0]) < 3:
tkMessageBox.showwarning(
"WxPython Warning...",
'Your version of wxpython is too old. You could encounter problem while using Proceed.\n'
'Please, perform the update at http://wxpython.org/download.php and restart Proceed.\n\n'
'For any help, see Proceed installation page.')
# THEN, VERIFY Manager
# ------------------
# Make sure that we can import libraries
PROGRAM = os.path.abspath(__file__)
PROCEED = os.path.join(os.path.dirname( os.path.dirname( PROGRAM ) ), "src")
sys.path.insert(0,PROCEED)
try:
from wxgui.manager import MainFrame
from utils.commons import setup_logging, test_pdflatex, test_xelatex, test_pdftk
except ImportError as e:
import tkMessageBox
tkMessageBox.showwarning(
"Installation Error...",
"A problem occurred when launching this program:\n'"+str(e)
)
print traceback.format_exc()
sys.exit(1)
# ---------------------------------------------------------------------------
# Install Gettext
# ---------------------------------------------------------------------------
def install_gettext_in_builtin_namespace():
def _(message):
return message
import __builtin__
if not "_" in __builtin__.__dict__:
__builtin__.__dict__["_"] = _
# ---------------------------------------------------------------------------
# Main application
# ---------------------------------------------------------------------------
# Log
log_level = 0
log_file = None
setup_logging(log_level, log_file)
# Gettext
install_gettext_in_builtin_namespace()
# Arguments
# ------------------------------------------------------------------------
parser = ArgumentParser(usage="%s directory" % os.path.basename(PROGRAM), description="Proceed Graphical User Interface.")
parser.add_argument("files", nargs="*", help='Input directory with conference file(s)')
args = parser.parse_args()
# ----------------------------------------------------------------------------
# Proceed GUI is here:
# ----------------------------------------------------------------------------
# Create the wxapp
mainmanager = wx.App(redirect=True)
# Create the main frame
try:
logging.debug('Welcome to Proceed')
frame = MainFrame()
mainmanager.SetTopWindow(frame)
if test_pdflatex( ) is False:
dial = wx.MessageDialog(None, 'pdflatex is not installed on your system.\nThe automatic generation WILL NOT WORK.', 'Exclamation',
wx.OK | wx.ICON_EXCLAMATION)
dial.ShowModal()
if test_xelatex( ) is False:
dial = wx.MessageDialog(None, 'xetex is not installed on your system.\nThe automatic generation WILL NOT WORK.', 'Exclamation',
wx.OK | wx.ICON_EXCLAMATION)
dial.ShowModal()
if test_pdftk( ) is False:
dial = wx.MessageDialog(None, 'pdftk is not installed on your system.\nThe automatic generation WILL NOT WORK.', 'Exclamation',
wx.OK | wx.ICON_EXCLAMATION)
dial.ShowModal()
frame.Show()
except Exception as e:
tkMessageBox.showwarning(
"Proceed Error...",
"A problem occurred when creating the Proceed graphical user interface.\nThe error is: %s"%(str(e))
)
print traceback.format_exc()
mainmanager.MainLoop()
# ---------------------------------------------------------------------------
| gpl-3.0 | 8,638,812,614,768,123,000 | 32.031088 | 138 | 0.512627 | false |