repo_name (stringlengths 5–100) | path (stringlengths 4–299) | copies (stringclasses, 990 values) | size (stringlengths 4–7) | content (stringlengths 666–1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
Hasimir/pyjs | pyjswidgets/pyjamas/ui/DragWidget.py | 9 | 3080 | # Copyright (C) 2010 Jim Washington
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import Factory
from pyjamas import DOM
from pyjamas.ui.Widget import Widget
from pyjamas.ui.MouseListener import MouseHandler
from pyjamas.ui.DragHandler import DragHandler
from pyjamas.dnd import makeDraggable, DNDHelper
import pyjd
class DragWidget(object):
"""
Mix-in class for a draggable widget.
Override DragHandler on*** methods to enable drag behavior.
At runtime, we change the implementation based on html5
drag-and-drop capabilities of the engine.
"""
pass
class DragContainer(object):
"""
mixin providing drag handlers for contained draggables
events bubble up to here. event.target will be the actual draggable
This class is the same as DragWidget, but does not make itself draggable.
At runtime, we change the implementation based on html5
drag-and-drop capabilities of the engine.
"""
pass
class Draggable(Widget):
def makeDraggable(self):
makeDraggable(self)
class Html5DragContainer(Widget, DragHandler):
def __init__(self, **kw):
if (not hasattr(self, 'attached')) or kw:
Widget.__init__(self, **kw)
DragHandler.__init__(self)
self.addDragListener(self)
class MouseDragContainer(Widget, MouseHandler, DragHandler):
def __init__(self, **kw):
if (not hasattr(self, 'attached')) or kw:
Widget.__init__(self, **kw)
MouseHandler.__init__(self)
self.addMouseListener(DNDHelper.dndHelper)
DragHandler.__init__(self)
self.addDragListener(self)
class Html5DragWidget(Html5DragContainer, Draggable):
def __init__(self, **kw):
Html5DragContainer.__init__(self, **kw)
self.makeDraggable()
class MouseDragWidget(MouseDragContainer, Draggable):
def __init__(self, **kw):
MouseDragContainer.__init__(self, **kw)
self.makeDraggable()
def init(is_native=None):
global DragWidget, DragContainer
if is_native is None:
html5_dnd = hasattr(DOM.createElement('span'), 'draggable')
else:
html5_dnd = is_native
if html5_dnd:
DragContainer = Html5DragContainer
DragWidget = Html5DragWidget
else:
DragContainer = MouseDragContainer
DragWidget = MouseDragWidget
if pyjd.is_desktop:
init(pyjd.native_dnd)
else:
init(None)
Factory.registerClass('pyjamas.ui.DragWidget', 'DragWidget', DragWidget)
Factory.registerClass('pyjamas.ui.DragWidget', 'DragContainer', DragContainer)
| apache-2.0 | -772,960,283,447,689,900 | 29.49505 | 78 | 0.695779 | false |
c960657/dd-agent | tests/core/test_wmi.py | 1 | 28021 | # pylint: disable=E0401
# stdlib
from functools import partial
import logging
import time
import unittest
# 3rd
from mock import Mock, patch
# project
from tests.checks.common import Fixtures
from utils.timeout import TimeoutException
log = logging.getLogger(__name__)
WMISampler = None
def load_fixture(f, args=None):
"""
Build a WMI query result from a file and given parameters.
"""
properties = []
args = args or []
def extract_line(line):
"""
Extract a property name, value and the qualifiers from a fixture line.
Return (property name, property value, property qualifiers)
"""
property_counter_type = ""
try:
property_name, property_value, property_counter_type = line.split(" ")
except ValueError:
property_name, property_value = line.split(" ")
property_qualifiers = [Mock(Name='CounterType', Value=int(property_counter_type))] \
if property_counter_type else []
return property_name, property_value, property_qualifiers
# Build from file
data = Fixtures.read_file(f)
for l in data.splitlines():
property_name, property_value, property_qualifiers = extract_line(l)
properties.append(
Mock(Name=property_name, Value=property_value, Qualifiers_=property_qualifiers)
)
# Append extra information
args = args if isinstance(args, list) else [args]
for arg in args:
property_name, property_value = arg
properties.append(Mock(Name=property_name, Value=property_value, Qualifiers_=[]))
return [Mock(Properties_=properties)]
class Counter(object):
def __init__(self):
self.value = 0
def __iadd__(self, other):
self.value += other
return self
def __eq__(self, other):
return self.value == other
def __str__(self):
return str(self.value)
def reset(self):
self.value = 0
class SWbemServices(object):
"""
SWbemServices a.k.a. (mocked) WMI connection.
Save connection parameters so it can be tested.
"""
# `ExecQuery` metadata
_exec_query_call_count = Counter()
_exec_query_run_time = 0
# Class attr to save the last wmi query and flags
_last_wmi_query = None
_last_wmi_flags = None
def __init__(self, wmi_conn_args):
super(SWbemServices, self).__init__()
self._wmi_conn_args = wmi_conn_args
@classmethod
def reset(cls):
"""
Dirty patch to reset `SWbemServices.ExecQuery.call_count` and
`SWbemServices._exec_query_run_time` to 0, and the wmi query params
"""
cls._exec_query_call_count.reset()
cls._exec_query_run_time = 0
cls._last_wmi_query = None
cls._last_wmi_flags = None
@classmethod
def get_last_wmi_query(cls):
"""
Return the last WMI query submitted via the WMI connection.
"""
return cls._last_wmi_query
@classmethod
def get_last_wmi_flags(cls):
"""
Return the last WMI flags submitted via the WMI connection.
"""
return cls._last_wmi_flags
def get_conn_args(self):
"""
Return parameters used to set up the WMI connection.
"""
return self._wmi_conn_args
def ExecQuery(self, query, query_language, flags):
"""
Mocked `SWbemServices.ExecQuery` method.
"""
# Comply with `ExecQuery` metadata
self._exec_query_call_count += 1
time.sleep(self._exec_query_run_time)
# Save last passed parameters
SWbemServices._last_wmi_query = query
SWbemServices._last_wmi_flags = flags
# Mock a result
results = []
if query in [
"Select AvgDiskBytesPerWrite,FreeMegabytes from Win32_PerfFormattedData_PerfDisk_LogicalDisk", # noqa
"Select AvgDiskBytesPerWrite,FreeMegabytes,Name from Win32_PerfFormattedData_PerfDisk_LogicalDisk" # noqa
]:
results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", ("Name", "C:"))
results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", ("Name", "D:"))
if query == "Select CounterRawCount,CounterCounter,Timestamp_Sys100NS,Frequency_Sys100NS from Win32_PerfRawData_PerfOS_System": # noqa
# Mock a previous and a current sample
sample_file = "win32_perfrawdata_perfos_system_previous" if flags == 131120\
else "win32_perfrawdata_perfos_system_current"
results += load_fixture(sample_file, ("Name", "C:"))
results += load_fixture(sample_file, ("Name", "D:"))
if query == "Select UnknownCounter,MissingProperty,Timestamp_Sys100NS,Frequency_Sys100NS from Win32_PerfRawData_PerfOS_System": # noqa
results += load_fixture("win32_perfrawdata_perfos_system_unknown", ("Name", "C:"))
if query in [
"Select NonDigit,FreeMegabytes from Win32_PerfFormattedData_PerfDisk_LogicalDisk",
"Select FreeMegabytes,NonDigit from Win32_PerfFormattedData_PerfDisk_LogicalDisk",
]: # noqa
results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", [("Name", "C:"), ("NonDigit", "Foo")]) # noqa
if query == "Select IOReadBytesPerSec,IDProcess from Win32_PerfFormattedData_PerfProc_Process WHERE ( Name = 'chrome' )" \
or query == "Select IOReadBytesPerSec,UnknownProperty from Win32_PerfFormattedData_PerfProc_Process WHERE ( Name = 'chrome' )": # noqa
results += load_fixture("win32_perfformatteddata_perfproc_process")
if query == "Select IOReadBytesPerSec,ResultNotMatchingAnyTargetProperty from Win32_PerfFormattedData_PerfProc_Process WHERE ( Name = 'chrome' )": # noqa
results += load_fixture("win32_perfformatteddata_perfproc_process_alt")
if query == "Select CommandLine from Win32_Process WHERE ( Handle = '4036' )" \
or query == "Select UnknownProperty from Win32_Process WHERE ( Handle = '4036' )":
results += load_fixture("win32_process")
if query == ("Select ServiceUptime,TotalBytesSent,TotalBytesReceived,TotalBytesTransferred,CurrentConnections,TotalFilesSent,TotalFilesReceived," # noqa
"TotalConnectionAttemptsAllInstances,TotalGetRequests,TotalPostRequests,TotalHeadRequests,TotalPutRequests,TotalDeleteRequests," # noqa
"TotalOptionsRequests,TotalTraceRequests,TotalNotFoundErrors,TotalLockedErrors,TotalAnonymousUsers,TotalNonAnonymousUsers,TotalCGIRequests," # noqa
"TotalISAPIExtensionRequests from Win32_PerfFormattedData_W3SVC_WebService WHERE ( Name = 'Failing site' ) OR ( Name = 'Default Web Site' )"): # noqa
results += load_fixture("win32_perfformatteddata_w3svc_webservice", ("Name", "Default Web Site")) # noqa
if query == ("Select ServiceUptime,TotalBytesSent,TotalBytesReceived,TotalBytesTransferred,CurrentConnections,TotalFilesSent,TotalFilesReceived," # noqa
"TotalConnectionAttemptsAllInstances,TotalGetRequests,TotalPostRequests,TotalHeadRequests,TotalPutRequests,TotalDeleteRequests," # noqa
"TotalOptionsRequests,TotalTraceRequests,TotalNotFoundErrors,TotalLockedErrors,TotalAnonymousUsers,TotalNonAnonymousUsers,TotalCGIRequests," # noqa
"TotalISAPIExtensionRequests from Win32_PerfFormattedData_W3SVC_WebService WHERE ( Name = '_Total' )"): # noqa
results += load_fixture("win32_perfformatteddata_w3svc_webservice", ("Name", "_Total")) # noqa
if query == ("Select * from Win32_PerfFormattedData_W3SVC_WebService WHERE ( Name = 'Failing site' ) OR ( Name = 'Default Web Site' )"): # noqa
results += load_fixture("win32_perfformatteddata_w3svc_webservice_2008", ("Name", "Default Web Site")) # noqa
if query == ("Select Name,State from Win32_Service WHERE ( Name = 'WSService' ) OR ( Name = 'WinHttpAutoProxySvc' )"): # noqa
results += load_fixture("win32_service_up", ("Name", "WinHttpAutoProxySvc"))
results += load_fixture("win32_service_down", ("Name", "WSService"))
if query == ("Select Message,SourceName,TimeGenerated,Type,User,InsertionStrings,EventCode from Win32_NTLogEvent WHERE ( ( SourceName = 'MSSQLSERVER' ) " # noqa
"AND ( Type = 'Error' OR Type = 'Warning' ) AND TimeGenerated >= '20151224113047.000000-480' )"): # noqa
results += load_fixture("win32_ntlogevent")
return results
ExecQuery.call_count = _exec_query_call_count
class Dispatch(object):
"""
Mock for win32com.client Dispatch class.
"""
_connect_call_count = Counter()
def __init__(self, *args, **kwargs):
pass
@classmethod
def reset(cls):
"""
FIXME - Dirty patch to reset `ConnectServer.call_count` to 0.
"""
cls._connect_call_count.reset()
def ConnectServer(self, *args, **kwargs):
"""
Return a WMI connection, a.k.a. a SWbemServices object.
"""
Dispatch._connect_call_count += 1
wmi_conn_args = (args, kwargs)
return SWbemServices(wmi_conn_args)
ConnectServer.call_count = _connect_call_count
def to_time(wmi_ts):
"Just return any time struct"
return (2015, 12, 24, 11, 30, 47, 0, 0)
def from_time(year=0, month=0, day=0, hours=0, minutes=0,
seconds=0, microseconds=0, timezone=0):
"Just return any WMI date"
return "20151224113047.000000-480"
class TestCommonWMI(unittest.TestCase):
"""
Common toolbox for WMI unit testing.
"""
def setUp(self):
"""
Mock WMI related Python packages, so it can be tested on any environment.
"""
global WMISampler
self.patcher = patch.dict('sys.modules',{
'pywintypes': Mock(),
'pythoncom': Mock(),
'win32com': Mock(),
'win32com.client': Mock(Dispatch=Dispatch),
})
self.patcher.start()
from checks.libs.wmi import sampler
WMISampler = partial(sampler.WMISampler, log)
def tearDown(self):
"""
Reset Mock counters
"""
# Reset counters
Dispatch.reset()
SWbemServices.reset()
def assertWMIConn(self, wmi_sampler, param=None):
"""
Helper, assertion on the `wmi_sampler`'s WMI connection(s):
* `param`: parameters used to establish the connection
"""
if param:
connection = wmi_sampler.get_connection()
wmi_conn_args, wmi_conn_kwargs = connection.get_conn_args()
if isinstance(param, tuple):
key, value = param
self.assertIn(key, wmi_conn_kwargs)
self.assertEquals(wmi_conn_kwargs[key], value)
else:
self.assertIn(param, wmi_conn_args)
def assertWMIQuery(self, query=None, flags=None):
"""
Helper, assert that the given WMI query and flags were submitted.
"""
if query:
last_wmi_query = SWbemServices.get_last_wmi_query()
self.assertEquals(last_wmi_query, query)
if flags:
last_wmi_flags = SWbemServices.get_last_wmi_flags()
self.assertEquals(last_wmi_flags, flags)
def assertWMIObject(self, wmi_obj, properties):
"""
Assert the WMI object integrity, i.e. contains the given properties.
"""
for prop_and_value in properties:
prop = prop_and_value[0] if isinstance(prop_and_value, tuple) else prop_and_value
value = prop_and_value[1] if isinstance(prop_and_value, tuple) else None
self.assertIn(prop, wmi_obj)
if value is None:
continue
self.assertEquals(wmi_obj[prop], value)
def assertWMISampler(self, wmi_sampler, properties, count=None):
"""
Assert WMI objects' integrity among the WMI sampler.
"""
self.assertEquals(len(wmi_sampler), count)
for wmi_obj in wmi_sampler:
self.assertWMIObject(wmi_obj, properties)
def assertIn(self, first, second):
"""
Assert `first` in `second`.
Note: needs to be defined for Python 2.6
"""
self.assertTrue(first in second, "{0} not in {1}".format(first, second))
def assertNotIn(self, first, second):
"""
Assert `first` is not in `second`.
Note: needs to be defined for Python 2.6
"""
self.assertTrue(first not in second, "{0} in {1}".format(first, second))
def assertInPartial(self, first, second):
"""
Assert `first` has a key in `second` where it's a prefix.
Note: needs to be defined for Python 2.6
"""
self.assertTrue(any(key for key in second if key.startswith(first)), "{0} not in {1}".format(first, second))
def getProp(self, dict, prefix):
"""
Get Property from dictionary `dict` starting with `prefix`.
Note: needs to be defined for Python 2.6
"""
for key in dict:
if key.startswith(prefix):
return dict[key]
return None
class TestUnitWMISampler(TestCommonWMI):
"""
Unit tests for WMISampler.
"""
def test_wmi_connection(self):
"""
Establish a WMI connection to the specified host/namespace, with the right credentials.
"""
wmi_sampler = WMISampler(
"Win32_PerfRawData_PerfOS_System",
["ProcessorQueueLength"],
host="myhost",
namespace="some/namespace",
username="datadog",
password="password"
)
# Request a connection but do nothing
wmi_sampler.get_connection()
# Connection was established with the right parameters
self.assertWMIConn(wmi_sampler, param="myhost")
self.assertWMIConn(wmi_sampler, param="some/namespace")
def test_no_wmi_connection_pooling(self):
"""
WMI connections are not shared among WMISampler objects.
"""
from win32com.client import Dispatch
wmi_sampler_1 = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"])
wmi_sampler_2 = WMISampler("Win32_OperatingSystem", ["TotalVisibleMemorySize"])
wmi_sampler_3 = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"], host="myhost") # noqa
wmi_sampler_1.sample()
wmi_sampler_2.sample()
# 3 conns have been opened, 2 for the raw sampler and 1 for the other sampler
self.assertEquals(Dispatch.ConnectServer.call_count, 3, Dispatch.ConnectServer.call_count)
wmi_sampler_3.sample()
# 5 conns now
self.assertEquals(Dispatch.ConnectServer.call_count, 5, Dispatch.ConnectServer.call_count)
def test_wql_filtering(self):
"""
Format the filters to a comprehensive WQL `WHERE` clause.
"""
from checks.libs.wmi import sampler
format_filter = sampler.WMISampler._format_filter
# Check `_format_filter` logic
no_filters = []
filters = [{'Name': "SomeName", 'Id': "SomeId"}]
self.assertEquals("", format_filter(no_filters))
self.assertEquals(" WHERE ( Name = 'SomeName' AND Id = 'SomeId' )",
format_filter(filters))
def test_wql_multiquery_filtering(self):
"""
Format the filters with multiple properties per instance to a comprehensive WQL `WHERE` clause.
"""
from checks.libs.wmi import sampler
format_filter = sampler.WMISampler._format_filter
# Check `_format_filter` logic
no_filters = []
filters = [{'Name': "SomeName", 'Property1': "foo"}, {'Name': "OtherName", 'Property1': "bar"}]
self.assertEquals("", format_filter(no_filters))
self.assertEquals(" WHERE ( Property1 = 'bar' AND Name = 'OtherName' ) OR"
" ( Property1 = 'foo' AND Name = 'SomeName' )",
format_filter(filters))
def test_wql_empty_list(self):
"""
Format filters to a comprehensive WQL `WHERE` clause skipping empty lists.
"""
from checks.libs.wmi import sampler
format_filter = sampler.WMISampler._format_filter
filters = []
query = {}
query['User'] = ('=', 'luser')
query['SourceName'] = ('=', 'MSSQL')
query['EventCode'] = []
query['SomethingEmpty'] = []
query['MoreNothing'] = []
filters.append(query)
self.assertEquals(" WHERE ( SourceName = 'MSSQL' AND User = 'luser' )",
format_filter(filters))
def test_wql_filtering_op_adv(self):
"""
Format the filters to a comprehensive WQL `WHERE` clause w/ mixed filter containing regular and operator modified properties.
"""
from checks.libs.wmi import sampler
format_filter = sampler.WMISampler._format_filter
# Check `_format_filter` logic
filters = [{'Name': "Foo%"}, {'Name': "Bar%", 'Id': ('>=', "SomeId")}, {'Name': "Zulu"}]
self.assertEquals(" WHERE ( Name = 'Zulu' ) OR ( Name LIKE 'Bar%' AND Id >= 'SomeId' ) OR ( Name LIKE 'Foo%' )",
format_filter(filters))
def test_wql_eventlog_filtering(self):
"""
Format filters with the eventlog expected form to a comprehensive WQL `WHERE` clause.
"""
from checks.libs.wmi import sampler
from datetime import datetime
from checks.wmi_check import from_time
format_filter = sampler.WMISampler._format_filter
filters = []
query = {}
and_props = ['mEssage']
ltypes = ["Error", "Warning"]
source_names = ["MSSQLSERVER", "IIS"]
log_files = ["System", "Security"]
event_codes = [302, 404, 501]
message_filters = ["-foo", "%bar%", "%zen%"]
last_ts = datetime(2016, 1, 1, 15, 8, 24, 78915)
query['TimeGenerated'] = ('>=', from_time(last_ts))
query['Type'] = ('=', 'footype')
query['User'] = ('=', 'luser')
query['SourceName'] = ('=', 'MSSQL')
query['LogFile'] = ('=', 'thelogfile')
query['Type'] = []
for ltype in ltypes:
query['Type'].append(('=', ltype))
query['SourceName'] = []
for source_name in source_names:
query['SourceName'].append(('=', source_name))
query['LogFile'] = []
for log_file in log_files:
query['LogFile'].append(('=', log_file))
query['EventCode'] = []
for code in event_codes:
query['EventCode'].append(('=', code))
query['NOT Message'] = []
query['Message'] = []
for filt in message_filters:
if filt[0] == '-':
query['NOT Message'].append(('LIKE', filt[1:]))
else:
query['Message'].append(('LIKE', filt))
filters.append(query)
self.assertEquals(" WHERE ( NOT Message LIKE 'foo' AND ( EventCode = '302' OR EventCode = '404' OR EventCode = '501' ) "
"AND ( SourceName = 'MSSQLSERVER' OR SourceName = 'IIS' ) AND TimeGenerated >= '2016-01-01 15:08:24.078915**********.******+' "
"AND User = 'luser' AND Message LIKE '%bar%' AND Message LIKE '%zen%' AND ( LogFile = 'System' OR LogFile = 'Security' ) "
"AND ( Type = 'Error' OR Type = 'Warning' ) )",
format_filter(filters, and_props))
def test_wql_filtering_inclusive(self):
"""
Format the filters to a comprehensive and inclusive WQL `WHERE` clause.
"""
from checks.libs.wmi import sampler
format_filter = sampler.WMISampler._format_filter
# Check `_format_filter` logic
filters = [{'Name': "SomeName"}, {'Id': "SomeId"}]
self.assertEquals(" WHERE ( Id = 'SomeId' ) OR ( Name = 'SomeName' )",
format_filter(filters, True))
def test_wmi_query(self):
"""
Query WMI using WMI Query Language (WQL).
"""
# No filters
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"])
wmi_sampler.sample()
self.assertWMIQuery(
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
)
# Single filter
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"],
filters=[{'Name': "C:"}])
wmi_sampler.sample()
self.assertWMIQuery(
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
" WHERE ( Name = 'C:' )"
)
# Multiple filters
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"],
filters=[{'Name': "C:", 'Id': "123"}])
wmi_sampler.sample()
self.assertWMIQuery(
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
" WHERE ( Name = 'C:' AND Id = '123' )"
)
def test_wmi_parser(self):
"""
Parse WMI objects from WMI query results.
"""
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"])
wmi_sampler.sample()
# Assert `results`
expected_results = [
{
'freemegabytes': 19742.0,
'name': 'C:',
'avgdiskbytesperwrite': 1536.0
}, {
'freemegabytes': 19742.0,
'name': 'D:',
'avgdiskbytesperwrite': 1536.0
}
]
self.assertEquals(wmi_sampler, expected_results, wmi_sampler)
def test_wmi_sampler_iterator_getter(self):
"""
Iterating/getting on the WMISampler object iterates/gets on its current sample.
"""
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"])
wmi_sampler.sample()
self.assertEquals(len(wmi_sampler), 2)
# Using an iterator
for wmi_obj in wmi_sampler:
self.assertWMIObject(wmi_obj, ["AvgDiskBytesPerWrite", "FreeMegabytes", "name"])
# Using an accessor
for index in xrange(0, 2):
self.assertWMIObject(wmi_sampler[index], ["AvgDiskBytesPerWrite", "FreeMegabytes", "name"])
def test_wmi_sampler_timeout(self):
"""
Gracefully handle WMI query timeouts.
"""
from checks.libs.wmi.sampler import WMISampler
logger = Mock()
# Create a sampler that timeouts
wmi_sampler = WMISampler(logger, "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"],
timeout_duration=0.1)
SWbemServices._exec_query_run_time = 0.11
# `TimeoutException` exception is raised, DEBUG message logged
self.assertRaises(TimeoutException, wmi_sampler.sample)
self.assertTrue(wmi_sampler._sampling)
self.assertTrue(logger.debug.called)
# Cannot iterate on data
self.assertRaises(TypeError, lambda: len(wmi_sampler))
self.assertRaises(TypeError, lambda: sum(1 for _ in wmi_sampler))
# Recover from timeout at next iteration
wmi_sampler.sample()
self.assertFalse(wmi_sampler._sampling)
# The existing query was retrieved
self.assertEquals(SWbemServices.ExecQuery.call_count, 1, SWbemServices.ExecQuery.call_count)
# Data is populated
self.assertEquals(len(wmi_sampler), 2)
self.assertEquals(sum(1 for _ in wmi_sampler), 2)
def test_raw_perf_properties(self):
"""
Extend the list of properties to query for RAW Performance classes.
"""
# Formatted Performance class
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfOS_System", ["ProcessorQueueLength"])
self.assertEquals(len(wmi_sampler.property_names), 1)
# Raw Performance class
wmi_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"]) # noqa
self.assertEquals(len(wmi_sampler.property_names), 4)
def test_raw_initial_sampling(self):
"""
Query for initial sample for RAW Performance classes.
"""
wmi_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"]) # noqa
wmi_sampler.sample()
# 2 queries should have been made: one for initialization, one for sampling
self.assertEquals(SWbemServices.ExecQuery.call_count, 2, SWbemServices.ExecQuery.call_count)
# Repeat
wmi_sampler.sample()
self.assertEquals(SWbemServices.ExecQuery.call_count, 3, SWbemServices.ExecQuery.call_count)
def test_raw_cache_qualifiers(self):
"""
Cache the qualifiers on the first query against RAW Performance classes.
"""
# Append `flag_use_amended_qualifiers` flag on the first query
wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"]) # noqa
wmi_raw_sampler._query()
self.assertWMIQuery(flags=131120)
wmi_raw_sampler._query()
self.assertWMIQuery(flags=48)
# Qualifiers are cached
self.assertTrue(wmi_raw_sampler.property_counter_types)
self.assertIn('CounterRawCount', wmi_raw_sampler.property_counter_types)
self.assertIn('CounterCounter', wmi_raw_sampler.property_counter_types)
def test_raw_properties_formatting(self):
"""
WMI Object's RAW data are returned formatted.
"""
wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"]) # noqa
wmi_raw_sampler.sample()
self.assertWMISampler(
wmi_raw_sampler,
[
("CounterRawCount", 500), ("CounterCounter", 50),
"Timestamp_Sys100NS", "Frequency_Sys100NS", "name"
],
count=2
)
def test_raw_properties_fallback(self):
"""
Print a warning on RAW Performance classes if the calculator is undefined.
Returns the original RAW value.
"""
from checks.libs.wmi.sampler import WMISampler
logger = Mock()
wmi_raw_sampler = WMISampler(logger, "Win32_PerfRawData_PerfOS_System", ["UnknownCounter", "MissingProperty"]) # noqa
wmi_raw_sampler.sample()
self.assertWMISampler(
wmi_raw_sampler,
[
("UnknownCounter", 999), "Timestamp_Sys100NS", "Frequency_Sys100NS", "Name"
],
count=1
)
self.assertTrue(logger.warning.called)
def test_missing_property(self):
"""
Do not raise on missing properties but backfill with empty values.
"""
wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["UnknownCounter", "MissingProperty"]) # noqa
wmi_raw_sampler.sample()
self.assertWMISampler(wmi_raw_sampler, ["MissingProperty"], count=1)
class TestIntegrationWMI(unittest.TestCase):
"""
Integration tests for WMISampler.
"""
pass
| bsd-3-clause | 1,308,104,610,086,114,800 | 36.561662 | 171 | 0.603048 | false |
pombredanne/pants | src/python/pants/option/options_bootstrapper.py | 13 | 8660 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import logging
import os
import sys
from pants.base.build_environment import get_default_pants_config_file
from pants.option.arg_splitter import GLOBAL_SCOPE, GLOBAL_SCOPE_CONFIG_SECTION
from pants.option.config import Config
from pants.option.custom_types import ListValueComponent
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.option_tracker import OptionTracker
from pants.option.options import Options
logger = logging.getLogger(__name__)
class OptionsBootstrapper(object):
"""An object that knows how to create options in two stages: bootstrap, and then full options."""
@staticmethod
def get_config_file_paths(env, args):
"""Get the location of the config files.
The locations are specified by the --pants-config-files option. However we need to load the
config in order to process the options. This method special-cases --pants-config-files
in order to solve this chicken-and-egg problem.
Note that, obviously, it's not possible to set the location of config files in a config file.
Doing so will have no effect.
"""
# This exactly mirrors the logic applied in Option to all regular options. Note that we'll
# also parse --pants-config as a regular option later, but there's no harm in that. In fact,
# it's preferable, so that any code that happens to want to know where we read config from
# can inspect the option.
flag = '--pants-config-files='
evars = ['PANTS_GLOBAL_PANTS_CONFIG_FILES', 'PANTS_PANTS_CONFIG_FILES', 'PANTS_CONFIG_FILES']
path_list_values = [ListValueComponent.create(get_default_pants_config_file())]
for var in evars:
if var in env:
path_list_values.append(ListValueComponent.create(env[var]))
break
for arg in args:
# Technically this is very slightly incorrect, as we don't check scope. But it's
# very unlikely that any task or subsystem will have an option named --pants-config-files.
# TODO: Enforce a ban on options with a --pants- prefix outside our global options?
if arg.startswith(flag):
path_list_values.append(ListValueComponent.create(arg[len(flag):]))
return ListValueComponent.merge(path_list_values).val
def __init__(self, env=None, args=None):
self._env = env if env is not None else os.environ.copy()
self._post_bootstrap_config = None # Will be set later.
self._args = sys.argv if args is None else args
self._bootstrap_options = None # We memoize the bootstrap options here.
self._full_options = {} # We memoize the full options here.
self._option_tracker = OptionTracker()
def get_bootstrap_options(self):
""":returns: an Options instance that only knows about the bootstrap options.
:rtype: :class:`Options`
"""
if not self._bootstrap_options:
flags = set()
short_flags = set()
def capture_the_flags(*args, **kwargs):
for arg in args:
flags.add(arg)
if len(arg) == 2:
short_flags.add(arg)
elif kwargs.get('type') == bool:
flags.add('--no-{}'.format(arg[2:]))
GlobalOptionsRegistrar.register_bootstrap_options(capture_the_flags)
def is_bootstrap_option(arg):
components = arg.split('=', 1)
if components[0] in flags:
return True
for flag in short_flags:
if arg.startswith(flag):
return True
return False
# Take just the bootstrap args, so we don't choke on other global-scope args on the cmd line.
# Stop before '--' since args after that are pass-through and may have duplicate names to our
# bootstrap options.
bargs = filter(is_bootstrap_option, itertools.takewhile(lambda arg: arg != '--', self._args))
configpaths = self.get_config_file_paths(env=self._env, args=self._args)
pre_bootstrap_config = Config.load(configpaths)
def bootstrap_options_from_config(config):
bootstrap_options = Options.create(env=self._env, config=config,
known_scope_infos=[GlobalOptionsRegistrar.get_scope_info()], args=bargs,
option_tracker=self._option_tracker)
def register_global(*args, **kwargs):
bootstrap_options.register(GLOBAL_SCOPE, *args, **kwargs)
GlobalOptionsRegistrar.register_bootstrap_options(register_global)
return bootstrap_options
initial_bootstrap_options = bootstrap_options_from_config(pre_bootstrap_config)
bootstrap_option_values = initial_bootstrap_options.for_global_scope()
# Now re-read the config, post-bootstrapping. Note the order: First whatever we bootstrapped
# from (typically pants.ini), then config override, then rcfiles.
full_configpaths = pre_bootstrap_config.sources()
if bootstrap_option_values.config_override:
full_configpaths.extend(bootstrap_option_values.config_override)
if bootstrap_option_values.pantsrc:
rcfiles = [os.path.expanduser(rcfile) for rcfile in bootstrap_option_values.pantsrc_files]
existing_rcfiles = filter(os.path.exists, rcfiles)
full_configpaths.extend(existing_rcfiles)
self._post_bootstrap_config = Config.load(full_configpaths,
seed_values=bootstrap_option_values)
# Now recompute the bootstrap options with the full config. This allows us to pick up
# bootstrap values (such as backends) from a config override file, for example.
self._bootstrap_options = bootstrap_options_from_config(self._post_bootstrap_config)
return self._bootstrap_options
def get_full_options(self, known_scope_infos):
"""Get the full Options instance bootstrapped by this object for the given known scopes.
:param known_scope_infos: ScopeInfos for all scopes that may be encountered.
:returns: A bootstrapped Options instance that also carries options for all the supplied known
scopes.
:rtype: :class:`Options`
"""
key = frozenset(sorted(known_scope_infos))
if key not in self._full_options:
# Note: Don't inline this into the Options() call, as this populates
# self._post_bootstrap_config, which is another argument to that call.
bootstrap_option_values = self.get_bootstrap_options().for_global_scope()
self._full_options[key] = Options.create(self._env,
self._post_bootstrap_config,
known_scope_infos,
args=self._args,
bootstrap_option_values=bootstrap_option_values,
option_tracker=self._option_tracker)
return self._full_options[key]
def verify_configs_against_options(self, options):
"""Verify all loaded configs have correct scopes and options.
:param options: Fully bootstrapped valid options.
:return: None.
"""
error_log = []
for config in self._post_bootstrap_config.configs():
for section in config.sections():
if section == GLOBAL_SCOPE_CONFIG_SECTION:
scope = GLOBAL_SCOPE
else:
scope = section
try:
valid_options_under_scope = set(options.for_scope(scope))
# Only catch ConfigValidationError. Other exceptions will be raised directly.
except Config.ConfigValidationError:
error_log.append("Invalid scope [{}] in {}".format(section, config.configpath))
else:
# All the options specified under [`section`] in `config` excluding bootstrap defaults.
all_options_under_scope = (set(config.configparser.options(section)) -
set(config.configparser.defaults()))
for option in all_options_under_scope:
if option not in valid_options_under_scope:
error_log.append("Invalid option '{}' under [{}] in {}".format(option, section, config.configpath))
if error_log:
for error in error_log:
logger.error(error)
raise Config.ConfigValidationError("Invalid config entries detected. "
"See log for details on which entries to update or remove.\n"
"(Specify --no-verify-config to disable this check.)")
| apache-2.0 | -770,891,687,504,048,600 | 45.06383 | 113 | 0.663857 | false |
barthisrael/OmniDB | OmniDB/OmniDB_app/include/OmniDatabase/Oracle.py | 2 | 69309 | '''
The MIT License (MIT)
Portions Copyright (c) 2015-2019, The OmniDB Team
Portions Copyright (c) 2017-2019, 2ndQuadrant Limited
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import os.path
import re
from urllib.parse import urlparse
from collections import OrderedDict
from enum import Enum
import OmniDB_app.include.Spartacus as Spartacus
import OmniDB_app.include.Spartacus.Database as Database
import OmniDB_app.include.Spartacus.Utils as Utils
'''
------------------------------------------------------------------------
Template
------------------------------------------------------------------------
'''
class TemplateType(Enum):
EXECUTE = 1
SCRIPT = 2
class Template:
def __init__(self, p_text, p_type=TemplateType.EXECUTE):
self.v_text = p_text
self.v_type = p_type
'''
------------------------------------------------------------------------
Oracle
------------------------------------------------------------------------
'''
class Oracle:
def __init__(self, p_server, p_port, p_service, p_user, p_password, p_conn_id=0, p_alias='', p_conn_string='', p_parse_conn_string = False):
self.v_alias = p_alias
self.v_db_type = 'oracle'
self.v_conn_string = p_conn_string
self.v_conn_string_error = ''
self.v_conn_id = p_conn_id
self.v_port = p_port
if p_port is None or p_port == '':
self.v_active_port = '1521'
else:
self.v_active_port = p_port
self.v_service = p_service.upper()
if p_service is None or p_service == '':
self.v_active_service = 'XE'
else:
self.v_active_service = p_service.upper()
self.v_server = p_server
self.v_active_server = p_server
self.v_user = p_user.upper()
self.v_active_user = p_user.upper()
#try to get info from connection string
if p_conn_string!='' and p_parse_conn_string:
try:
parsed = urlparse(p_conn_string)
if parsed.port!=None:
self.v_active_port = str(parsed.port)
if parsed.hostname!=None:
self.v_active_server = parsed.hostname
if parsed.username!=None:
self.v_active_user = parsed.username
if parsed.query!=None:
self.v_conn_string_query = parsed.query
parsed_database = parsed.path
if len(parsed_database)>1:
self.v_active_service = parsed_database[1:]
except Exception as exc:
self.v_conn_string_error = 'Syntax error in the connection string.'
None
if self.v_user.replace(' ', '') != self.v_user:
self.v_schema = '"{0}"'.format(p_user)
else:
self.v_schema = self.v_user
self.v_connection = Spartacus.Database.Oracle(self.v_active_server, self.v_active_port, self.v_active_service, self.v_active_user, p_password, p_conn_string)
self.v_has_schema = True
self.v_has_functions = True
self.v_has_procedures = True
self.v_has_sequences = True
self.v_has_primary_keys = True
self.v_has_foreign_keys = True
self.v_has_uniques = True
self.v_has_indexes = True
self.v_has_checks = False
self.v_has_excludes = False
self.v_has_rules = False
self.v_has_triggers = False
self.v_has_partitions = False
self.v_has_update_rule = False
self.v_can_rename_table = True
self.v_rename_table_command = "alter table #p_table_name# rename to #p_new_table_name#"
self.v_create_pk_command = "constraint #p_constraint_name# primary key (#p_columns#)"
self.v_create_fk_command = "constraint #p_constraint_name# foreign key (#p_columns#) references #p_r_table_name# (#p_r_columns#) #p_delete_update_rules#"
self.v_create_unique_command = "constraint #p_constraint_name# unique (#p_columns#)"
self.v_can_alter_type = True
self.v_alter_type_command = "alter table #p_table_name# modify #p_column_name# #p_new_data_type#"
self.v_can_alter_nullable = True
self.v_set_nullable_command = "alter table #p_table_name# modify #p_column_name# null"
self.v_drop_nullable_command = "alter table #p_table_name# modify #p_column_name# not null"
self.v_can_rename_column = True
self.v_rename_column_command = "alter table #p_table_name# rename column #p_column_name# to #p_new_column_name#"
self.v_can_add_column = True
self.v_add_column_command = "alter table #p_table_name# add #p_column_name# #p_data_type# #p_nullable#"
self.v_can_drop_column = True
self.v_drop_column_command = "alter table #p_table_name# drop column #p_column_name#"
self.v_can_add_constraint = True
self.v_add_pk_command = "alter table #p_table_name# add constraint #p_constraint_name# primary key (#p_columns#)"
self.v_add_fk_command = "alter table #p_table_name# add constraint #p_constraint_name# foreign key (#p_columns#) references #p_r_table_name# (#p_r_columns#) #p_delete_update_rules#"
self.v_add_unique_command = "alter table #p_table_name# add constraint #p_constraint_name# unique (#p_columns#)"
self.v_can_drop_constraint = True
self.v_drop_pk_command = "alter table #p_table_name# drop constraint #p_constraint_name#"
self.v_drop_fk_command = "alter table #p_table_name# drop constraint #p_constraint_name#"
self.v_drop_unique_command = "alter table #p_table_name# drop constraint #p_constraint_name#"
self.v_create_index_command = "create index #p_index_name# on #p_table_name# (#p_columns#)";
self.v_create_unique_index_command = "create unique index #p_index_name# on #p_table_name# (#p_columns#)"
self.v_drop_index_command = "drop index #p_schema_name#.#p_index_name#"
self.v_update_rules = [
"NO ACTION"
]
self.v_delete_rules = [
"NO ACTION",
"SET NULL",
"CASCADE"
]
self.v_reserved_words = []
self.v_console_help = "Console tab. Type the commands in the editor below this box. \? to view command list."
self.v_use_server_cursor = False
def GetName(self):
return self.v_service
def GetVersion(self):
return self.v_connection.ExecuteScalar('''
select (case when product like '%Express%'
then 'Oracle XE '
else 'Oracle '
end) || version
from product_component_version
where product like 'Oracle%'
''')
def GetUserName(self):
return self.v_user
def GetUserSuper(self):
try:
v_sessions = self.v_connection.Query('select * from v$session where rownum <= 1')
return True
except Exception as exc:
return False
def GetExpress(self):
v_express = self.v_connection.Query("select * from product_component_version where product like '%Express%'")
return (len(v_express.Rows) > 0)
def PrintDatabaseInfo(self):
return self.v_user + '@' + self.v_service
def PrintDatabaseDetails(self):
return self.v_server + ':' + self.v_port
def HandleUpdateDeleteRules(self, p_update_rule, p_delete_rule):
v_rules = ''
if p_delete_rule.strip() != '':
v_rules += ' on delete ' + p_delete_rule + ' '
return v_rules
def TestConnection(self):
v_return = ''
if self.v_conn_string and self.v_conn_string_error!='':
return self.v_conn_string_error
try:
self.v_connection.Open()
self.v_connection.Close()
v_return = 'Connection successful.'
except Exception as exc:
v_return = str(exc)
return v_return
def GetErrorPosition(self, p_error_message):
vector = str(p_error_message).split('\n')
v_return = None
if len(vector) > 1 and vector[1][0:4]=='LINE':
v_return = {
'row': vector[1].split(':')[0].split(' ')[1],
'col': vector[2].index('^') - len(vector[1].split(':')[0])-2
}
return v_return
def QueryRoles(self):
return self.v_connection.Query('''
select (case when upper(replace(username, ' ', '')) <> username then '"' || username || '"' else username end) as "role_name"
from all_users
order by username
''', True)
def QueryTablespaces(self):
return self.v_connection.Query('''
select (case when upper(replace(tablespace_name, ' ', '')) <> tablespace_name then '"' || tablespace_name || '"' else tablespace_name end) as "tablespace_name"
from dba_tablespaces
order by tablespace_name
''', True)
def QueryTables(self, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_schema:
v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(p_schema)
else:
v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(self.v_schema)
return self.v_connection.Query('''
select (case when upper(replace(table_name, ' ', '')) <> table_name then '"' || table_name || '"' else table_name end) as "table_name",
(case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) as "table_schema"
from all_tables
where 1 = 1
{0}
order by owner,
table_name
'''.format(v_filter), True)
def QueryTablesFields(self, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' and (case when upper(replace(table_name, ' ', '')) <> table_name then '\"' || table_name || '\"' else table_name end) = '{1}' ".format(p_schema, p_table)
elif p_table:
v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' and (case when upper(replace(table_name, ' ', '')) <> table_name then '\"' || table_name || '\"' else table_name end) = '{1}' ".format(self.v_schema, p_table)
elif p_schema:
v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(p_schema)
else:
v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(self.v_schema)
else:
if p_table:
v_filter = "and table_name = '{0}' ".format(p_table)
return self.v_connection.Query('''
select (case when upper(replace(table_name, ' ', '')) <> table_name then '"' || table_name || '"' else table_name end) as "table_name",
(case when upper(replace(column_name, ' ', '')) <> column_name then '"' || column_name || '"' else column_name end) as "column_name",
case when data_type = 'NUMBER' and data_scale = '0' then 'INTEGER' else data_type end as "data_type",
case nullable when 'Y' then 'YES' else 'NO' end as "nullable",
data_length as "data_length",
data_precision as "data_precision",
data_scale as "data_scale"
from all_tab_columns
where 1 = 1
{0}
order by table_name,
column_id
'''.format(v_filter), True)
def QueryTablesForeignKeys(self, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
v_filter = "and (case when upper(replace(constraint_info.owner, ' ', '')) <> constraint_info.owner then '\"' || constraint_info.owner || '\"' else constraint_info.owner end) = '{0}' and (case when upper(replace(detail_table.table_name, ' ', '')) <> detail_table.table_name then '\"' || detail_table.table_name || '\"' else detail_table.table_name end) = '{1}' ".format(p_schema, p_table)
elif p_table:
v_filter = "and (case when upper(replace(constraint_info.owner, ' ', '')) <> constraint_info.owner then '\"' || constraint_info.owner || '\"' else constraint_info.owner end) = '{0}' and (case when upper(replace(detail_table.table_name, ' ', '')) <> detail_table.table_name then '\"' || detail_table.table_name || '\"' else detail_table.table_name end) = '{1}' ".format(self.v_schema, p_table)
elif p_schema:
v_filter = "and (case when upper(replace(constraint_info.owner, ' ', '')) <> constraint_info.owner then '\"' || constraint_info.owner || '\"' else constraint_info.owner end) = '{0}' ".format(p_schema)
else:
v_filter = "and (case when upper(replace(constraint_info.owner, ' ', '')) <> constraint_info.owner then '\"' || constraint_info.owner || '\"' else constraint_info.owner end) = '{0}' ".format(self.v_schema)
else:
if p_table:
v_filter = "and (case when upper(replace(detail_table.table_name, ' ', '')) <> detail_table.table_name then '\"' || detail_table.table_name || '\"' else detail_table.table_name end) = '{0}' ".format(p_table)
return self.v_connection.Query('''
select (case when upper(replace(constraint_info.constraint_name, ' ', '')) <> constraint_info.constraint_name then '"' || constraint_info.constraint_name || '"' else constraint_info.constraint_name end) as "constraint_name",
(case when upper(replace(detail_table.table_name, ' ', '')) <> detail_table.table_name then '"' || detail_table.table_name || '"' else detail_table.table_name end) as "table_name",
(case when upper(replace(constraint_info.r_constraint_name, ' ', '')) <> constraint_info.r_constraint_name then '"' || constraint_info.r_constraint_name || '"' else constraint_info.r_constraint_name end) as "r_constraint_name",
(case when upper(replace(master_table.table_name, ' ', '')) <> master_table.table_name then '"' || master_table.table_name || '"' else master_table.table_name end) as "r_table_name",
(case when upper(replace(detail_table.owner, ' ', '')) <> detail_table.owner then '"' || detail_table.owner || '"' else detail_table.owner end) as "table_schema",
(case when upper(replace(master_table.owner, ' ', '')) <> master_table.owner then '"' || master_table.owner || '"' else master_table.owner end) as "r_table_schema",
constraint_info.delete_rule as "delete_rule",
'NO ACTION' as "update_rule"
from user_constraints constraint_info,
user_cons_columns detail_table,
user_cons_columns master_table
where constraint_info.constraint_name = detail_table.constraint_name
and constraint_info.r_constraint_name = master_table.constraint_name
and detail_table.position = master_table.position
and constraint_info.constraint_type = 'R'
{0}
order by constraint_info.constraint_name,
detail_table.table_name
'''.format(v_filter), True)
def QueryTablesForeignKeysColumns(self, p_fkey, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
v_filter = "and (case when upper(replace(constraint_info.owner, ' ', '')) <> constraint_info.owner then '\"' || constraint_info.owner || '\"' else constraint_info.owner end) = '{0}' and (case when upper(replace(detail_table.table_name, ' ', '')) <> detail_table.table_name then '\"' || detail_table.table_name || '\"' else detail_table.table_name end) = '{1}' ".format(p_schema, p_table)
elif p_table:
v_filter = "and (case when upper(replace(constraint_info.owner, ' ', '')) <> constraint_info.owner then '\"' || constraint_info.owner || '\"' else constraint_info.owner end) = '{0}' and (case when upper(replace(detail_table.table_name, ' ', '')) <> detail_table.table_name then '\"' || detail_table.table_name || '\"' else detail_table.table_name end) = '{1}' ".format(self.v_schema, p_table)
elif p_schema:
v_filter = "and (case when upper(replace(constraint_info.owner, ' ', '')) <> constraint_info.owner then '\"' || constraint_info.owner || '\"' else constraint_info.owner end) = '{0}' ".format(p_schema)
else:
v_filter = "and (case when upper(replace(constraint_info.owner, ' ', '')) <> constraint_info.owner then '\"' || constraint_info.owner || '\"' else constraint_info.owner end) = '{0}' ".format(self.v_schema)
else:
if p_table:
v_filter = "and (case when upper(replace(detail_table.table_name, ' ', '')) <> detail_table.table_name then '\"' || detail_table.table_name || '\"' else detail_table.table_name end) = '{0}' ".format(p_table)
v_filter = v_filter + "and (case when upper(replace(constraint_info.constraint_name, ' ', '')) <> constraint_info.constraint_name then '\"' || constraint_info.constraint_name || '\"' else constraint_info.constraint_name end) = '{0}' ".format(p_fkey)
return self.v_connection.Query('''
select (case when upper(replace(constraint_info.constraint_name, ' ', '')) <> constraint_info.constraint_name then '"' || constraint_info.constraint_name || '"' else constraint_info.constraint_name end) as "constraint_name",
(case when upper(replace(detail_table.table_name, ' ', '')) <> detail_table.table_name then '"' || detail_table.table_name || '"' else detail_table.table_name end) as "table_name",
(case when upper(replace(detail_table.column_name, ' ', '')) <> detail_table.column_name then '"' || detail_table.column_name || '"' else detail_table.column_name end) as "column_name",
(case when upper(replace(constraint_info.r_constraint_name, ' ', '')) <> constraint_info.r_constraint_name then '"' || constraint_info.r_constraint_name || '"' else constraint_info.r_constraint_name end) as "r_constraint_name",
(case when upper(replace(master_table.table_name, ' ', '')) <> master_table.table_name then '"' || master_table.table_name || '"' else master_table.table_name end) as "r_table_name",
(case when upper(replace(master_table.column_name, ' ', '')) <> master_table.column_name then '"' || master_table.column_name || '"' else master_table.column_name end) as "r_column_name",
(case when upper(replace(detail_table.owner, ' ', '')) <> detail_table.owner then '"' || detail_table.owner || '"' else detail_table.owner end) as "table_schema",
(case when upper(replace(master_table.owner, ' ', '')) <> master_table.owner then '"' || master_table.owner || '"' else master_table.owner end) as "r_table_schema",
constraint_info.delete_rule as "delete_rule",
'NO ACTION' as "update_rule",
detail_table.position as "ordinal_position"
from user_constraints constraint_info,
user_cons_columns detail_table,
user_cons_columns master_table
where constraint_info.constraint_name = detail_table.constraint_name
and constraint_info.r_constraint_name = master_table.constraint_name
and detail_table.position = master_table.position
and constraint_info.constraint_type = 'R'
{0}
order by constraint_info.constraint_name,
detail_table.table_name,
detail_table.position
'''.format(v_filter), True)
def QueryTablesPrimaryKeys(self, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{1}' ".format(p_schema, p_table)
elif p_table:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{1}' ".format(self.v_schema, p_table)
elif p_schema:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' ".format(p_schema)
else:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' ".format(self.v_schema)
else:
if p_table:
v_filter = "and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{0}' ".format(p_table)
return self.v_connection.Query('''
select distinct *
from (
select (case when upper(replace(cons.constraint_name, ' ', '')) <> cons.constraint_name then '"' || cons.constraint_name || '"' else cons.constraint_name end) as "constraint_name",
(case when upper(replace(cols.table_name, ' ', '')) <> cols.table_name then '"' || cols.table_name || '"' else cols.table_name end) as "table_name",
(case when upper(replace(cons.owner, ' ', '')) <> cons.owner then '"' || cons.owner || '"' else cons.owner end) as "table_schema"
from all_constraints cons,
all_cons_columns cols,
all_tables t
where cons.constraint_type = 'P'
and t.table_name = cols.table_name
and cons.constraint_name = cols.constraint_name
and cons.owner = cols.owner
order by cons.owner,
cols.table_name,
cons.constraint_name
)
where 1 = 1
{0}
'''.format(v_filter), True)
def QueryTablesPrimaryKeysColumns(self, p_pkey, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{1}' ".format(p_schema, p_table)
elif p_table:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{1}' ".format(self.v_schema, p_table)
elif p_schema:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' ".format(p_schema)
else:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' ".format(self.v_schema)
else:
if p_table:
v_filter = "and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{0}' ".format(p_table)
v_filter = v_filter + "and (case when upper(replace(\"constraint_name\", ' ', '')) <> \"constraint_name\" then '\"' || \"constraint_name\" || '\"' else \"constraint_name\" end) = '{0}' ".format(p_pkey)
return self.v_connection.Query('''
select "column_name"
from (
select (case when upper(replace(cons.constraint_name, ' ', '')) <> cons.constraint_name then '"' || cons.constraint_name || '"' else cons.constraint_name end) as "constraint_name",
(case when upper(replace(cols.table_name, ' ', '')) <> cols.table_name then '"' || cols.table_name || '"' else cols.table_name end) as "table_name",
(case when upper(replace(cols.column_name, ' ', '')) <> cols.column_name then '"' || cols.column_name || '"' else cols.column_name end) as "column_name",
(case when upper(replace(cons.owner, ' ', '')) <> cons.owner then '"' || cons.owner || '"' else cons.owner end) as "table_schema"
from all_constraints cons,
all_cons_columns cols,
all_tables t
where cons.constraint_type = 'P'
and t.table_name = cols.table_name
and cons.constraint_name = cols.constraint_name
and cons.owner = cols.owner
order by cons.owner,
cols.table_name,
cons.constraint_name,
cols.position
)
where 1 = 1
{0}
'''.format(v_filter), True)
def QueryTablesUniques(self, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{1}' ".format(p_schema, p_table)
elif p_table:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{1}' ".format(self.v_schema, p_table)
elif p_schema:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' ".format(p_schema)
else:
v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' ".format(self.v_schema)
else:
if p_table:
                v_filter = "and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{0}' ".format(p_table)
return self.v_connection.Query('''
select distinct *
from (
            select (case when upper(replace(cons.constraint_name, ' ', '')) <> cons.constraint_name then '"' || cons.constraint_name || '"' else cons.constraint_name end) as "constraint_name",
                   (case when upper(replace(cols.table_name, ' ', '')) <> cols.table_name then '"' || cols.table_name || '"' else cols.table_name end) as "table_name",
                   (case when upper(replace(cons.owner, ' ', '')) <> cons.owner then '"' || cons.owner || '"' else cons.owner end) as "table_schema"
from all_constraints cons,
all_cons_columns cols,
all_tables t
where cons.constraint_type = 'U'
and t.table_name = cols.table_name
and cons.constraint_name = cols.constraint_name
and cons.owner = cols.owner
order by cons.owner,
cols.table_name,
cons.constraint_name
)
where 1 = 1
{0}
'''.format(v_filter), True)
def QueryTablesUniquesColumns(self, p_unique, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
                v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{1}' ".format(p_schema, p_table)
            elif p_table:
                v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{1}' ".format(self.v_schema, p_table)
            elif p_schema:
                v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' ".format(p_schema)
            else:
                v_filter = "and (case when upper(replace(\"table_schema\", ' ', '')) <> \"table_schema\" then '\"' || \"table_schema\" || '\"' else \"table_schema\" end) = '{0}' ".format(self.v_schema)
else:
if p_table:
                v_filter = "and (case when upper(replace(\"table_name\", ' ', '')) <> \"table_name\" then '\"' || \"table_name\" || '\"' else \"table_name\" end) = '{0}' ".format(p_table)
        v_filter = v_filter + "and (case when upper(replace(\"constraint_name\", ' ', '')) <> \"constraint_name\" then '\"' || \"constraint_name\" || '\"' else \"constraint_name\" end) = '{0}' ".format(p_unique)
return self.v_connection.Query('''
select "column_name"
from (
            select (case when upper(replace(cons.constraint_name, ' ', '')) <> cons.constraint_name then '"' || cons.constraint_name || '"' else cons.constraint_name end) as "constraint_name",
                   (case when upper(replace(cols.table_name, ' ', '')) <> cols.table_name then '"' || cols.table_name || '"' else cols.table_name end) as "table_name",
                   (case when upper(replace(cols.column_name, ' ', '')) <> cols.column_name then '"' || cols.column_name || '"' else cols.column_name end) as "column_name",
                   (case when upper(replace(cons.owner, ' ', '')) <> cons.owner then '"' || cons.owner || '"' else cons.owner end) as "table_schema"
from all_constraints cons,
all_cons_columns cols,
all_tables t
where cons.constraint_type = 'U'
and t.table_name = cols.table_name
and cons.constraint_name = cols.constraint_name
and cons.owner = cols.owner
order by cons.owner,
cols.table_name,
cons.constraint_name,
cols.position
)
where 1 = 1
{0}
'''.format(v_filter), True)
def QueryTablesIndexes(self, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' and (case when upper(replace(table_name, ' ', '')) <> table_name then '\"' || table_name || '\"' else table_name end) = '{1}' ".format(p_schema, p_table)
            elif p_table:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' and (case when upper(replace(table_name, ' ', '')) <> table_name then '\"' || table_name || '\"' else table_name end) = '{1}' ".format(self.v_schema, p_table)
            elif p_schema:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(p_schema)
            else:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(self.v_schema)
else:
if p_table:
                v_filter = "and (case when upper(replace(table_name, ' ', '')) <> table_name then '\"' || table_name || '\"' else table_name end) = '{0}' ".format(p_table)
return self.v_connection.Query('''
            select (case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) as "schema_name",
                   (case when upper(replace(table_name, ' ', '')) <> table_name then '"' || table_name || '"' else table_name end) as "table_name",
                   (case when upper(replace(index_name, ' ', '')) <> index_name then '"' || index_name || '"' else index_name end) as "index_name",
case when uniqueness = 'UNIQUE' then 'Unique' else 'Non Unique' end as "uniqueness"
from all_indexes
where 1=1
{0}
order by owner,
table_name,
index_name
'''.format(v_filter), True)
def QueryTablesIndexesColumns(self, p_index, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
                v_filter = "and (case when upper(replace(t.owner, ' ', '')) <> t.owner then '\"' || t.owner || '\"' else t.owner end) = '{0}' and (case when upper(replace(t.table_name, ' ', '')) <> t.table_name then '\"' || t.table_name || '\"' else t.table_name end) = '{1}' ".format(p_schema, p_table)
            elif p_table:
                v_filter = "and (case when upper(replace(t.owner, ' ', '')) <> t.owner then '\"' || t.owner || '\"' else t.owner end) = '{0}' and (case when upper(replace(t.table_name, ' ', '')) <> t.table_name then '\"' || t.table_name || '\"' else t.table_name end) = '{1}' ".format(self.v_schema, p_table)
            elif p_schema:
                v_filter = "and (case when upper(replace(t.owner, ' ', '')) <> t.owner then '\"' || t.owner || '\"' else t.owner end) = '{0}' ".format(p_schema)
            else:
                v_filter = "and (case when upper(replace(t.owner, ' ', '')) <> t.owner then '\"' || t.owner || '\"' else t.owner end) = '{0}' ".format(self.v_schema)
else:
if p_table:
                v_filter = "and (case when upper(replace(t.table_name, ' ', '')) <> t.table_name then '\"' || t.table_name || '\"' else t.table_name end) = '{0}' ".format(p_table)
        v_filter = v_filter + "and (case when upper(replace(t.index_name, ' ', '')) <> t.index_name then '\"' || t.index_name || '\"' else t.index_name end) = '{0}' ".format(p_index)
return self.v_connection.Query('''
            select (case when upper(replace(c.column_name, ' ', '')) <> c.column_name then '"' || c.column_name || '"' else c.column_name end) as "column_name"
from all_indexes t,
all_ind_columns c
where t.table_name = c.table_name
and t.index_name = c.index_name
and t.owner = c.index_owner
{0}
order by c.column_position
'''.format(v_filter), True)
def QueryDataLimited(self, p_query, p_count=-1):
if p_count != -1:
try:
self.v_connection.Open()
v_data = self.v_connection.QueryBlock('select * from ( {0} ) t where rownum <= {1}'.format(p_query, p_count), p_count, True, True)
self.v_connection.Close()
return v_data
except Spartacus.Database.Exception as exc:
try:
self.v_connection.Cancel()
except:
pass
raise exc
else:
return self.v_connection.Query(p_query, True)
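    # Illustrative sketch (hypothetical arguments): for QueryDataLimited('select * from employees', 100),
    # the statement built above and executed through QueryBlock would be
    #
    #     select * from ( select * from employees ) t where rownum <= 100
    #
    # while p_count = -1 runs the query unchanged through Query().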
def QueryTableRecords(self, p_column_list, p_table, p_filter, p_count=-1):
v_limit = ''
if p_count != -1:
            v_limit = ' where rownum <= {0}'.format(p_count)
return self.v_connection.Query('''
select *
from (
select {0}
from {1} t
{2}
)
{3}
'''.format(
p_column_list,
p_table,
p_filter,
v_limit
), True
)
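    # Illustrative sketch of the statement assembled by QueryTableRecords for the
    # hypothetical arguments ('t.id, t.name', 'hr.employees', 'where t.id > 10', 50):
    #
    #     select *
    #     from (
    #     select t.id, t.name
    #     from hr.employees t
    #     where t.id > 10
    #     )
    #      where rownum <= 50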
def QueryFunctions(self, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_schema:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(p_schema)
            else:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(self.v_schema)
return self.v_connection.Query('''
            select (case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) as "schema_name",
                   (case when upper(replace(object_name, ' ', '')) <> object_name then '"' || object_name || '"' else object_name end) as "id",
                   (case when upper(replace(object_name, ' ', '')) <> object_name then '"' || object_name || '"' else object_name end) as "name"
from all_procedures
where object_type = 'FUNCTION'
{0}
order by 2
'''.format(v_filter), True)
def QueryFunctionFields(self, p_function, p_schema):
if p_schema:
v_schema = p_schema
else:
v_schema = self.v_schema
return self.v_connection.Query('''
select (case in_out
when 'IN' then 'I'
when 'OUT' then 'O'
else 'R'
end) as "type",
(case when position = 0
then 'return ' || data_type
else argument_name || ' ' || data_type
end) as "name",
position+1 as "seq"
from all_arguments
            where (case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) = '{0}'
              and (case when upper(replace(object_name, ' ', '')) <> object_name then '"' || object_name || '"' else object_name end) = '{1}'
order by 3
'''.format(v_schema, p_function), True)
def GetFunctionDefinition(self, p_function):
v_body = '-- DROP FUNCTION {0};\n'.format(p_function)
v_body = v_body + self.v_connection.ExecuteScalar("select dbms_lob.substr(dbms_metadata.get_ddl('FUNCTION', '{0}'), 4000, 1) from dual".format(p_function))
return v_body
def QueryProcedures(self, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_schema:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(p_schema)
            else:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(self.v_schema)
return self.v_connection.Query('''
            select (case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) as "schema_name",
                   (case when upper(replace(object_name, ' ', '')) <> object_name then '"' || object_name || '"' else object_name end) as "id",
                   (case when upper(replace(object_name, ' ', '')) <> object_name then '"' || object_name || '"' else object_name end) as "name"
from all_procedures
where object_type = 'PROCEDURE'
{0}
order by 2
'''.format(v_filter), True)
def QueryProcedureFields(self, p_procedure, p_schema):
if p_schema:
v_schema = p_schema
else:
v_schema = self.v_schema
return self.v_connection.Query('''
select (case in_out
when 'IN' then 'I'
when 'OUT' then 'O'
else 'R'
end) as "type",
argument_name || ' ' || data_type as "name",
position+1 as "seq"
from all_arguments
            where (case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) = '{0}'
              and (case when upper(replace(object_name, ' ', '')) <> object_name then '"' || object_name || '"' else object_name end) = '{1}'
order by 3
'''.format(v_schema, p_procedure), True)
def GetProcedureDefinition(self, p_procedure):
v_body = '-- DROP PROCEDURE {0};\n'.format(p_procedure)
v_body = v_body + self.v_connection.ExecuteScalar("select dbms_lob.substr(dbms_metadata.get_ddl('PROCEDURE', '{0}'), 4000, 1) from dual".format(p_procedure))
return v_body
def QuerySequences(self, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_schema:
                v_filter = "and (case when upper(replace(sequence_owner, ' ', '')) <> sequence_owner then '\"' || sequence_owner || '\"' else sequence_owner end) = '{0}' ".format(p_schema)
            else:
                v_filter = "and (case when upper(replace(sequence_owner, ' ', '')) <> sequence_owner then '\"' || sequence_owner || '\"' else sequence_owner end) = '{0}' ".format(self.v_schema)
v_table = self.v_connection.Query('''
            select (case when upper(replace(sequence_owner, ' ', '')) <> sequence_owner then '"' || sequence_owner || '"' else sequence_owner end) as "sequence_schema",
                   (case when upper(replace(sequence_name, ' ', '')) <> sequence_name then '"' || sequence_name || '"' else sequence_name end) as "sequence_name"
from all_sequences
where 1 = 1
{0}
order by sequence_owner,
sequence_name
'''.format(v_filter), True)
return v_table
def QueryViews(self, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_schema:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(p_schema)
            else:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(self.v_schema)
return self.v_connection.Query('''
            select (case when upper(replace(view_name, ' ', '')) <> view_name then '"' || view_name || '"' else view_name end) as "table_name",
                   (case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) as "table_schema"
from all_views
where 1 = 1
{0}
order by 2, 1
'''.format(v_filter), True)
def QueryViewFields(self, p_table=None, p_all_schemas=False, p_schema=None):
v_filter = ''
if not p_all_schemas:
if p_table and p_schema:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' and (case when upper(replace(table_name, ' ', '')) <> table_name then '\"' || table_name || '\"' else table_name end) = '{1}' ".format(p_schema, p_table)
            elif p_table:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' and (case when upper(replace(table_name, ' ', '')) <> table_name then '\"' || table_name || '\"' else table_name end) = '{1}' ".format(self.v_schema, p_table)
            elif p_schema:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(p_schema)
            else:
                v_filter = "and (case when upper(replace(owner, ' ', '')) <> owner then '\"' || owner || '\"' else owner end) = '{0}' ".format(self.v_schema)
else:
if p_table:
                v_filter = "and (case when upper(replace(table_name, ' ', '')) <> table_name then '\"' || table_name || '\"' else table_name end) = '{0}' ".format(p_table)
return self.v_connection.Query('''
            select (case when upper(replace(table_name, ' ', '')) <> table_name then '"' || table_name || '"' else table_name end) as "table_name",
                   (case when upper(replace(column_name, ' ', '')) <> column_name then '"' || column_name || '"' else column_name end) as "column_name",
case when data_type = 'NUMBER' and data_scale = '0' then 'INTEGER' else data_type end as "data_type",
case nullable when 'Y' then 'YES' else 'NO' end as "nullable",
data_length as "data_length",
data_precision as "data_precision",
data_scale as "data_scale"
from all_tab_columns
where 1 = 1
{0}
order by table_name, column_id
'''.format(v_filter), True)
def GetViewDefinition(self, p_view, p_schema):
if p_schema:
v_schema = p_schema
else:
v_schema = self.v_schema
return '''CREATE OR REPLACE VIEW {0}.{1} AS
{2}
'''.format(v_schema, p_view,
self.v_connection.ExecuteScalar('''
select text
from all_views
                where (case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) = '{0}'
                  and (case when upper(replace(view_name, ' ', '')) <> view_name then '"' || view_name || '"' else view_name end) = '{1}'
'''.format(v_schema, p_view)
))
def TemplateCreateRole(self):
return Template('''CREATE { ROLE | USER } name
--NOT IDENTIFIED
--IDENTIFIED BY password
--DEFAULT TABLESPACE tablespace
--TEMPORARY TABLESPACE tablespace
--QUOTA { size | UNLIMITED } ON tablespace
--PASSWORD EXPIRE
--ACCOUNT { LOCK | UNLOCK }
''')
def TemplateAlterRole(self):
return Template('''ALTER { ROLE | USER } #role_name#
--NOT IDENTIFIED
--IDENTIFIED BY password
--DEFAULT TABLESPACE tablespace
--TEMPORARY TABLESPACE tablespace
--QUOTA { size | UNLIMITED } ON tablespace
--DEFAULT ROLE { role [, role ] ... | ALL [ EXCEPT role [, role ] ... ] | NONE }
--PASSWORD EXPIRE
--ACCOUNT { LOCK | UNLOCK }
''')
def TemplateDropRole(self):
return Template('''DROP { ROLE | USER } #role_name#
--CASCADE
''')
def TemplateCreateTablespace(self):
return Template('''CREATE { SMALLFILE | BIGFILE }
[ TEMPORARY | UNDO ] TABLESPACE name
[ DATAFILE | TEMPFILE ] 'filename' [ SIZE size ] [ REUSE ]
--AUTOEXTEND OFF | AUTOEXTEND ON [ NEXT size ]
--MAXSIZE [ size | UNLIMITED ]
--MINIMUM EXTENT size
--BLOCKSIZE size
--LOGGING | NOLOGGING | FORCE LOGGING
--ENCRYPTION [ USING 'algorithm' ]
--ONLINE | OFFLINE
--EXTENT MANAGEMENT LOCAL { AUTOALLOCATE | UNIFORM [ SIZE size ] }
--SEGMENT SPACE MANAGEMENT { AUTO | MANUAL }
--FLASHBACK { ON | OFF }
--RETENTION { GUARANTEE | NOGUARANTEE }
''')
def TemplateAlterTablespace(self):
return Template('''ALTER TABLESPACE #tablespace_name#
--MINIMUM EXTENT size
--RESIZE size
--COALESCE
--SHRINK SPACE [ KEEP size ]
--RENAME TO new_name
--[ BEGIN | END ] BACKUP
--ADD [ DATAFILE | TEMPFILE ] 'filename' [ SIZE size ] [ REUSE AUTOEXTEND OFF | AUTOEXTEND ON [ NEXT size ] ] [ MAXSIZE [ size | UNLIMITED ] ]
--DROP [ DATAFILE | TEMPFILE ] 'filename'
--SHRINK TEMPFILE 'filename' [ KEEP size ]
--RENAME DATAFILE 'filename' TO 'new_filename'
--[ DATAFILE | TEMPFILE ] [ ONLINE | OFFLINE ]
--[ NO ] FORCE LOGGING
--ONLINE
--OFFLINE [ NORMAL | TEMPORARY | IMMEDIATE ]
--READ [ ONLY | WRITE ]
--PERMANENT | TEMPORARY
--AUTOEXTEND OFF | AUTOEXTEND ON [ NEXT size ]
--MAXSIZE [ size | UNLIMITED ]
--FLASHBACK { ON | OFF }
--RETENTION { GUARANTEE | NOGUARANTEE }
''')
def TemplateDropTablespace(self):
return Template('''DROP TABLESPACE #tablespace_name#
--INCLUDING CONTENTS
--[ AND | KEEP ] DATAFILES
--CASCADE CONSTRAINTS
''')
def TemplateCreateFunction(self):
return Template('''CREATE OR REPLACE FUNCTION #schema_name#.name
--(
-- [ argmode ] [ argname ] argtype [ { DEFAULT | = } default_expr ]
--)
--RETURN rettype
--PIPELINED
AS
-- variables
-- pragmas
BEGIN
-- definition
END;
''')
def TemplateDropFunction(self):
return Template('DROP FUNCTION #function_name#')
def TemplateCreateProcedure(self):
return Template('''CREATE OR REPLACE PROCEDURE #schema_name#.name
--(
-- [ argmode ] [ argname ] argtype [ { DEFAULT | = } default_expr ]
--)
AS
-- variables
-- pragmas
BEGIN
-- definition
END;
''')
def TemplateDropProcedure(self):
return Template('DROP PROCEDURE #function_name#')
def TemplateCreateTable(self):
return Template('''CREATE
--GLOBAL TEMPORARY
TABLE #schema_name#.table_name
--AS query
(
column_name data_type
--SORT
--DEFAULT expr
--ENCRYPT [ USING 'encrypt_algorithm' ] [ IDENTIFIED BY password ] [ [NO] SALT ]
--CONSTRAINT constraint_name
--NOT NULL
--NULL
--UNIQUE
--PRIMARY KEY
--REFERENCES reftable [ ( refcolumn ) ] [ ON DELETE { CASCADE | SET NULL } ]
--CHECK ( condition )
--DEFERRABLE
--NOT DEFERRABLE
--INITIALLY IMMEDIATE
--INITIALLY DEFERRED
--ENABLE
--DISABLE
--VALIDATE
--NOVALIDATE
--RELY
--NORELY
--USING INDEX index_name
)
--ON COMMIT DELETE ROWS
--ON COMMIT PRESERVE ROWS
--PCTFREE integer
--PCTUSED integer
--INITRANS integer
--STORAGE ( { [ INITIAL size_clause ] | [ NEXT size_clause ] | [ MINEXTENTS integer ] | [ MAXEXTENTS { integer | UNLIMITED } ] } )
--TABLESPACE tablespace
--LOGGING
--NOLOGGING
--COMPRESS
--NOCOMPRESS
--SCOPE IS scope_table
--WITH ROWID
--SCOPE FOR ( { refcol | refattr } ) IS scope_table
--REF ( { refcol | refattr } ) WITH ROWID
--GROUP log_group ( column [ NO LOG ] ) [ ALWAYS ]
--DATA ( { ALL | PRIMARY KEY | UNIQUE | FOREIGN KEY } ) COLUMNS
''')
def TemplateAlterTable(self):
return Template('''ALTER TABLE #table_name#
--ADD column_name data_type
--MODIFY (column_name [ data_type ] )
--SORT
--DEFAULT expr
--ENCRYPT [ USING 'encrypt_algorithm' ] [ IDENTIFIED BY password ] [ [NO] SALT ]
--CONSTRAINT constraint_name
--NOT NULL
--NULL
--UNIQUE
--PRIMARY KEY
--REFERENCES reftable [ ( refcolumn ) ] [ ON DELETE { CASCADE | SET NULL } ]
--CHECK ( condition )
--DEFERRABLE
--NOT DEFERRABLE
--INITIALLY IMMEDIATE
--INITIALLY DEFERRED
--ENABLE
--DISABLE
--VALIDATE
--NOVALIDATE
--RELY
--NORELY
--USING INDEX index_name
--SET UNUSED COLUMN column [ { CASCADE CONSTRAINTS | INVALIDADE } ]
--DROP COLUMN column [ { CASCADE CONSTRAINTS | INVALIDADE } ] [ CHECKPOINT integer ]
--DROP { UNUSED COLUMNS | COLUMNS CONTINUE } [ CHECKPOINT integer ]
--RENAME COLUMN old_name TO new_name
--ADD CONSTRAINT constraint_name
--NOT NULL
--NULL
--UNIQUE
--PRIMARY KEY
--REFERENCES reftable [ ( refcolumn ) ] [ ON DELETE { CASCADE | SET NULL } ]
--CHECK ( condition )
--MODIFY [ CONSTRAINT constraint_name ] [ PRIMARY KEY ] [ UNIQUE ( column ) ]
--DEFERRABLE
--NOT DEFERRABLE
--INITIALLY IMMEDIATE
--INITIALLY DEFERRED
--ENABLE
--DISABLE
--VALIDATE
--NOVALIDATE
--RELY
--NORELY
--USING INDEX index_name
--RENAME CONSTRAINT old_name TO new_name
--DROP PRIMARY KEY [ CASCADE ] [ { KEEP | DROP } INDEX ]
--DROP UNIQUE ( column ) [ CASCADE ] [ { KEEP | DROP } INDEX ]
--DROP CONSTRAINT constraint_name [ CASCADE ]
--PCTFREE integer
--PCTUSED integer
--INITRANS integer
--STORAGE ( { [ INITIAL size_clause ] | [ NEXT size_clause ] | [ MINEXTENTS integer ] | [ MAXEXTENTS { integer | UNLIMITED } ] } )
--TABLESPACE tablespace
--LOGGING
--NOLOGGING
--COMPRESS
--NOCOMPRESS
--CACHE
--NOCACHE
--READ ONLY
--READ WRITE
--SCOPE IS scope_table
--WITH ROWID
--SCOPE FOR ( { refcol | refattr } ) IS scope_table
--REF ( { refcol | refattr } ) WITH ROWID
--GROUP log_group ( column [ NO LOG ] ) [ ALWAYS ]
--DATA ( { ALL | PRIMARY KEY | UNIQUE | FOREIGN KEY } ) COLUMNS
--NOPARALLEL
--PARALLEL integer
''')
def TemplateDropTable(self):
return Template('''DROP TABLE #table_name#
--CASCADE CONSTRAINTS
--PURGE
''')
def TemplateCreateColumn(self):
return Template('''ALTER TABLE #table_name#
ADD name data_type
--SORT
--DEFAULT expr
--NOT NULL
''')
def TemplateAlterColumn(self):
return Template('''ALTER TABLE #table_name#
--MODIFY #column_name# { datatype | DEFAULT expr | [ NULL | NOT NULL ]}
--RENAME COLUMN #column_name# TO new_name
'''
)
def TemplateDropColumn(self):
return Template('''ALTER TABLE #table_name#
DROP COLUMN #column_name#
--CASCADE CONSTRAINTS
--INVALIDATE
''')
def TemplateCreatePrimaryKey(self):
return Template('''ALTER TABLE #table_name#
ADD CONSTRAINT name
PRIMARY KEY ( column_name [, ... ] )
--[ NOT ] DEFERRABLE
--INITIALLY { IMMEDIATE | DEFERRED }
--RELY | NORELY
--USING INDEX index_name
--ENABLE
--DISABLE
--VALIDATE
--NOVALIDATE
--EXCEPTIONS INTO table_name
''')
def TemplateDropPrimaryKey(self):
return Template('''ALTER TABLE #table_name#
DROP CONSTRAINT #constraint_name#
--CASCADE
''')
def TemplateCreateUnique(self):
return Template('''ALTER TABLE #table_name#
ADD CONSTRAINT name
UNIQUE ( column_name [, ... ] )
--[ NOT ] DEFERRABLE
--INITIALLY { IMMEDIATE | DEFERRED }
--RELY | NORELY
--USING INDEX index_name
--ENABLE
--DISABLE
--VALIDATE
--NOVALIDATE
--EXCEPTIONS INTO table_name
''')
def TemplateDropUnique(self):
return Template('''ALTER TABLE #table_name#
DROP CONSTRAINT #constraint_name#
--CASCADE
''')
def TemplateCreateForeignKey(self):
return Template('''ALTER TABLE #table_name#
ADD CONSTRAINT name
FOREIGN KEY ( column_name [, ... ] )
REFERENCES reftable [ ( refcolumn [, ... ] ) ]
--[ NOT ] DEFERRABLE
--INITIALLY { IMMEDIATE | DEFERRED }
--RELY | NORELY
--USING INDEX index_name
--ENABLE
--DISABLE
--VALIDATE
--NOVALIDATE
--EXCEPTIONS INTO table_name
''')
def TemplateDropForeignKey(self):
return Template('''ALTER TABLE #table_name#
DROP CONSTRAINT #constraint_name#
--CASCADE
''')
def TemplateCreateIndex(self):
return Template('''CREATE [ UNIQUE ] INDEX name
ON #table_name#
( { column_name | ( expression ) } [ ASC | DESC ] )
--ONLINE
--TABLESPACE tablespace
--[ SORT | NOSORT ]
--REVERSE
--[ VISIBLE | INVISIBLE ]
--[ NOPARALLEL | PARALLEL integer ]
''')
def TemplateAlterIndex(self):
return Template('''ALTER INDEX #index_name#
--COMPILE
--[ ENABLE | DISABLE ]
--UNUSABLE
--[ VISIBLE | INVISIBLE ]
--RENAME TO new_name
--COALESCE
--[ MONITORING | NOMONITORING ] USAGE
--UPDATE BLOCK REFERENCES
''')
def TemplateDropIndex(self):
return Template('''DROP INDEX #index_name#
--FORCE
''')
def TemplateCreateSequence(self):
return Template('''CREATE SEQUENCE #schema_name#.name
--INCREMENT BY increment
--MINVALUE minvalue | NOMINVALUE
--MAXVALUE maxvalue | NOMAXVALUE
--START WITH start
--CACHE cache | NOCACHE
--CYCLE | NOCYCLE
--ORDER | NOORDER
''')
def TemplateAlterSequence(self):
return Template('''ALTER SEQUENCE #sequence_name#
--INCREMENT BY increment
--MINVALUE minvalue | NOMINVALUE
--MAXVALUE maxvalue | NOMAXVALUE
--CACHE cache | NOCACHE
--CYCLE | NOCYCLE
--ORDER | NOORDER
''')
def TemplateDropSequence(self):
return Template('DROP SEQUENCE #sequence_name#')
def TemplateCreateView(self):
return Template('''CREATE OR REPLACE VIEW #schema_name#.name AS
SELECT ...
''')
def TemplateDropView(self):
return Template('''DROP VIEW #view_name#
--CASCADE CONSTRAINTS
''')
def TemplateSelect(self, p_schema, p_table):
v_sql = 'SELECT t.'
v_fields = self.QueryTablesFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
v_sql += '\nFROM {0}.{1} t'.format(p_schema, p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table, False, p_schema)
if len(v_pk.Rows) > 0:
v_fields = self.QueryTablesPrimaryKeysColumns(v_pk.Rows[0]['constraint_name'], p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql += '\nORDER BY t.'
v_sql += '\n , t.'.join([r['column_name'] for r in v_fields.Rows])
return Template(v_sql)
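    # Illustrative output of TemplateSelect for a hypothetical table HR.EMP with
    # columns ID and NAME and a primary key on ID:
    #
    #     SELECT t.ID
    #          , t.NAME
    #     FROM HR.EMP t
    #     ORDER BY t.ID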
def TemplateInsert(self, p_schema, p_table):
v_fields = self.QueryTablesFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql = 'INSERT INTO {0}.{1} (\n'.format(p_schema, p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table, False, p_schema)
if len(v_pk.Rows) > 0:
v_table_pk_fields = self.QueryTablesPrimaryKeysColumns(v_pk.Rows[0]['constraint_name'], p_table, False, p_schema)
v_pk_fields = [r['column_name'] for r in v_table_pk_fields.Rows]
v_values = []
v_first = True
for r in v_fields.Rows:
if v_first:
v_sql += ' {0}'.format(r['column_name'])
if r['column_name'] in v_pk_fields:
v_values.append(' ? -- {0} {1} PRIMARY KEY'.format(r['column_name'], r['data_type']))
elif r['nullable'] == 'YES':
v_values.append(' ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append(' ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_first = False
else:
v_sql += '\n , {0}'.format(r['column_name'])
if r['column_name'] in v_pk_fields:
v_values.append('\n , ? -- {0} {1} PRIMARY KEY'.format(r['column_name'], r['data_type']))
elif r['nullable'] == 'YES':
v_values.append('\n , ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append('\n , ? -- {0} {1}'.format(r['column_name'], r['data_type']))
else:
v_values = []
v_first = True
for r in v_fields.Rows:
if v_first:
v_sql += ' {0}'.format(r['column_name'])
if r['nullable'] == 'YES':
v_values.append(' ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append(' ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_first = False
else:
v_sql += '\n , {0}'.format(r['column_name'])
if r['nullable'] == 'YES':
v_values.append('\n , ? -- {0} {1} NULLABLE'.format(r['column_name'], r['data_type']))
else:
v_values.append('\n , ? -- {0} {1}'.format(r['column_name'], r['data_type']))
v_sql += '\n) VALUES (\n'
for v in v_values:
v_sql += v
v_sql += '\n)'
else:
v_sql = ''
return Template(v_sql)
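    # Illustrative output of TemplateInsert for a hypothetical table HR.EMP with
    # ID (NUMBER, primary key) and NAME (VARCHAR2, nullable):
    #
    #     INSERT INTO HR.EMP (
    #           ID
    #         , NAME
    #     ) VALUES (
    #           ? -- ID NUMBER PRIMARY KEY
    #         , ? -- NAME VARCHAR2 NULLABLE
    #     )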
def TemplateUpdate(self, p_schema, p_table):
v_fields = self.QueryTablesFields(p_table, False, p_schema)
if len(v_fields.Rows) > 0:
v_sql = 'UPDATE {0}.{1}\nSET '.format(p_schema, p_table)
v_pk = self.QueryTablesPrimaryKeys(p_table, False, p_schema)
if len(v_pk.Rows) > 0:
v_table_pk_fields = self.QueryTablesPrimaryKeysColumns(v_pk.Rows[0]['constraint_name'], p_table, False, p_schema)
v_pk_fields = [r['column_name'] for r in v_table_pk_fields.Rows]
v_values = []
v_first = True
for r in v_fields.Rows:
if v_first:
if r['column_name'] in v_pk_fields:
v_sql += '{0} = ? -- {1} PRIMARY KEY'.format(r['column_name'], r['data_type'])
elif r['nullable'] == 'YES':
v_sql += '{0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '{0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_first = False
else:
if r['column_name'] in v_pk_fields:
v_sql += '\n , {0} = ? -- {1} PRIMARY KEY'.format(r['column_name'], r['data_type'])
elif r['nullable'] == 'YES':
v_sql += '\n , {0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '\n , {0} = ? -- {1}'.format(r['column_name'], r['data_type'])
else:
v_values = []
v_first = True
for r in v_fields.Rows:
if v_first:
if r['nullable'] == 'YES':
v_sql += '{0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '{0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_first = False
else:
if r['nullable'] == 'YES':
v_sql += '\n , {0} = ? -- {1} NULLABLE'.format(r['column_name'], r['data_type'])
else:
v_sql += '\n , {0} = ? -- {1}'.format(r['column_name'], r['data_type'])
v_sql += '\nWHERE condition'
else:
v_sql = ''
return Template(v_sql)
def TemplateDelete(self):
return Template('''DELETE FROM #table_name#
WHERE condition
''')
def GetProperties(self, p_schema, p_object, p_type):
if p_type == 'role':
v_table1 = self.v_connection.Query('''
select username as "User",
user_id as "ID",
account_status as "Status",
lock_date as "Lock Date",
expiry_date as "Expiry Date",
default_tablespace as "Default Tablespace",
temporary_tablespace as "Temporary Tablespace",
created as "Creation Date",
initial_rsrc_consumer_group as "Group",
authentication_type as "Authentication Type"
from dba_users
                where (case when upper(replace(username, ' ', '')) <> username then '"' || username || '"' else username end) = '{0}'
'''.format(p_object), True, True).Transpose('Property', 'Value')
elif p_type == 'tablespace':
v_table1 = self.v_connection.Query('''
select tablespace_name as "Tablespace",
block_size as "Block Size",
initial_extent as "Initial Extent",
next_extent as "Next Extent",
min_extents as "Min Extents",
max_extents as "Max Extents",
max_size as "Max Size",
pct_increase as "Percent Increase",
min_extlen as "Min Extent Length",
status as "Status",
contents as "Contents",
logging as "Logging",
force_logging as "Force Logging",
extent_management as "Extent Management",
allocation_type as "Allocation Type",
plugged_in as "Plugged In",
segment_space_management as "Segment Space Management",
def_tab_compression as "Deferrable Compression",
retention as "Retention",
bigfile as "Big File",
predicate_evaluation as "Predicate Evaluation",
encrypted as "Encrypted",
compress_for as "Compression Format"
from dba_tablespaces
                where (case when upper(replace(tablespace_name, ' ', '')) <> tablespace_name then '"' || tablespace_name || '"' else tablespace_name end) = '{0}'
'''.format(p_object), True, True).Transpose('Property', 'Value')
else:
v_table1 = self.v_connection.Query('''
select owner as "Owner",
object_name as "Object Name",
object_id as "Object ID",
object_type as "Object Type",
created as "Created",
last_ddl_time as "Last DDL Time",
timestamp as "Timestamp",
status as "Status",
temporary as "Temporary",
generated as "Generated",
secondary as "Secondary"
from all_objects
                where (case when upper(replace(owner, ' ', '')) <> owner then '"' || owner || '"' else owner end) = '{0}'
                  and (case when upper(replace(object_name, ' ', '')) <> object_name then '"' || object_name || '"' else object_name end) = '{1}'
and subobject_name is null
'''.format(self.v_schema, p_object), True, True).Transpose('Property', 'Value')
if p_type == 'sequence':
v_table2 = self.v_connection.Query('''
select last_number as "Last Value",
min_value as "Min Value",
max_value as "Max Value",
increment_by as "Increment By",
                       cycle_flag as "Is Cycled",
order_flag as "Is Ordered",
cache_size as "Cache Size"
from all_sequences
                where (case when upper(replace(sequence_owner, ' ', '')) <> sequence_owner then '"' || sequence_owner || '"' else sequence_owner end) = '{0}'
                  and (case when upper(replace(sequence_name, ' ', '')) <> sequence_name then '"' || sequence_name || '"' else sequence_name end) = '{1}'
'''.format(self.v_schema, p_object), True, True).Transpose('Property', 'Value')
v_table1.Merge(v_table2)
return v_table1
def GetDDL(self, p_schema, p_table, p_object, p_type):
if p_type == 'role' or p_type == 'tablespace' or p_type == 'database':
return ' '
else:
return self.v_connection.ExecuteScalar('''
select dbms_lob.substr(dbms_metadata.get_ddl(object_type, object_name), 4000, 1) as ddl
from user_objects
                where (case when upper(replace(object_name, ' ', '')) <> object_name then '"' || object_name || '"' else object_name end) = '{0}'
'''.format(p_object))
def GetAutocompleteValues(self, p_columns, p_filter):
return None
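    # Minimal usage sketch (hypothetical variable names; assumes the enclosing class
    # was constructed elsewhere with a working Oracle connection in self.v_connection
    # and a default schema in self.v_schema):
    #
    #     v_tables = v_metadata.QueryTables()                     # tables of the default schema
    #     v_columns = v_metadata.QueryTablesFields('EMP')         # columns of table EMP
    #     v_select_template = v_metadata.TemplateSelect('HR', 'EMP')  # ready-to-edit SELECT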
| mit | -7,605,886,198,247,150,000 | 50.492571 | 404 | 0.543508 | false |
miing/mci_migo_packages_gargoyle | gargoyle/signals.py | 9 | 2918 | import django.dispatch
#: This signal is sent when a switch is added (similar to Django's post_save, when
#: created is True).
#:
#: Example subscriber::
#:
#: def switch_added_callback(sender, request, switch, **extra):
#: logging.debug('Switch was added: %r', switch.label)
#:
#: from gargoyle.signals import switch_added
#: switch_added.connect(switch_added_callback)
switch_added = django.dispatch.Signal(providing_args=["request", "switch"])
#: This signal is sent when a switch is deleted (similar to Django's post_delete).
#:
#: Example subscriber::
#:
#: def switch_deleted_callback(sender, request, switch, **extra):
#: logging.debug('Switch was deleted: %r', switch.label)
#:
#: from gargoyle.signals import switch_deleted
#: switch_deleted.connect(switch_deleted_callback)
switch_deleted = django.dispatch.Signal(providing_args=["request", "switch"])
#: This signal is sent when a switch is updated (similar to Django's post_save, when
#: created is False).
#:
#: Example subscriber::
#:
#: def switch_updated_callback(sender, request, switch, **extra):
#: logging.debug('Switch was updated: %r', switch.label)
#:
#: from gargoyle.signals import switch_updated
#: switch_updated.connect(switch_updated_callback)
switch_updated = django.dispatch.Signal(providing_args=["request", "switch", "changes"])
#: This signal is sent when a switch's status is updated.
#:
#: Example subscriber::
#:
#: def switch_status_updated_callback(sender, request, switch, status, **extra):
#: logging.debug('Switch has updated status: %r; %r', switch.label, status)
#:
#: from gargoyle.signals import switch_status_updated
#: switch_status_updated.connect(switch_status_updated_callback)
switch_status_updated = django.dispatch.Signal(providing_args=["request", "switch", "status", "old_status"])
#: This signal is sent when a condition is added to a switch.
#:
#: Example subscriber::
#:
#: def switch_condition_added_callback(sender, request, switch, condition, **extra):
#: logging.debug('Switch has new condition: %r; %r', switch.label, condition)
#:
#: from gargoyle.signals import switch_condition_added
#: switch_condition_added.connect(switch_condition_added_callback)
switch_condition_added = django.dispatch.Signal(providing_args=["request", "switch", "condition"])
#: This signal is sent when a condition is removed from a switch.
#:
#: Example subscriber::
#:
#:     def switch_condition_removed_callback(sender, request, switch, condition, **extra):
#:         logging.debug('Switch has removed condition: %r; %r', switch.label, condition)
#:
#:     from gargoyle.signals import switch_condition_removed
#:     switch_condition_removed.connect(switch_condition_removed_callback)
switch_condition_removed = django.dispatch.Signal(providing_args=["request", "switch", "condition"])
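# Example of wiring a subscriber with Django's ``receiver`` decorator instead of
# calling ``connect()`` directly (illustrative; the callback name is hypothetical)::
#
#     from django.dispatch import receiver
#     from gargoyle.signals import switch_updated
#
#     @receiver(switch_updated)
#     def log_switch_update(sender, request, switch, changes, **extra):
#         logging.debug('Switch %r changed: %r', switch.label, changes)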
| apache-2.0 | 6,150,923,653,719,783,000 | 41.289855 | 108 | 0.701851 | false |
SnakeJenny/TensorFlow | tensorflow/contrib/distributions/python/ops/distribution.py | 7 | 37518 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape_tensor", "batch_shape", "event_shape_tensor", "event_shape",
"sample", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "stddev", "mode",
"covariance"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create a
# non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str = old_str or ""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
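# Illustrative use of _update_docstring (hypothetical strings):
#
#   _update_docstring("Does a thing.\n\nArgs:\n  x: input.", "Extra note.")
#
# indents "Extra note." by four spaces and splices it in just before the final
# "Args:" line; if no "Args:" section exists, the note is appended at the end.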
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
class ReparameterizationType(object):
"""Instances of this class represent how sampling is reparameterized.
  Two static instances exist in the distributions library, signifying
one of two possible properties for samples from a distribution:
`FULLY_REPARAMETERIZED`: Samples from the distribution are fully
reparameterized, and straight-through gradients are supported.
`NOT_REPARAMETERIZED`: Samples from the distribution are not fully
reparameterized, and straight-through gradients are either partially
unsupported or are not supported at all. In this case, for purposes of
e.g. RL or variational inference, it is generally safest to wrap the
sample results in a `stop_gradients` call and instead use policy
gradients / surrogate loss instead.
"""
def __init__(self, rep_type):
self._rep_type = rep_type
def __repr__(self):
    return "<Reparameterization Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
    Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
other: Object to compare against.
Returns:
`self is other`.
"""
return self is other
# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED")
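# Illustrative check of a distribution's reparameterization property (assumes a
# concrete subclass such as tf.contrib.distributions.Normal and an imported `tf`):
#
#   dist = tf.contrib.distributions.Normal(loc=0., scale=1.)
#   if dist.reparameterization_type == FULLY_REPARAMETERIZED:
#     loss = tf.reduce_mean(tf.square(dist.sample(10)))  # gradients flow to loc/scale
#   else:
#     loss = surrogate_loss(dist)  # hypothetical fallback for score-function methods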
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
#### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
#### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflect this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.event_shape
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape_tensor()
# Sampling returns a sample per distribution. `samples` has shape
# [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5,
# batch_shape=[2, 2], and event_shape=[].
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
#### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `concentration1` and `concentration0`, and does not have
  a well-defined mode if `concentration1 < 1` or `concentration0 < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
reparameterization_type,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
reparameterization_type: Instance of `ReparameterizationType`.
If `distributions.FULLY_REPARAMETERIZED`, this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution. If `distributions.NOT_REPARAMETERIZED`,
then no such reparameterization is available.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
parameters: Python `dict` of parameters used to instantiate this
`Distribution`.
graph_parents: Python `list` of graph prerequisites of this
`Distribution`.
name: Python `str` name prefixed to Ops created by this class. Default:
subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not tensor_util.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
self._dtype = dtype
self._reparameterization_type = reparameterization_type
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters or {}
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`.
Subclasses should override class method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. `TensorShape`) shapes.
This is a class method that describes what key/value arguments are required
to instantiate the given `Distribution` so that a particular shape is
returned for that instance's call to `sample()`. Assumes that the sample's
shape is known statically.
Subclasses should override class method `_param_shapes` to return
constant-valued tensors when constant values are fed.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
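  # Illustrative use of the two shape helpers above (assumes a concrete subclass,
  # e.g. Normal, that implements `_param_shapes`):
  #
  #   Normal.param_shapes([100])
  #   # ==> {'loc': <int32 Tensor with value [100]>, 'scale': <int32 Tensor with value [100]>}
  #   Normal.param_static_shapes([100])
  #   # ==> {'loc': TensorShape([100]), 'scale': TensorShape([100])}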
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
# Remove "self", "__class__", or other special variables. These can appear
# if the subclass used `parameters = locals()`.
return dict((k, v) for k, v in self._parameters.items()
if not k.startswith("__") and k != "self")
@property
def reparameterization_type(self):
"""Describes how samples from the distribution are reparameterized.
Currently this is one of the static instances
`distributions.FULLY_REPARAMETERIZED`
or `distributions.NOT_REPARAMETERIZED`.
Returns:
An instance of `ReparameterizationType`.
"""
return self._reparameterization_type
@property
def allow_nan_stats(self):
"""Python `bool` describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g. the mean for
Student's T for df = 1 is undefined (no clear way to say it is either + or -
infinity), so the variance = E[(X - mean)**2] is also undefined.
Returns:
allow_nan_stats: Python `bool`.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python `bool` indicating possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
Note: the copy distribution may continue to depend on the original
initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
return type(self)(**parameters)
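  # Illustrative use of `copy` (hypothetical Normal distribution):
  #
  #   n = Normal(loc=0., scale=1., validate_args=False)
  #   n_checked = n.copy(validate_args=True)  # same loc/scale, argument checking on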
def _batch_shape_tensor(self):
raise NotImplementedError("batch_shape_tensor is not implemented")
def batch_shape_tensor(self, name="batch_shape_tensor"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
if self.batch_shape.is_fully_defined():
return ops.convert_to_tensor(self.batch_shape.as_list(),
dtype=dtypes.int32,
name="batch_shape")
return self._batch_shape_tensor()
def _batch_shape(self):
return tensor_shape.TensorShape(None)
@property
def batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
May be partially defined or unknown.
The batch dimensions are indexes into independent, non-identical
parameterizations of this distribution.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._batch_shape()
def _event_shape_tensor(self):
raise NotImplementedError("event_shape_tensor is not implemented")
def event_shape_tensor(self, name="event_shape_tensor"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
if self.event_shape.is_fully_defined():
return ops.convert_to_tensor(self.event_shape.as_list(),
dtype=dtypes.int32,
name="event_shape")
return self._event_shape_tensor()
def _event_shape(self):
return tensor_shape.TensorShape(None)
@property
def event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
May be partially defined or unknown.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._event_shape()
def is_scalar_event(self, name="is_scalar_event"):
"""Indicates that `event_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_event: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.event_shape, self.event_shape_tensor),
name="is_scalar_event")
def is_scalar_batch(self, name="is_scalar_batch"):
"""Indicates that `batch_shape == []`.
Args:
name: The name to give this op.
Returns:
is_scalar_batch: `bool` scalar `Tensor`.
"""
with self._name_scope(name):
return ops.convert_to_tensor(
self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor),
name="is_scalar_batch")
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def _call_sample_n(self, sample_shape, seed, name, **kwargs):
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
sample_shape, n = self._expand_sample_shape_to_vector(
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
return self._call_sample_n(sample_shape, seed, name)
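  # Shape sketch (illustrative, assuming a scalar-batch, scalar-event
  # distribution `dist`): the result has shape
  # sample_shape + batch_shape + event_shape, e.g.
  #   dist.sample()        # shape []
  #   dist.sample(5)       # shape [5]
  #   dist.sample([3, 2])  # shape [3, 2]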
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def _call_log_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_prob(value, name)
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def _call_prob(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob"):
"""Probability density/mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_prob(value, name)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def _call_log_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_log_cdf(value, name)
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def _call_cdf(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```none
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
return self._call_cdf(value, name)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def _call_log_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log1p(-self.cdf(value, **kwargs))
except NotImplementedError:
raise original_exception
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```none
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_log_survival_function(value, name)
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def _call_survival_function(self, value, name, **kwargs):
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **kwargs)
except NotImplementedError:
raise original_exception
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```none
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
return self._call_survival_function(value, name)
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance.
Variance is defined as,
```none
Var = E[(X - E[X])**2]
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `Var.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
variance: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._variance()
except NotImplementedError as original_exception:
try:
return math_ops.square(self._stddev())
except NotImplementedError:
raise original_exception
def _stddev(self):
raise NotImplementedError("stddev is not implemented")
def stddev(self, name="stddev"):
"""Standard deviation.
Standard deviation is defined as,
```none
stddev = E[(X - E[X])**2]**0.5
```
where `X` is the random variable associated with this distribution, `E`
denotes expectation, and `stddev.shape = batch_shape + event_shape`.
Args:
name: The name to give this op.
Returns:
stddev: Floating-point `Tensor` with shape identical to
`batch_shape + event_shape`, i.e., the same shape as `self.mean()`.
"""
with self._name_scope(name):
try:
return self._stddev()
except NotImplementedError as original_exception:
try:
return math_ops.sqrt(self._variance())
except NotImplementedError:
raise original_exception
def _covariance(self):
raise NotImplementedError("covariance is not implemented")
def covariance(self, name="covariance"):
"""Covariance.
Covariance is (possibly) defined only for non-scalar-event distributions.
For example, for a length-`k`, vector-valued distribution, it is calculated
as,
```none
Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])]
```
where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E`
denotes expectation.
Alternatively, for non-vector, multivariate distributions (e.g.,
matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices
under some vectorization of the events, i.e.,
```none
Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above]
```
where `Cov` is a (batch of) `k' x k'` matrices,
`0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function
mapping indices of this distribution's event dimensions to indices of a
length-`k'` vector.
Args:
name: The name to give this op.
Returns:
covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']`
where the first `n` dimensions are batch coordinates and
`k' = reduce_prod(self.event_shape)`.
"""
with self._name_scope(name):
return self._covariance()
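  # Illustrative example (not from the original source): for a hypothetical
  # length-3 vector-valued distribution with batch_shape [2], covariance()
  # returns a Tensor of shape [2, 3, 3], i.e. batch_shape + [k', k'] with
  # k' = reduce_prod(event_shape) = 3.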
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
([] if values is None else values) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape_to_vector(self, x, name):
"""Helper to `sample` which ensures input is 1D."""
x_static_val = tensor_util.constant_value(x)
if x_static_val is None:
prod = math_ops.reduce_prod(x)
else:
prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype())
ndims = x.get_shape().ndims # != sample_ndims
if ndims is None:
# Maybe expand_dims.
ndims = array_ops.rank(x)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array([1], dtype=np.int32),
array_ops.shape(x))
x = array_ops.reshape(x, expanded_shape)
elif ndims == 0:
# Definitely expand_dims.
if x_static_val is not None:
x = ops.convert_to_tensor(
np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()),
name=name)
else:
x = array_ops.reshape(x, [1])
elif ndims != 1:
raise ValueError("Input is neither scalar nor vector.")
return x, prod
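  # Behavior sketch (illustrative): a scalar sample_shape such as 5 is
  # expanded to the vector [5] with prod == 5; a vector such as [3, 2] is
  # passed through unchanged with prod == 6; rank > 1 input raises the
  # ValueError above.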
def _set_sample_static_shape(self, x, sample_shape):
"""Helper to `sample`; sets static shape info."""
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(sample_shape))
ndims = x.get_shape().ndims
sample_ndims = sample_shape.ndims
batch_ndims = self.batch_shape.ndims
event_ndims = self.event_shape.ndims
# Infer rank(x).
if (ndims is None and
sample_ndims is not None and
batch_ndims is not None and
event_ndims is not None):
ndims = sample_ndims + batch_ndims + event_ndims
x.set_shape([None] * ndims)
# Infer sample shape.
if ndims is not None and sample_ndims is not None:
shape = sample_shape.concatenate([None]*(ndims - sample_ndims))
x.set_shape(x.get_shape().merge_with(shape))
# Infer event shape.
if ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape(
[None]*(ndims - event_ndims)).concatenate(self.event_shape)
x.set_shape(x.get_shape().merge_with(shape))
# Infer batch shape.
if batch_ndims is not None:
if ndims is not None:
if sample_ndims is None and event_ndims is not None:
sample_ndims = ndims - batch_ndims - event_ndims
elif event_ndims is None and sample_ndims is not None:
event_ndims = ndims - batch_ndims - sample_ndims
if sample_ndims is not None and event_ndims is not None:
shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate(
self.batch_shape).concatenate([None]*event_ndims)
x.set_shape(x.get_shape().merge_with(shape))
return x
def _is_scalar_helper(self, static_shape, dynamic_shape_fn):
"""Implementation for `is_scalar_batch` and `is_scalar_event`."""
if static_shape.ndims is not None:
return static_shape.ndims == 0
shape = dynamic_shape_fn()
if (shape.get_shape().ndims is not None and
shape.get_shape()[0].value is not None):
# If the static_shape is correctly written then we should never execute
# this branch. We keep it just in case there's some unimagined corner
# case.
return shape.get_shape().as_list() == [0]
return math_ops.equal(array_ops.shape(shape)[0], 0)
| apache-2.0 | -4,690,143,953,237,497,000 | 33.771084 | 80 | 0.657738 | false |
oconnor663/fbmessenger | fbmessenger/external.py | 1 | 12462 | import webbrowser
import json
import inspect
import time
from PyQt4 import QtCore
from . import network
from . import settings
from . import mqtt
from . import event
from . import windows
from . import application
# methods on the external object with this decorator are exposed to js
def external_decorator(*types, **results):
qt_decorator = QtCore.pyqtSlot(*types, **results)
def decorator(function):
def wrapper(self, *args):
# Put special stuff here
return function(self, *args)
wrapper.__name__ = function.__name__
return qt_decorator(wrapper)
return decorator
# prints a message to remind me to implement this function
def fake_external_decorator(*types, **results):
def _truncate(s):
maxlen = 50
if len(s) > maxlen:
return s[:maxlen-3] + '...'
else:
return s
qt_decorator = QtCore.pyqtSlot(*types, **results)
def decorator(function):
def wrapper(self, *args):
# Put special stuff here
arg_names = inspect.getargspec(function)[0][1:]
frame = inspect.currentframe()
arg_values = inspect.getargvalues(frame)[3]['args']
args_str = ", ".join(a + "=" + _truncate(repr(b))
for (a, b) in zip(arg_names, arg_values))
print("FAKE {0}({1})".format(function.__name__, args_str))
return function(self, *args)
wrapper.__name__ = function.__name__
return qt_decorator(wrapper)
return decorator
def init():
windows.main_window.bind_external(External(windows.main_window))
windows.chat_window.bind_external(External(windows.chat_window))
windows.toast_window.bind_external(External(windows.toast_window))
### main window JS events
def main_window_moved_or_resized():
# js doesn't listen for a separate resize event
arbiter_inform_all("FbDesktop.mainWindowMoved", None)
event.subscribe(windows.main_window.MOVE_EVENT,
main_window_moved_or_resized)
event.subscribe(windows.main_window.RESIZE_EVENT,
main_window_moved_or_resized)
def main_window_activated():
arbiter_inform_all("FbDesktop.mainWindowActivated", None)
event.subscribe(windows.main_window.ACTIVATE_EVENT, main_window_activated)
def main_window_deactivated():
arbiter_inform_all("FbDesktop.mainWindowDeactivated", None)
event.subscribe(windows.main_window.DEACTIVATE_EVENT,
main_window_deactivated)
### chat window js events
def chat_window_activated():
bump_last_active_time()
arbiter_inform_all("FbdChat.chatWindowActivated", None)
event.subscribe(windows.chat_window.ACTIVATE_EVENT, chat_window_activated)
### mqtt events
def mqtt_message_received(topic, payload):
arbiter_inform_all("FbDesktop.mqtt_" + topic, payload)
event.subscribe(mqtt.MESSAGE_RECEIVED_EVENT, mqtt_message_received)
def mqtt_connection_changed(new_value):
arbiter_inform_all("FbDesktop.mqttConnectionChanged", new_value)
event.subscribe(mqtt.CONNECTION_CHANGED_EVENT, mqtt_connection_changed)
def arbiter_inform_all(eventname, payload):
for externalobj in External._instances:
externalobj.arbiter_inform_local(eventname, payload)
# Detecting user input to determine idleness isn't possible in a portable way.
# We use a number of different events as a substitute for input:
# - chat window activated
# - chat tabs switched (via the 'open_threads' setting)
# - user starts typing (via the '/typing' mqtt topic)
_last_active_time = time.time()
def bump_last_active_time():
global _last_active_time
_last_active_time = time.time()
def is_active():
idle_limit = 5 * 60 # 5 minutes
return time.time() - _last_active_time < idle_limit
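# Usage sketch (illustrative, not part of the original module): callers that
# need to know whether the user is around simply branch on this helper, e.g.
#   if not is_active():
#       pass  # e.g. skip foregrounding the chat window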
class External(QtCore.QObject):
_instances = []
def __init__(self, browserWindow):
QtCore.QObject.__init__(self)
self._browserwindow = browserWindow
self._arbiter_name = None
self._instances.append(self)
@external_decorator(str, str)
def arbiterInformSerialized(self, eventname, payload):
# The contract here is that JS will serialize a value, and we will
# deserialize it before we pass it back in. (Recall that passing it
# back in entails serializing into json but then interpreting that
# string as a literal, no net change in serialization level.) This is
# because in some implementations, JS isn't capable of passing out
# arbitrary objects.
# PyQt4 seems to have a weird bug where, when the JS string passed in
# contains surrogate pairs (unicode chars that don't fit in a wchar,
# like these: "𝟘𝟙𝟚𝟛𝟜𝟝𝟞𝟟𝟠𝟡"), those pairs are parsed correctly into
# single Python characters, but an extra '\x00' character is appended
# to the end of the string for each pair. JSON decoding chokes on
# those, so we remove them here.
# TODO(jacko): Do we need a more general workaround for this issue?
remove_null_hack_payload = payload.strip('\x00')
deserialized_payload = json.loads(remove_null_hack_payload)
arbiter_inform_all(eventname, deserialized_payload)
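    # Illustrative contract (hypothetical event/payload): the JS side passes a
    # serialized payload, e.g.
    #   external.arbiterInformSerialized("FbDesktop.ping", '{"seq": 1}')
    # which is deserialized here and re-dispatched to every window via
    # arbiter_inform_all("FbDesktop.ping", {"seq": 1}).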
def arbiter_inform_local(self, eventname, payload):
if self._arbiter_name:
self._browserwindow.call_js_function(
self._arbiter_name, eventname, payload)
@external_decorator()
def captureMouseWheel(self):
# no-op
pass
@external_decorator()
def clearHeartBeat(self):
# no-op
pass
@fake_external_decorator(str, str, result=int)
def asyncConfirm(self, message, caption):
return 0
@external_decorator(str)
def debugLog(self, text):
print(text)
@external_decorator(result=str)
def getAccessToken(self):
uid, token = settings.get_user_info()
return token
@external_decorator(str, result=bool)
def getCapability(self, capabilityName):
# TODO(jacko): implement ticker flyouts etc.
return False
@external_decorator(str, result=str)
def getSetting(self, key):
val = settings.get_setting(key)
return val
@fake_external_decorator(result=str)
def getStateBlob(self):
return ''
@external_decorator(str, result=str)
def getValue(self, key):
val = settings.get_value(key)
return val
@fake_external_decorator(result=str)
def getVersion(self):
return ''
@external_decorator(result=bool)
def hasAccessToken(self):
uid, token = settings.get_user_info()
return bool(token)
@external_decorator()
def heartBeat(self):
# no-op
pass
@external_decorator()
def invalidateAccessToken(self):
settings.set_user_info('', '')
@external_decorator(result=bool)
def isIdle(self):
return not is_active()
@external_decorator(result=bool)
def isMqttConnected(self):
return mqtt.is_connected
@external_decorator(result=bool)
def isToastVisible(self):
return windows.toast_window.is_visible()
@external_decorator(str, str)
def logEvent(self, name, payload):
# no-op
pass
@external_decorator(str, str, str)
def logEvent2(self, category, name, payload):
# no-op
pass
@external_decorator(str)
def mqttSubscribe(self, topic):
mqtt.subscribe(topic)
@external_decorator(str)
def navigateBrowserToUrl(self, url):
if not url.startswith("http://") and not url.startswith("https://"):
url = "http://" + url
webbrowser.open(url)
@external_decorator(str)
def navigateWindowToUrl(self, url):
self._browserwindow.navigate(url)
@external_decorator(str, str, str, str)
def postWebRequest(self, url, callback, method, poststr):
def _callback(reply):
self._browserwindow.call_js_function(callback, reply)
network.AsyncRequest(url, _callback,
poststr if method.upper() == "POST" else None)
@external_decorator()
def recycle(self):
self._browserwindow.refresh()
@external_decorator()
def releaseMouseWheel(self):
# no-op
pass
@external_decorator(str, str)
def setAccessToken(self, uid, token):
settings.set_user_info(uid, token)
@external_decorator(str)
def setArbiterInformCallback(self, callback):
self._arbiter_name = callback
@external_decorator(int)
def setIcon(self, icon_id):
# TODO(jacko) do something with this
pass
@external_decorator(str, str)
def setSetting(self, key, value):
if key == "open_threads":
# This setting is changed whenever the user opens a new chat tab,
# or when the chat window is opened from being closed (this doesn't
# matter, because it's not foregrounded), but not when a new tab is
# opened in a foregrounded window by receiving a new message, so it
# works for us as one signal of user activity.
bump_last_active_time()
settings.set_setting(key, value)
@external_decorator(str, str)
def setValue(self, key, value):
settings.set_value(key, value)
@fake_external_decorator()
def showChatDevTools(self):
pass
@fake_external_decorator()
def showDevTools(self):
pass
@fake_external_decorator()
def showSidebarDevTools(self):
pass
@fake_external_decorator()
def showToastDevTools(self):
pass
@fake_external_decorator()
def showFlyoutDevTools(self):
pass
@fake_external_decorator()
def showDialogDevTools(self):
pass
@fake_external_decorator(str, str)
def showTickerFlyout(self, url, storyYPos):
pass
@fake_external_decorator()
def hideTickerFlyout(self):
pass
@fake_external_decorator()
def showDialog(self):
pass
@fake_external_decorator()
def hideDialog(self):
pass
@fake_external_decorator()
def hideMainWindow(self):
pass
@fake_external_decorator(result=bool)
def isDocked(self):
return False
@fake_external_decorator(result=bool)
def isWindowVisible(self):
return False
@fake_external_decorator()
def showMainWindow(self):
pass
@fake_external_decorator()
def toggleDock(self):
pass
@external_decorator()
def hideChatWindow(self):
windows.chat_window.hide()
@external_decorator(result=bool)
def isChatWindowActive(self):
return windows.chat_window.is_active()
@external_decorator()
def playIncomingMessageSound(self):
application.play_message_sound()
@external_decorator(str, str)
def sendMessage(self, topic, message):
if topic == '/typing':
bump_last_active_time()
mqtt.publish(topic, message)
@external_decorator(str)
def setChatWindowTitle(self, title):
windows.chat_window.set_title(title)
@external_decorator(bool)
def showChatWindow(self, bringtofront):
windows.show_chat_window()
if bringtofront:
windows.chat_window.activate()
else:
windows.chat_window.alert()
@external_decorator(int)
def setToastHeight(self, height):
windows.toast_window.set_size(windows.TOAST_WIDTH, height)
@external_decorator()
def showToast(self):
windows.show_toast()
@external_decorator()
def closeToast(self):
windows.toast_window.hide()
@external_decorator()
def fadeToast(self):
fade_ms = 2000
windows.toast_window.fade(fade_ms)
# The argument to showCustomToast is passed in as an actual JS object,
# rather than being serialized. (This worked on Mac, and the function
# wasn't used on Linux.) QVariant is the type that Qt needs to marshall it.
#
# JS checks for this function before calling it, so since it's currently a
    # no-op we don't really need to provide it. I'm keeping it here mostly to
# document the call signature for future me. TODO(jacko): Use this?
@external_decorator(QtCore.QVariant)
def showCustomToast(self, blob):
pass
| bsd-3-clause | 3,648,775,770,253,928,400 | 30.714286 | 79 | 0.646236 | false |
ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/rebel/rebel_comm_op_14.py | 2 | 1514 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('crackdown_rebel_comm_operator')
mobileTemplate.setLevel(14)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(False)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("rebel")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(True)
mobileTemplate.setFaction("rebel")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_dressed_rebel_communication_female_01.iff')
templates.add('object/mobile/shared_dressed_rebel_communication_male_01.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e5.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('rebel_comm_op_14', mobileTemplate)
return | lgpl-3.0 | -2,672,664,742,178,986,500 | 35.071429 | 125 | 0.830251 | false |
gdestuynder/mozdef_lib | mozdef.py | 1 | 5872 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
# Author: [email protected]
import os
import sys
import copy
from datetime import datetime
import pytz
import json
import socket
import syslog
try:
from requests_futures.sessions import FuturesSession as Session
futures_loaded = True
except ImportError:
from requests import Session
futures_loaded = False
class MozDefError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class MozDefMsg():
#If you need syslog emulation (flattens the msg and sends over syslog)
sendToSyslog = False
    #This disables sending to MozDef - generally you'll want sendToSyslog set to True in that case
syslogOnly = False
httpsession = Session()
#Turns off needless and repetitive .netrc check for creds
httpsession.trust_env = False
debug = False
verify_certificate = True
    #Never fail (ie no unexpected exceptions sent to user, such as server/network not responding)
fire_and_forget_mode = True
log = {}
log['timestamp'] = pytz.timezone('UTC').localize(datetime.utcnow()).isoformat()
log['hostname'] = socket.getfqdn()
log['processid'] = os.getpid()
log['processname'] = sys.argv[0]
log['severity'] = 'INFO'
log['summary'] = None
log['category'] = 'event'
log['tags'] = list()
log['details'] = dict()
def __init__(self, mozdef_hostname, summary=None, category='event', severity='INFO', tags=[], details={}):
self.summary = summary
self.category = category
self.severity = severity
self.tags = tags
self.details = details
self.mozdef_hostname = mozdef_hostname
def send(self, summary=None, category=None, severity=None, tags=None, details=None):
log_msg = copy.copy(self.log)
if summary == None: log_msg['summary'] = self.summary
else: log_msg['summary'] = summary
if category == None: log_msg['category'] = self.category
else: log_msg['category'] = category
if severity == None: log_msg['severity'] = self.severity
else: log_msg['severity'] = severity
if tags == None: log_msg['tags'] = self.tags
else: log_msg['tags'] = tags
if details == None: log_msg['details'] = self.details
else: log_msg['details'] = details
if type(log_msg['details']) != dict:
raise MozDefError('details must be a dict')
elif type(log_msg['tags']) != list:
raise MozDefError('tags must be a list')
elif log_msg['summary'] == None:
raise MozDefError('Summary is a required field')
if self.debug:
print(json.dumps(log_msg, sort_keys=True, indent=4))
if not self.syslogOnly:
try:
if futures_loaded:
r = self.httpsession.post(self.mozdef_hostname, json.dumps(log_msg, sort_keys=True, indent=4), verify=self.verify_certificate, background_callback=self.httpsession_cb)
else:
r = self.httpsession.post(self.mozdef_hostname, json.dumps(log_msg, sort_keys=True, indent=4), verify=self.verify_certificate)
except Exception as e:
if not self.fire_and_forget_mode:
raise e
if self.sendToSyslog:
syslog_msg = ''
syslog_severity = syslog.LOG_INFO
for i in log_msg:
# If present and if possible convert severity to a syslog field
if i == 'severity':
syslog_severity = self.str_to_syslog_severity(i)
continue
# These fields are already populated by syslog
if i == 'hostname' or i == 'processid' or i == 'timestamp' or i == 'processname':
continue
syslog_msg += str(i)+': \''+str(log_msg[i])+'\' '
syslog.syslog(syslog_severity, syslog_msg)
syslog.closelog()
def str_to_syslog_severity(self, severity):
if severity == 'INFO':
return syslog.LOG_INFO
elif severity == 'WARNING':
return syslog.LOG_WARNING
elif severity == 'CRIT' or severity == 'CRITICAL':
return syslog.LOG_CRIT
elif severity == 'ERR' or severity == 'ERROR':
return syslog.LOG_ERR
elif severity == 'DEBUG':
return syslog.LOG_DEBUG
return syslog.LOG_INFO
def httpsession_cb(self, session, response):
if response.result().status_code != 200:
if not self.fire_and_forget_mode:
raise MozDefError("HTTP POST failed with code %r" % response.result().status_code)
if __name__ == "__main__":
print("Testing the MozDef logging module (no msg sent over the network)")
print("Simple msg:")
msg = MozDefMsg('https://127.0.0.1/events')
# This prints out the msg in JSON to stdout
msg.debug = True
msg.send('test msg')
msg.sendToSyslog = True
msg.send('test syslog msg')
print("Complex msg:")
msg.sendToSyslog = False
msg.send('new test msg', 'authentication', 'CRITICAL', ['bro', 'auth'], {'uid': 0, 'username': 'kang'})
msg.sendToSyslog = True
msg.send('new test msg', 'authentication', 'CRITICAL', ['bro', 'auth'], {'uid': 0, 'username': 'kang'})
print("Modifying timestamp attribute:")
msg.sendToSyslog = False
msg.log['timestamp'] = pytz.timezone('Europe/Paris').localize(datetime.now()).isoformat()
msg.send('another test msg')
msg.sendToSyslog = True
msg.send('another test msg')
| mpl-2.0 | 4,901,658,105,695,021,000 | 37.12987 | 187 | 0.606097 | false |
juhuntenburg/pipelines | src/lsd_lemon/struct_preproc/mgzconvert.py | 2 | 4856 | from nipype.pipeline.engine import Node, Workflow
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.fsl as fsl
import os
'''
Workflow to extract relevant output from freesurfer directory
'''
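# Hypothetical usage sketch (paths and subject id are illustrative only):
# wf = create_mgzconvert_pipeline(name='mgzconvert')
# wf.inputs.inputnode.fs_subjects_dir = '/data/freesurfer'
# wf.inputs.inputnode.fs_subject_id = 'sub-01'
# wf.base_dir = '/tmp/work'
# wf.run()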
def create_mgzconvert_pipeline(name='mgzconvert'):
# workflow
mgzconvert = Workflow(name='mgzconvert')
#inputnode
inputnode=Node(util.IdentityInterface(fields=['fs_subjects_dir',
'fs_subject_id',
]),
name='inputnode')
#outputnode
outputnode=Node(util.IdentityInterface(fields=['anat_head',
'anat_brain',
'func_mask',
'wmseg',
'wmedge']),
name='outputnode')
# import files from freesurfer
fs_import = Node(interface=nio.FreeSurferSource(),
name = 'fs_import')
# convert Freesurfer T1 file to nifti
head_convert=Node(fs.MRIConvert(out_type='niigz',
out_file='T1.nii.gz'),
name='head_convert')
# convert Freesurfer brain.finalsurf file to nifti
# grab finalsurf file
def grab_brain(fs_subjects_dir, fs_subject_id):
import os
brainfile = os.path.join(fs_subjects_dir, fs_subject_id,
'mri', 'brain.finalsurfs.mgz')
return os.path.abspath(brainfile)
brain_grab=Node(util.Function(input_names=['fs_subjects_dir',
'fs_subject_id'],
output_names=['brain_file'],
function=grab_brain),
name='brain_grab')
brain_convert=Node(fs.MRIConvert(out_type='niigz',
out_file='T1_brain.nii.gz'),
name='brain_convert')
# create brainmask from aparc+aseg with single dilation for functional data
# DIFFERENT APPROACHES TO MASK THE FUNCTIONAL AND STRUCTURAL DATA
# ARE USED FOR HISTORIC REASONS
def get_aparc_aseg(files):
for name in files:
if 'aparc+aseg' in name:
return name
funcmask = Node(fs.Binarize(min=0.5,
dilate=1,
out_type='nii.gz'),
name='funcmask')
# fill holes in mask, smooth, rebinarize
fillholes = Node(fsl.maths.MathsCommand(args='-fillh -s 3 -thr 0.1 -bin',
out_file='func_mask.nii.gz'),
name='fillholes')
# cortical and cerebellar white matter volumes to construct wm edge
# [lh cerebral wm, lh cerebellar wm, rh cerebral wm, rh cerebellar wm, brain stem]
wmseg = Node(fs.Binarize(out_type='nii.gz',
match = [2, 7, 41, 46, 16],
binary_file='T1_brain_wmseg.nii.gz'),
name='wmseg')
# make edge from wmseg to visualize coregistration quality
edge = Node(fsl.ApplyMask(args='-edge -bin',
out_file='T1_brain_wmedge.nii.gz'),
name='edge')
# connections
mgzconvert.connect([(inputnode, fs_import, [('fs_subjects_dir','subjects_dir'),
('fs_subject_id', 'subject_id')]),
(fs_import, head_convert, [('T1', 'in_file')]),
(inputnode, brain_grab, [('fs_subjects_dir', 'fs_subjects_dir'),
('fs_subject_id', 'fs_subject_id')]),
(brain_grab, brain_convert, [('brain_file', 'in_file')]),
(fs_import, wmseg, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),
(fs_import, funcmask, [(('aparc_aseg', get_aparc_aseg), 'in_file')]),
(funcmask, fillholes, [('binary_file', 'in_file')]),
(wmseg, edge, [('binary_file', 'in_file'),
('binary_file', 'mask_file')]),
(head_convert, outputnode, [('out_file', 'anat_head')]),
(fillholes, outputnode, [('out_file', 'func_mask')]),
(brain_convert, outputnode, [('out_file', 'anat_brain')]),
(wmseg, outputnode, [('binary_file', 'wmseg')]),
(edge, outputnode, [('out_file', 'wmedge')])
])
return mgzconvert | mit | 1,885,589,200,310,871,300 | 41.982301 | 93 | 0.474053 | false |
openstack/networking-mlnx | tools/i18n_cfg.py | 3 | 3451 | import compiler
import re
def is_log_callfunc(n):
"""LOG.xxx('hello %s' % xyz) and LOG('hello')"""
if isinstance(n.parent, compiler.ast.Mod):
n = n.parent
if isinstance(n.parent, compiler.ast.CallFunc):
if isinstance(n.parent.node, compiler.ast.Getattr):
if isinstance(n.parent.node.getChildNodes()[0],
compiler.ast.Name):
if n.parent.node.getChildNodes()[0].name == 'LOG':
return True
return False
def is_log_i18n_msg_with_mod(n):
"""LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)"""
if not isinstance(n.parent.parent, compiler.ast.Mod):
return False
n = n.parent.parent
if isinstance(n.parent, compiler.ast.CallFunc):
if isinstance(n.parent.node, compiler.ast.Getattr):
if isinstance(n.parent.node.getChildNodes()[0],
compiler.ast.Name):
if n.parent.node.getChildNodes()[0].name == 'LOG':
return True
return False
def is_wrong_i18n_format(n):
"""Check _('hello %s' % xyz)"""
if isinstance(n.parent, compiler.ast.Mod):
n = n.parent
if isinstance(n.parent, compiler.ast.CallFunc):
if isinstance(n.parent.node, compiler.ast.Name):
if n.parent.node.name == '_':
return True
return False
"""
Used to check whether a message needs to be localized or not.
(predicate_func, action, message)
"""
i18n_msg_predicates = [
# Skip ['hello world', 1]
(lambda n: isinstance(n.parent, compiler.ast.List), 'skip', ''),
    # Skip {'hello world', 1}
(lambda n: isinstance(n.parent, compiler.ast.Dict), 'skip', ''),
# Skip msg['hello world']
(lambda n: isinstance(n.parent, compiler.ast.Subscript), 'skip', ''),
# Skip doc string
(lambda n: isinstance(n.parent, compiler.ast.Discard), 'skip', ''),
    # Skip msg = "hello"; normally a message should be more than one word
(lambda n: len(n.value.strip().split(' ')) <= 1, 'skip', ''),
# Skip msg = 'hello world' + vars + 'world hello'
(lambda n: isinstance(n.parent, compiler.ast.Add), 'skip', ''),
# Skip xml markers msg = "<test></test>"
(lambda n: len(re.compile("</.*>").findall(n.value)) > 0, 'skip', ''),
# Skip sql statement
(lambda n: len(
re.compile("^SELECT.*FROM", flags=re.I).findall(n.value)) > 0,
'skip', ''),
# LOG.xxx()
(is_log_callfunc, 'error', 'Message must be localized'),
# _('hello %s' % xyz) should be _('hello %s') % xyz
(is_wrong_i18n_format, 'error',
("Message format was wrong, _('hello %s' % xyz) "
"should be _('hello %s') % xyz")),
# default
(lambda n: True, 'warn', 'Message might need localized')
]
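# Sketch of how these predicates are meant to be consumed (illustrative only;
# the actual consumer lives in the project's check tooling, not in this file):
# for each string Const node `n` found in a parsed module, apply the first
# (predicate, action, message) tuple whose predicate returns True; 'skip'
# ignores the string, while 'error' and 'warn' report it with the message.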
"""
Used for checking message format. (checker_func, message)
"""
msg_format_checkers = [
    # If a message contains more than one format specifier, it should use a
    # mapping key
(lambda n: len(re.compile("%[bcdeEfFgGnosxX]").findall(n.value)) > 1,
"The message shouldn't contain more than one format specifier"),
# Check capital
(lambda n: n.value.split(' ')[0].count('_') == 0 and
n.value[0].isalpha() and
n.value[0].islower(),
"First letter must be capital"),
(is_log_i18n_msg_with_mod,
'LOG.xxx("Hello %s" % xyz) should be LOG.xxx("Hello %s", xyz)')
]
file_black_list = ["./networking_mlnx/tests/unit",
"./networking_mlnx/openstack"]
| apache-2.0 | 5,156,303,838,976,201,000 | 34.947917 | 74 | 0.587076 | false |
edulramirez/nova | nova/tests/unit/virt/vmwareapi/test_volumeops.py | 11 | 25449 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo_vmware import exceptions as oslo_vmw_exceptions
from oslo_vmware import vim_util as vutil
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake as image_fake
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import volumeops
class VMwareVolumeOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVolumeOpsTestCase, self).setUp()
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
self._session = driver.VMwareAPISession()
self._context = context.RequestContext('fake_user', 'fake_project')
self._volumeops = volumeops.VMwareVolumeOps(self._session)
self._image_id = image_fake.get_valid_image_id()
self._instance_values = {
'name': 'fake_name',
'uuid': 'fake_uuid',
'vcpus': 1,
'memory_mb': 512,
'image_ref': self._image_id,
'root_gb': 10,
'node': 'respool-1001(MyResPoolName)',
'expected_attrs': ['system_metadata'],
}
self._instance = fake_instance.fake_instance_obj(self._context,
**self._instance_values)
def _test_detach_disk_from_vm(self, destroy_disk=False):
def fake_call_method(module, method, *args, **kwargs):
vmdk_detach_config_spec = kwargs.get('spec')
virtual_device_config = vmdk_detach_config_spec.deviceChange[0]
self.assertEqual('remove', virtual_device_config.operation)
self.assertEqual('ns0:VirtualDeviceConfigSpec',
virtual_device_config.obj_name)
if destroy_disk:
self.assertEqual('destroy',
virtual_device_config.fileOperation)
else:
self.assertFalse(hasattr(virtual_device_config,
'fileOperation'))
return 'fake_configure_task'
with contextlib.nested(
mock.patch.object(self._session, '_wait_for_task'),
mock.patch.object(self._session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
fake_device = vmwareapi_fake.DataObject()
fake_device.backing = vmwareapi_fake.DataObject()
fake_device.backing.fileName = 'fake_path'
fake_device.key = 'fake_key'
self._volumeops.detach_disk_from_vm('fake_vm_ref', self._instance,
fake_device, destroy_disk)
_wait_for_task.assert_has_calls([
mock.call('fake_configure_task')])
def test_detach_with_destroy_disk_from_vm(self):
self._test_detach_disk_from_vm(destroy_disk=True)
def test_detach_without_destroy_disk_from_vm(self):
self._test_detach_disk_from_vm(destroy_disk=False)
def _fake_call_get_object_property(self, uuid, result):
def fake_call_method(vim, method, vm_ref, prop):
expected_prop = 'config.extraConfig["volume-%s"]' % uuid
self.assertEqual('VirtualMachine', vm_ref._type)
self.assertEqual(expected_prop, prop)
return result
return fake_call_method
def test_get_volume_uuid(self):
vm_ref = vmwareapi_fake.ManagedObjectReference('VirtualMachine',
'vm-134')
uuid = '1234'
opt_val = vmwareapi_fake.OptionValue('volume-%s' % uuid, 'volume-val')
fake_call = self._fake_call_get_object_property(uuid, opt_val)
with mock.patch.object(self._session, "_call_method", fake_call):
val = self._volumeops._get_volume_uuid(vm_ref, uuid)
self.assertEqual('volume-val', val)
def test_get_volume_uuid_not_found(self):
vm_ref = vmwareapi_fake.ManagedObjectReference('VirtualMachine',
'vm-134')
uuid = '1234'
fake_call = self._fake_call_get_object_property(uuid, None)
with mock.patch.object(self._session, "_call_method", fake_call):
val = self._volumeops._get_volume_uuid(vm_ref, uuid)
self.assertIsNone(val)
def test_attach_volume_vmdk_invalid(self):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE)
vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE,
constants.DISK_TYPE_PREALLOCATED, 1024,
'fake-device')
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref'),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info)
) as (get_vm_ref, get_volume_ref, get_vmdk_info):
self.assertRaises(exception.Invalid,
self._volumeops._attach_volume_vmdk, connection_info,
instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
self.assertTrue(get_vmdk_info.called)
@mock.patch.object(vm_util, 'get_vm_extra_config_spec',
return_value=mock.sentinel.extra_config)
@mock.patch.object(vm_util, 'reconfigure_vm')
def test_update_volume_details(self, reconfigure_vm,
get_vm_extra_config_spec):
volume_uuid = '26f5948e-52a3-4ee6-8d48-0a379afd0828'
device_uuid = '0d86246a-2adb-470d-a9f7-bce09930c5d'
self._volumeops._update_volume_details(
mock.sentinel.vm_ref, volume_uuid, device_uuid)
get_vm_extra_config_spec.assert_called_once_with(
self._volumeops._session.vim.client.factory,
{'volume-%s' % volume_uuid: device_uuid})
reconfigure_vm.assert_called_once_with(self._volumeops._session,
mock.sentinel.vm_ref,
mock.sentinel.extra_config)
def test_detach_volume_vmdk(self):
vmdk_info = vm_util.VmdkInfo('fake-path', 'lsiLogic', 'thin',
1024, 'fake-device')
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref',
return_value=mock.sentinel.vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref',
return_value=mock.sentinel.volume_ref),
mock.patch.object(self._volumeops,
'_get_vmdk_backed_disk_device',
return_value=mock.sentinel.device),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(self._volumeops, '_consolidate_vmdk_volume'),
mock.patch.object(self._volumeops, 'detach_disk_from_vm'),
mock.patch.object(self._volumeops, '_update_volume_details'),
) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device,
get_vmdk_info, consolidate_vmdk_volume, detach_disk_from_vm,
update_volume_details):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id':
'd11a82de-ddaa-448d-b50a-a255a7e61a1e'
}}
instance = mock.MagicMock(name='fake-name',
vm_state=vm_states.ACTIVE)
self._volumeops._detach_volume_vmdk(connection_info, instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
get_vmdk_info.assert_called_once_with(self._volumeops._session,
mock.sentinel.volume_ref)
consolidate_vmdk_volume.assert_called_once_with(
instance, mock.sentinel.vm_ref, mock.sentinel.device,
mock.sentinel.volume_ref, adapter_type=vmdk_info.adapter_type,
disk_type=vmdk_info.disk_type)
detach_disk_from_vm.assert_called_once_with(mock.sentinel.vm_ref,
instance,
mock.sentinel.device)
update_volume_details.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data']['volume_id'], "")
def test_detach_volume_vmdk_invalid(self):
connection_info = {'driver_volume_type': 'vmdk',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
instance = mock.MagicMock(name='fake-name', vm_state=vm_states.ACTIVE)
vmdk_info = vm_util.VmdkInfo('fake-path', constants.ADAPTER_TYPE_IDE,
constants.DISK_TYPE_PREALLOCATED, 1024,
'fake-device')
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref',
return_value=mock.sentinel.vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(self._volumeops,
'_get_vmdk_backed_disk_device'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info)
) as (get_vm_ref, get_volume_ref, get_vmdk_backed_disk_device,
get_vmdk_info):
self.assertRaises(exception.Invalid,
self._volumeops._detach_volume_vmdk, connection_info,
instance)
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
self.assertTrue(get_vmdk_info.called)
def _test_attach_volume_vmdk(self, adapter_type=None):
connection_info = {'driver_volume_type': constants.DISK_FORMAT_VMDK,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
vm_ref = 'fake-vm-ref'
volume_device = mock.MagicMock()
volume_device.backing.fileName = 'fake-path'
default_adapter_type = constants.DEFAULT_ADAPTER_TYPE
disk_type = constants.DEFAULT_DISK_TYPE
disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811'
backing = mock.Mock(uuid=disk_uuid)
device = mock.Mock(backing=backing)
vmdk_info = vm_util.VmdkInfo('fake-path', default_adapter_type,
disk_type, 1024,
device)
adapter_type = adapter_type or default_adapter_type
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_get_volume_ref'),
mock.patch.object(vm_util, 'get_vmdk_info',
return_value=vmdk_info),
mock.patch.object(self._volumeops, 'attach_disk_to_vm'),
mock.patch.object(self._volumeops, '_update_volume_details')
) as (get_vm_ref, get_volume_ref, get_vmdk_info, attach_disk_to_vm,
update_volume_details):
self._volumeops.attach_volume(connection_info, self._instance,
adapter_type)
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
get_volume_ref.assert_called_once_with(
connection_info['data']['volume'])
self.assertTrue(get_vmdk_info.called)
attach_disk_to_vm.assert_called_once_with(
vm_ref, self._instance, adapter_type,
constants.DISK_TYPE_PREALLOCATED, vmdk_path='fake-path')
update_volume_details.assert_called_once_with(
vm_ref, connection_info['data']['volume_id'], disk_uuid)
def _test_attach_volume_iscsi(self, adapter_type=None):
connection_info = {'driver_volume_type': 'iscsi',
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
vm_ref = 'fake-vm-ref'
default_adapter_type = constants.DEFAULT_ADAPTER_TYPE
adapter_type = adapter_type or default_adapter_type
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_iscsi_discover_target',
return_value=(mock.sentinel.device_name,
mock.sentinel.uuid)),
mock.patch.object(vm_util, 'get_scsi_adapter_type',
return_value=adapter_type),
mock.patch.object(self._volumeops, 'attach_disk_to_vm')
) as (get_vm_ref, iscsi_discover_target, get_scsi_adapter_type,
attach_disk_to_vm):
self._volumeops.attach_volume(connection_info, self._instance,
adapter_type)
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
iscsi_discover_target.assert_called_once_with(
connection_info['data'])
if adapter_type is None:
self.assertTrue(get_scsi_adapter_type.called)
attach_disk_to_vm.assert_called_once_with(vm_ref,
self._instance, adapter_type, 'rdmp',
device_name=mock.sentinel.device_name)
def test_attach_volume_vmdk(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_IDE,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_vmdk(adapter_type)
def test_attach_volume_iscsi(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_iscsi(adapter_type)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume')
def test_consolidate_vmdk_volume_with_no_relocate(
self, relocate_vmdk_volume, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
device = mock.Mock(backing=backing)
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
self._volumeops._consolidate_vmdk_volume(self._instance, vm_ref,
device, volume_ref)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
self.assertFalse(relocate_vmdk_volume.called)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm')
def test_consolidate_vmdk_volume_with_relocate(
self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host,
get_host_of_vm, relocate_vmdk_volume, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
new_file_name = mock.sentinel.new_file_name
datastore = mock.sentinel.datastore
new_backing = mock.Mock(fileName=new_file_name, datastore=datastore)
device = mock.Mock(backing=new_backing)
host = mock.sentinel.host
get_host_of_vm.return_value = host
rp = mock.sentinel.rp
get_res_pool_of_host.return_value = rp
instance = self._instance
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
adapter_type = constants.ADAPTER_TYPE_BUSLOGIC
disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK
self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device,
volume_ref, adapter_type,
disk_type)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
relocate_vmdk_volume.assert_called_once_with(
volume_ref, rp, datastore, host)
detach_disk_from_vm.assert_called_once_with(
volume_ref, instance, original_device, destroy_disk=True)
attach_disk_to_vm.assert_called_once_with(
volume_ref, instance, adapter_type, disk_type,
vmdk_path=new_file_name)
@mock.patch.object(volumeops.VMwareVolumeOps,
'_get_vmdk_base_volume_device')
@mock.patch.object(volumeops.VMwareVolumeOps, '_relocate_vmdk_volume')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_host_of_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, '_get_res_pool_of_host')
@mock.patch.object(volumeops.VMwareVolumeOps, 'detach_disk_from_vm')
@mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm')
def test_consolidate_vmdk_volume_with_missing_vmdk(
self, attach_disk_to_vm, detach_disk_from_vm, get_res_pool_of_host,
get_host_of_vm, relocate_vmdk_volume, get_vmdk_base_volume_device):
file_name = mock.sentinel.file_name
backing = mock.Mock(fileName=file_name)
original_device = mock.Mock(backing=backing)
get_vmdk_base_volume_device.return_value = original_device
new_file_name = mock.sentinel.new_file_name
datastore = mock.sentinel.datastore
new_backing = mock.Mock(fileName=new_file_name, datastore=datastore)
device = mock.Mock(backing=new_backing)
host = mock.sentinel.host
get_host_of_vm.return_value = host
rp = mock.sentinel.rp
get_res_pool_of_host.return_value = rp
relocate_vmdk_volume.side_effect = [
oslo_vmw_exceptions.FileNotFoundException, None]
instance = mock.sentinel.instance
volume_ref = mock.sentinel.volume_ref
vm_ref = mock.sentinel.vm_ref
adapter_type = constants.ADAPTER_TYPE_BUSLOGIC
disk_type = constants.DISK_TYPE_EAGER_ZEROED_THICK
self._volumeops._consolidate_vmdk_volume(instance, vm_ref, device,
volume_ref, adapter_type,
disk_type)
get_vmdk_base_volume_device.assert_called_once_with(volume_ref)
relocate_calls = [mock.call(volume_ref, rp, datastore, host),
mock.call(volume_ref, rp, datastore, host)]
self.assertEqual(relocate_calls, relocate_vmdk_volume.call_args_list)
detach_disk_from_vm.assert_called_once_with(
volume_ref, instance, original_device)
attach_disk_to_vm.assert_called_once_with(
volume_ref, instance, adapter_type, disk_type,
vmdk_path=new_file_name)
def test_iscsi_get_host_iqn(self):
host_mor = mock.Mock()
iqn = 'iscsi-name'
hba = vmwareapi_fake.HostInternetScsiHba(iqn)
hbas = mock.MagicMock(HostHostBusAdapter=[hba])
with contextlib.nested(
mock.patch.object(vm_util, 'get_host_ref_for_vm',
return_value=host_mor),
mock.patch.object(self._volumeops._session, '_call_method',
return_value=hbas)
) as (fake_get_host_ref_for_vm, fake_call_method):
result = self._volumeops._iscsi_get_host_iqn(self._instance)
fake_get_host_ref_for_vm.assert_called_once_with(
self._volumeops._session, self._instance)
fake_call_method.assert_called_once_with(vutil,
"get_object_property",
host_mor,
"config.storageDevice.hostBusAdapter")
self.assertEqual(iqn, result)
def test_iscsi_get_host_iqn_instance_not_found(self):
host_mor = mock.Mock()
iqn = 'iscsi-name'
hba = vmwareapi_fake.HostInternetScsiHba(iqn)
hbas = mock.MagicMock(HostHostBusAdapter=[hba])
with contextlib.nested(
mock.patch.object(vm_util, 'get_host_ref_for_vm',
side_effect=exception.InstanceNotFound('fake')),
mock.patch.object(vm_util, 'get_host_ref',
return_value=host_mor),
mock.patch.object(self._volumeops._session, '_call_method',
return_value=hbas)
) as (fake_get_host_ref_for_vm,
fake_get_host_ref,
fake_call_method):
result = self._volumeops._iscsi_get_host_iqn(self._instance)
fake_get_host_ref_for_vm.assert_called_once_with(
self._volumeops._session, self._instance)
fake_get_host_ref.assert_called_once_with(
self._volumeops._session, self._volumeops._cluster)
fake_call_method.assert_called_once_with(vutil,
"get_object_property",
host_mor,
"config.storageDevice.hostBusAdapter")
self.assertEqual(iqn, result)
def test_get_volume_connector(self):
vm_id = 'fake-vm'
vm_ref = mock.MagicMock(value=vm_id)
iqn = 'iscsi-name'
url = 'test_url'
self.flags(host_ip=url, group='vmware')
with contextlib.nested(
mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
mock.patch.object(self._volumeops, '_iscsi_get_host_iqn',
return_value=iqn)
) as (fake_get_vm_ref, fake_iscsi_get_host_iqn):
connector = self._volumeops.get_volume_connector(self._instance)
fake_get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
fake_iscsi_get_host_iqn.assert_called_once_with(self._instance)
self.assertEqual(url, connector['ip'])
self.assertEqual(url, connector['host'])
self.assertEqual(iqn, connector['initiator'])
self.assertEqual(vm_id, connector['instance'])
| apache-2.0 | 1,792,248,067,057,657,600 | 48.129344 | 79 | 0.567724 | false |
ypwalter/evennia | evennia/contrib/tutorial_world/mob.py | 6 | 15701 | """
This module implements a simple mobile object with
a very rudimentary AI as well as an aggressive enemy
object based on that mobile class.
"""
import random
from evennia import TICKER_HANDLER
from evennia import search_object
from evennia import Command, CmdSet
from evennia import logger
from evennia.contrib.tutorial_world import objects as tut_objects
class CmdMobOnOff(Command):
"""
Activates/deactivates Mob
Usage:
mobon <mob>
moboff <mob>
    This switches the mob between active (alive) and
    inactive (dead) mode. It is used during
building to activate the mob once it's
prepared.
"""
key = "mobon"
aliases = "moboff"
locks = "cmd:superuser()"
def func(self):
"""
Uses the mob's set_alive/set_dead methods
        to turn on/off the mob.
"""
if not self.args:
self.caller.msg("Usage: mobon|moboff <mob>")
return
mob = self.caller.search(self.args)
if not mob:
return
if self.cmdstring == "mobon":
mob.set_alive()
else:
mob.set_dead()
class MobCmdSet(CmdSet):
"""
Holds the admin command controlling the mob
"""
def at_cmdset_creation(self):
self.add(CmdMobOnOff())
class Mob(tut_objects.TutorialObject):
"""
This is a state-machine AI mobile. It has several states which are
    controlled by setting various Attributes. All default to True:
patrolling: if set, the mob will move randomly
from room to room, but preferring to not return
the way it came. If unset, the mob will remain
stationary (idling) until attacked.
aggressive: if set, will attack Characters in
the same room using whatever Weapon it
carries (see tutorial_world.objects.Weapon).
if unset, the mob will never engage in combat
no matter what.
hunting: if set, the mob will pursue enemies trying
to flee from it, so it can enter combat. If unset,
it will return to patrolling/idling if fled from.
immortal: If set, the mob cannot take any damage.
        irregular_msgs: list of strings the mob generates at irregular intervals.
desc_alive: the physical description while alive
        desc_dead: the physical description while dead
send_defeated_to: unique key/alias for location to send defeated enemies to
defeat_msg: message to echo to defeated opponent
defeat_msg_room: message to echo to room. Accepts %s as the name of the defeated.
hit_msg: message to echo when this mob is hit. Accepts %s for the mob's key.
weapon_ineffective_msg: message to echo for useless attacks
death_msg: message to echo to room when this mob dies.
patrolling_pace: how many seconds per tick, when patrolling
aggressive_pace: -"- attacking
hunting_pace: -"- hunting
death_pace: -"- returning to life when dead
        field 'home' - the home location should be set to someplace inside
the patrolling area. The mob will use this if it should
happen to roam into a room with no exits.
"""
def at_init(self):
"""
When initialized from cache (after a server reboot), set up
the AI state.
"""
# The AI state machine (not persistent).
self.ndb.is_patrolling = self.db.patrolling and not self.db.is_dead
self.ndb.is_attacking = False
self.ndb.is_hunting = False
self.ndb.is_immortal = self.db.immortal or self.db.is_dead
def at_object_creation(self):
"""
Called the first time the object is created.
We set up the base properties and flags here.
"""
self.cmdset.add(MobCmdSet, permanent=True)
# Main AI flags. We start in dead mode so we don't have to
# chase the mob around when building.
self.db.patrolling = True
self.db.aggressive = True
self.db.immortal = False
# db-store if it is dead or not
self.db.is_dead = True
# specifies how much damage we divide away from non-magic weapons
self.db.damage_resistance = 100.0
# pace (number of seconds between ticks) for
# the respective modes.
self.db.patrolling_pace = 6
self.db.aggressive_pace = 2
self.db.hunting_pace = 1
self.db.death_pace = 100 # stay dead for 100 seconds
# we store the call to the tickerhandler
# so we can easily deactivate the last
# ticker subscription when we switch.
# since we will use the same idstring
# throughout we only need to save the
# previous interval we used.
self.db.last_ticker_interval = None
# store two separate descriptions, one for alive and
# one for dead (corpse)
self.db.desc_alive = "This is a moving object."
self.db.desc_dead = "A dead body."
# health stats
self.db.full_health = 20
self.db.health = 20
# when this mob defeats someone, we move the character off to
# some other place (Dark Cell in the tutorial).
self.db.send_defeated_to = "dark cell"
# text to echo to the defeated foe.
self.db.defeat_msg = "You fall to the ground."
self.db.defeat_msg_room = "%s falls to the ground."
self.db.weapon_ineffective_msg = "Your weapon just passes through your enemy, causing almost no effect!"
self.db.death_msg = "After the last hit %s evaporates." % self.key
self.db.hit_msg = "%s wails, shudders and writhes." % self.key
self.db.irregular_msgs = ["the enemy looks about.", "the enemy changes stance."]
self.db.tutorial_info = "This is an object with simple state AI, using a ticker to move."
def _set_ticker(self, interval, hook_key, stop=False):
"""
Set how often the given hook key should
be "ticked".
Args:
interval (int): The number of seconds
between ticks
hook_key (str): The name of the method
(on this mob) to call every interval
seconds.
stop (bool, optional): Just stop the
last ticker without starting a new one.
With this set, the interval and hook_key
arguments are unused.
In order to only have one ticker
running at a time, we make sure to store the
previous ticker subscription so that we can
easily find and stop it before setting a
new one. The tickerhandler is persistent so
we need to remember this across reloads.
"""
idstring = "tutorial_mob" # this doesn't change
last_interval = self.db.last_ticker_interval
if last_interval:
# we have a previous subscription, kill this first.
TICKER_HANDLER.remove(self, last_interval, idstring)
self.db.last_ticker_interval = interval
if not stop:
# set the new ticker
TICKER_HANDLER.add(self, interval, idstring, hook_key)
def _find_target(self, location):
"""
Scan the given location for suitable targets (this is defined
as Characters) to attack. Will ignore superusers.
Args:
location (Object): the room to scan.
Returns:
The first suitable target found.
"""
targets = [obj for obj in location.contents_get(exclude=self)
if obj.has_player and not obj.is_superuser]
return targets[0] if targets else None
def set_alive(self, *args, **kwargs):
"""
Set the mob to "alive" mode. This effectively
resurrects it from the dead state.
"""
self.db.health = self.db.full_health
self.db.is_dead = False
self.db.desc = self.db.desc_alive
self.ndb.is_immortal = self.db.immortal
self.ndb.is_patrolling = self.db.patrolling
if not self.location:
self.move_to(self.home)
if self.db.patrolling:
self.start_patrolling()
def set_dead(self):
"""
Set the mob to "dead" mode. This turns it off
and makes sure it can take no more damage.
It also starts a ticker for when it will return.
"""
self.db.is_dead = True
self.location = None
self.ndb.is_patrolling = False
self.ndb.is_attacking = False
self.ndb.is_hunting = False
self.ndb.is_immortal = True
# we shall return after some time
self._set_ticker(self.db.death_pace, "set_alive")
def start_idle(self):
"""
Starts just standing around. This will kill
the ticker and do nothing more.
"""
self._set_ticker(None, None, stop=True)
def start_patrolling(self):
"""
Start the patrolling state by
registering us with the ticker-handler
        at a leisurely pace.
"""
if not self.db.patrolling:
self.start_idle()
return
self._set_ticker(self.db.patrolling_pace, "do_patrol")
self.ndb.is_patrolling = True
self.ndb.is_hunting = False
self.ndb.is_attacking = False
# for the tutorial, we also heal the mob in this mode
self.db.health = self.db.full_health
def start_hunting(self):
"""
Start the hunting state
"""
if not self.db.hunting:
self.start_patrolling()
return
self._set_ticker(self.db.hunting_pace, "do_hunt")
self.ndb.is_patrolling = False
self.ndb.is_hunting = True
self.ndb.is_attacking = False
def start_attacking(self):
"""
Start the attacking state
"""
if not self.db.aggressive:
self.start_hunting()
return
self._set_ticker(self.db.aggressive_pace, "do_attack")
self.ndb.is_patrolling = False
self.ndb.is_hunting = False
self.ndb.is_attacking = True
def do_patrol(self, *args, **kwargs):
"""
Called repeatedly during patrolling mode. In this mode, the
mob scans its surroundings and randomly chooses a viable exit.
One should lock exits with the traverse:has_player() lock in
order to block the mob from moving outside its area while
allowing player-controlled characters to move normally.
"""
if random.random() < 0.01 and self.db.irregular_msgs:
self.location.msg_contents(random.choice(self.db.irregular_msgs))
if self.db.aggressive:
# first check if there are any targets in the room.
target = self._find_target(self.location)
if target:
self.start_attacking()
return
# no target found, look for an exit.
exits = [exi for exi in self.location.exits
if exi.access(self, "traverse")]
if exits:
# randomly pick an exit
exit = random.choice(exits)
# move there.
self.move_to(exit.destination)
else:
# no exits! teleport to home to get away.
self.move_to(self.home)
    def do_hunt(self, *args, **kwargs):
"""
Called regularly when in hunting mode. In hunting mode the mob
scans adjacent rooms for enemies and moves towards them to
attack if possible.
"""
if random.random() < 0.01 and self.db.irregular_msgs:
self.location.msg_contents(random.choice(self.db.irregular_msgs))
if self.db.aggressive:
# first check if there are any targets in the room.
target = self._find_target(self.location)
if target:
self.start_attacking()
return
# no targets found, scan surrounding rooms
exits = [exi for exi in self.location.exits
if exi.access(self, "traverse")]
if exits:
# scan the exits destination for targets
for exit in exits:
target = self._find_target(exit.destination)
if target:
# a target found. Move there.
self.move_to(exit.destination)
return
# if we get to this point we lost our
# prey. Resume patrolling.
self.start_patrolling()
else:
# no exits! teleport to home to get away.
self.move_to(self.home)
def do_attack(self, *args, **kwargs):
"""
Called regularly when in attacking mode. In attacking mode
the mob will bring its weapons to bear on any targets
in the room.
"""
if random.random() < 0.01 and self.db.irregular_msgs:
self.location.msg_contents(random.choice(self.db.irregular_msgs))
# first make sure we have a target
target = self._find_target(self.location)
if not target:
# no target, start looking for one
self.start_hunting()
return
# we use the same attack commands as defined in
# tutorial_world.objects.Weapon, assuming that
# the mob is given a Weapon to attack with.
attack_cmd = random.choice(("thrust", "pierce", "stab", "slash", "chop"))
self.execute_cmd("%s %s" % (attack_cmd, target))
# analyze the current state
if target.db.health <= 0:
# we reduced the target to <= 0 health. Move them to the
# defeated room
target.msg(self.db.defeat_msg)
self.location.msg_contents(self.db.defeat_msg_room % target.key, exclude=target)
send_defeated_to = search_object(self.db.send_defeated_to)
if send_defeated_to:
target.move_to(send_defeated_to[0], quiet=True)
else:
logger.log_err("Mob: mob.db.send_defeated_to not found: %s" % self.db.send_defeated_to)
# response methods - called by other objects
def at_hit(self, weapon, attacker, damage):
"""
Someone landed a hit on us. Check our status
and start attacking if not already doing so.
"""
if not self.ndb.is_immortal:
if not weapon.db.magic:
# not a magic weapon - divide away magic resistance
damage /= self.db.damage_resistance
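                # e.g. with the defaults from at_object_creation()
                # (damage_resistance=100.0, full_health=20), a non-magic
                # hit for 10 points only removes 0.1 health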
attacker.msg(self.db.weapon_ineffective_msg)
else:
self.location.msg_contents(self.db.hit_msg)
self.db.health -= damage
# analyze the result
if self.db.health <= 0:
# we are dead!
attacker.msg(self.db.death_msg)
self.set_dead()
else:
# still alive, start attack if not already attacking
if self.db.aggressive and not self.ndb.is_attacking:
self.start_attacking()
def at_new_arrival(self, new_character):
"""
This is triggered whenever a new character enters the room.
This is called by the TutorialRoom the mob stands in and
allows it to be aware of changes immediately without needing
to poll for them all the time. For example, the mob can react
right away, also when patrolling on a very slow ticker.
"""
# the room actually already checked all we need, so
# we know it is a valid target.
if self.db.aggressive and not self.ndb.is_attacking:
self.start_attacking()
| bsd-3-clause | 6,176,525,833,461,609,000 | 36.742788 | 112 | 0.596077 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/librarianserver/tests/test_storage_db.py | 1 | 4704 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
import hashlib
import shutil
import tempfile
import unittest
from lp.services.database.sqlbase import flush_database_updates
from lp.services.librarian.model import LibraryFileContent
from lp.services.librarianserver import db
from lp.services.librarianserver.storage import (
DigestMismatchError,
DuplicateFileIDError,
LibrarianStorage,
LibraryFileUpload,
)
from lp.testing.dbuser import switch_dbuser
from lp.testing.layers import LaunchpadZopelessLayer
class LibrarianStorageDBTests(unittest.TestCase):
layer = LaunchpadZopelessLayer
def setUp(self):
switch_dbuser('librarian')
self.directory = tempfile.mkdtemp()
self.storage = LibrarianStorage(self.directory, db.Library())
def tearDown(self):
shutil.rmtree(self.directory, ignore_errors=True)
def test_addFile(self):
data = 'data ' * 50
digest = hashlib.sha1(data).hexdigest()
newfile = self.storage.startAddFile('file1', len(data))
newfile.srcDigest = digest
newfile.append(data)
fileid, aliasid = newfile.store()
self.failUnless(self.storage.hasFile(fileid))
def test_addFiles_identical(self):
# Start adding two files with identical data
data = 'data ' * 5000
newfile1 = self.storage.startAddFile('file1', len(data))
newfile2 = self.storage.startAddFile('file2', len(data))
newfile1.append(data)
newfile2.append(data)
id1, alias1 = newfile1.store()
id2, alias2 = newfile2.store()
# Make sure we actually got an id
self.assertNotEqual(None, id1)
self.assertNotEqual(None, id2)
# But they are two different ids, because we leave duplicate handling
# to the garbage collector
self.failIfEqual(id1, id2)
def test_badDigest(self):
data = 'data ' * 50
digest = 'crud'
newfile = self.storage.startAddFile('file', len(data))
newfile.srcDigest = digest
newfile.append(data)
self.assertRaises(DigestMismatchError, newfile.store)
def test_alias(self):
# Add a file (and so also add an alias)
data = 'data ' * 50
newfile = self.storage.startAddFile('file1', len(data))
newfile.mimetype = 'text/unknown'
newfile.append(data)
fileid, aliasid = newfile.store()
# Check that its alias has the right mimetype
fa = self.storage.getFileAlias(aliasid, None, '/')
self.assertEqual('text/unknown', fa.mimetype)
# Re-add the same file, with the same name and mimetype...
newfile2 = self.storage.startAddFile('file1', len(data))
newfile2.mimetype = 'text/unknown'
newfile2.append(data)
fileid2, aliasid2 = newfile2.store()
# Verify that we didn't get back the same alias ID
self.assertNotEqual(fa.id,
self.storage.getFileAlias(aliasid2, None, '/').id)
def test_clientProvidedDuplicateIDs(self):
# This test checks the new behaviour specified by LibrarianTransactions
# spec: don't create IDs in DB, but do check they don't exist.
# Create a new file
newfile = LibraryFileUpload(self.storage, 'filename', 0)
# Set a content ID on the file (same as would happen with a
# client-generated ID) and store it
newfile.contentID = 666
newfile.store()
newfile = LibraryFileUpload(self.storage, 'filename', 0)
newfile.contentID = 666
self.assertRaises(DuplicateFileIDError, newfile.store)
def test_clientProvidedDuplicateContent(self):
# Check the new behaviour specified by LibrarianTransactions
# spec: allow duplicate content with distinct IDs.
content = 'some content'
# Store a file with id 6661
newfile1 = LibraryFileUpload(self.storage, 'filename', 0)
newfile1.contentID = 6661
newfile1.append(content)
fileid1, aliasid1 = newfile1.store()
# Store second file identical to the first, with id 6662
newfile2 = LibraryFileUpload(self.storage, 'filename', 0)
newfile2.contentID = 6662
newfile2.append(content)
fileid2, aliasid2 = newfile2.store()
# Create rows in the database for these files.
LibraryFileContent(
filesize=0, sha1='foo', md5='xx', sha256='xx', id=6661)
LibraryFileContent(
filesize=0, sha1='foo', md5='xx', sha256='xx', id=6662)
flush_database_updates()
# And no errors should have been raised!
| agpl-3.0 | -1,992,719,405,499,964,400 | 34.908397 | 79 | 0.659439 | false |
chokribr/inveniotest | modules/webcomment/lib/webcomment_templates.py | 15 | 133802 | # -*- coding: utf-8 -*-
## Comments and reviews for records.
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""HTML Templates for commenting features """
__revision__ = "$Id$"
import cgi
# Invenio imports
from invenio.urlutils import create_html_link, create_url
from invenio.webuser import get_user_info, collect_user_info, isGuestUser, get_email
from invenio.dateutils import convert_datetext_to_dategui
from invenio.webmessage_mailutils import email_quoted_txt2html
from invenio.config import CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_BASE_URL, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_SITE_NAME_INTL,\
CFG_SITE_SUPPORT_EMAIL,\
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBCOMMENT_ALLOW_COMMENTS, \
CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR, \
CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN, \
CFG_WEBCOMMENT_AUTHOR_DELETE_COMMENT_OPTION, \
CFG_CERN_SITE, \
CFG_SITE_RECORD, \
CFG_WEBCOMMENT_MAX_ATTACHED_FILES, \
CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE
from invenio.htmlutils import get_html_text_editor, create_html_select
from invenio.messages import gettext_set_language
from invenio.bibformat import format_record
from invenio.access_control_engine import acc_authorize_action
from invenio.access_control_admin import acc_get_user_roles_from_user_info, acc_get_role_id
from invenio.search_engine_utils import get_fieldvalues
class Template:
"""templating class, refer to webcomment.py for examples of call"""
def tmpl_get_first_comments_without_ranking(self, recID, ln, comments, nb_comments_total, warnings):
"""
@param recID: record id
@param ln: language
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param nb_comments_total: total number of comments for this record
@param warnings: list of warning tuples (warning_text, warning_color)
@return: html of comments
"""
# load the right message language
_ = gettext_set_language(ln)
# naming data fields of comments
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_id = 6
warnings = self.tmpl_warnings(warnings, ln)
# write button
write_button_label = _("Write a comment")
write_button_link = '%s/%s/%s/comments/add' % (CFG_SITE_URL, CFG_SITE_RECORD, recID)
write_button_form = '<input type="hidden" name="ln" value="%s"/>' % ln
write_button_form = self.createhiddenform(action=write_button_link, method="get", text=write_button_form, button=write_button_label)
# comments
comment_rows = ''
last_comment_round_name = None
comment_round_names = [comment[0] for comment in comments]
if comment_round_names:
last_comment_round_name = comment_round_names[-1]
for comment_round_name, comments_list in comments:
comment_rows += '<div id="cmtRound%s" class="cmtRound">' % (comment_round_name)
if comment_round_name:
comment_rows += '<div class="webcomment_comment_round_header">' + \
_('%(x_nb)i Comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "</div>"
else:
comment_rows += '<div class="webcomment_comment_round_header">' + \
_('%(x_nb)i Comments') % {'x_nb': len(comments_list),} + "</div>"
for comment in comments_list:
if comment[c_nickname]:
nickname = comment[c_nickname]
display = nickname
else:
(uid, nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(nickname, display, ln)
comment_rows += """
<tr>
<td>"""
report_link = '%s/%s/%s/comments/report?ln=%s&comid=%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln, comment[c_id])
reply_link = '%s/%s/%s/comments/add?ln=%s&comid=%s&action=REPLY' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln, comment[c_id])
comment_rows += self.tmpl_get_comment_without_ranking(req=None, ln=ln, nickname=messaging_link, comment_uid=comment[c_user_id],
date_creation=comment[c_date_creation],
body=comment[c_body], status='', nb_reports=0,
report_link=report_link, reply_link=reply_link, recID=recID)
comment_rows += """
<br />
<br />
</td>
</tr>"""
# Close comment round
comment_rows += '</div>'
# output
if nb_comments_total > 0:
out = warnings
comments_label = len(comments) > 1 and _("Showing the latest %i comments:") % len(comments) \
or ""
out += """
<div class="video_content_clear"></div>
<table class="webcomment_header_comments">
<tr>
<td class="blocknote">%(comment_title)s</td>
</tr>
</table>
<div class="websomment_header_comments_label">%(comments_label)s</div>
%(comment_rows)s
%(view_all_comments_link)s
%(write_button_form)s<br />""" % \
{'comment_title': _("Discuss this document"),
'comments_label': comments_label,
'nb_comments_total' : nb_comments_total,
'recID': recID,
'comment_rows': comment_rows,
'tab': ' '*4,
'siteurl': CFG_SITE_URL,
's': nb_comments_total>1 and 's' or "",
'view_all_comments_link': nb_comments_total>0 and '''<a class="webcomment_view_all_comments" href="%s/%s/%s/comments/display">View all %s comments</a>''' \
% (CFG_SITE_URL, CFG_SITE_RECORD, recID, nb_comments_total) or "",
'write_button_form': write_button_form,
'nb_comments': len(comments)
}
if not comments:
out = """
<!-- comments title table -->
<table class="webcomment_header_comments">
<tr>
<td class="blocknote">%(discuss_label)s:</td>
</tr>
</table>
<div class="webcomment_header_details">%(detailed_info)s
<br />
</div>
%(form)s
""" % {'form': write_button_form,
'discuss_label': _("Discuss this document"),
'detailed_info': _("Start a discussion about any aspect of this document.")
}
return out
def tmpl_record_not_found(self, status='missing', recID="", ln=CFG_SITE_LANG):
"""
Displays a page when bad or missing record ID was given.
@param status: 'missing' : no recID was given
'inexistant': recID doesn't have an entry in the database
'deleted': : recID has been deleted
'nan' : recID is not a number
'invalid' : recID is an error code, i.e. in the interval [-99,-1]
@param return: body of the page
"""
_ = gettext_set_language(ln)
if status == 'inexistant':
body = _("Sorry, the record %s does not seem to exist.") % (recID,)
        elif status == 'deleted':
body = _("The record has been deleted.")
elif status in ('nan', 'invalid'):
body = _("Sorry, %s is not a valid ID value.") % (recID,)
else:
body = _("Sorry, no record ID was provided.")
body += "<br /><br />"
link = "<a href=\"%s?ln=%s\">%s</a>." % (CFG_SITE_URL, ln, CFG_SITE_NAME_INTL.get(ln, CFG_SITE_NAME))
body += _("You may want to start browsing from %s") % link
return body
def tmpl_get_first_comments_with_ranking(self, recID, ln, comments=None, nb_comments_total=None, avg_score=None, warnings=[]):
"""
@param recID: record id
@param ln: language
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param nb_comments_total: total number of comments for this record
@param avg_score: average score of all reviews
@param warnings: list of warning tuples (warning_text, warning_color)
@return: html of comments
"""
# load the right message language
_ = gettext_set_language(ln)
# naming data fields of comments
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_nb_votes_yes = 4
c_nb_votes_total = 5
c_star_score = 6
c_title = 7
c_id = 8
warnings = self.tmpl_warnings(warnings, ln)
#stars
if avg_score > 0:
avg_score_img = 'stars-' + str(avg_score).split('.')[0] + '-' + str(avg_score).split('.')[1] + '.png'
else:
avg_score_img = "stars-0-0.png"
# voting links
useful_dict = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'recID' : recID,
'ln' : ln,
'yes_img' : 'smchk_gr.gif', #'yes.gif',
'no_img' : 'iconcross.gif' #'no.gif'
}
link = '<a href="%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/reviews/vote?ln=%(ln)s&comid=%%(comid)s' % useful_dict
useful_yes = link + '&com_value=1">' + _("Yes") + '</a>'
useful_no = link + '&com_value=-1">' + _("No") + '</a>'
#comment row
comment_rows = ' '
last_comment_round_name = None
comment_round_names = [comment[0] for comment in comments]
if comment_round_names:
last_comment_round_name = comment_round_names[-1]
for comment_round_name, comments_list in comments:
comment_rows += '<div id="cmtRound%s" class="cmtRound">' % (comment_round_name)
comment_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "<br/>"
for comment in comments_list:
if comment[c_nickname]:
nickname = comment[c_nickname]
display = nickname
else:
(uid, nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(nickname, display, ln)
comment_rows += '''
<tr>
<td>'''
report_link = '%s/%s/%s/reviews/report?ln=%s&comid=%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln, comment[c_id])
comment_rows += self.tmpl_get_comment_with_ranking(None, ln=ln, nickname=messaging_link,
comment_uid=comment[c_user_id],
date_creation=comment[c_date_creation],
body=comment[c_body],
status='', nb_reports=0,
nb_votes_total=comment[c_nb_votes_total],
nb_votes_yes=comment[c_nb_votes_yes],
star_score=comment[c_star_score],
title=comment[c_title], report_link=report_link, recID=recID)
comment_rows += '''
%s %s / %s<br />''' % (_("Was this review helpful?"), useful_yes % {'comid':comment[c_id]}, useful_no % {'comid':comment[c_id]})
comment_rows += '''
<br />
</td>
</tr>'''
# Close comment round
comment_rows += '</div>'
# write button
write_button_link = '''%s/%s/%s/reviews/add''' % (CFG_SITE_URL, CFG_SITE_RECORD, recID)
write_button_form = ' <input type="hidden" name="ln" value="%s"/>' % ln
write_button_form = self.createhiddenform(action=write_button_link, method="get", text=write_button_form, button=_("Write a review"))
if nb_comments_total > 0:
avg_score_img = str(avg_score_img)
avg_score = str(avg_score)
nb_comments_total = str(nb_comments_total)
score = '<b>'
score += _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '</b><img src="' + CFG_SITE_URL + '/img/' + avg_score_img + '" alt="' + avg_score + '" />',
'x_nb_reviews': nb_comments_total}
useful_label = _("Readers found the following %s reviews to be most helpful.")
useful_label %= len(comments) > 1 and len(comments) or ""
            view_all_comments_link = '<a class="webcomment_view_all_reviews" href="%s/%s/%s/reviews/display?ln=%s&do=hh">' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, ln)
view_all_comments_link += _("View all %s reviews") % nb_comments_total
view_all_comments_link += '</a><br />'
out = warnings + """
<!-- review title table -->
<table class="webcomment_header_ratings">
<tr>
<td class="blocknote">%(comment_title)s:</td>
</tr>
</table>
%(score_label)s<br />
%(useful_label)s
<!-- review table -->
<table class="webcomment_review_title_table">
%(comment_rows)s
</table>
%(view_all_comments_link)s
%(write_button_form)s<br />
""" % \
{ 'comment_title' : _("Rate this document"),
'score_label' : score,
'useful_label' : useful_label,
'recID' : recID,
'view_all_comments' : _("View all %s reviews") % (nb_comments_total,),
'write_comment' : _("Write a review"),
'comment_rows' : comment_rows,
'tab' : ' '*4,
'siteurl' : CFG_SITE_URL,
'view_all_comments_link': nb_comments_total>0 and view_all_comments_link or "",
'write_button_form' : write_button_form
}
else:
out = '''
<!-- review title table -->
<table class="webcomment_header_ratings">
<tr>
<td class="blocknote"><div class="webcomment_review_first_introduction">%s:</td>
</tr>
</table>
%s<br />
%s
<br />''' % (_("Rate this document"),
                         _('Be the first to review this document.') + '</div>',
write_button_form)
return out
def tmpl_get_comment_without_ranking(self, req, ln, nickname, comment_uid, date_creation, body, status, nb_reports, reply_link=None, report_link=None, undelete_link=None, delete_links=None, unreport_link=None, recID=-1, com_id='', attached_files=None, collapsed_p=False):
"""
private function
@param req: request object to fetch user info
@param ln: language
@param nickname: nickname
@param date_creation: date comment was written
@param body: comment body
@param status: status of the comment:
da: deleted by author
dm: deleted by moderator
ok: active
@param nb_reports: number of reports the comment has
@param reply_link: if want reply and report, give the http links
@param report_link: if want reply and report, give the http links
        @param undelete_link: http link to undelete the message
@param delete_links: http links to delete the message
@param unreport_link: http link to unreport the comment
@param recID: recID where the comment is posted
@param com_id: ID of the comment displayed
@param attached_files: list of attached files
@param collapsed_p: if the comment should be collapsed or not
@return: html table of comment
"""
from invenio.search_engine import guess_primary_collection_of_a_record
# load the right message language
_ = gettext_set_language(ln)
user_info = collect_user_info(req)
date_creation = convert_datetext_to_dategui(date_creation, ln=ln)
if attached_files is None:
attached_files = []
out = ''
final_body = email_quoted_txt2html(body)
title = nickname
title += '<a name="C%s" id="C%s"></a>' % (com_id, com_id)
links = ''
if not isGuestUser(user_info['uid']):
# Add link to toggle comment visibility
links += create_html_link(CFG_SITE_URL + '/' + CFG_SITE_RECORD + '/' + str(recID) + '/comments/toggle',
{'comid': com_id, 'ln': ln, 'collapse': collapsed_p and '0' or '1', 'referer': user_info['uri']},
_("Close"),
{'onclick': "return toggle_visibility(this, %s, 'fast');" % com_id},
escape_linkattrd=False)
moderator_links = ''
if reply_link:
links += '<a class="webcomment_comment_reply" href="' + reply_link +'">' + _("Reply") +'</a>'
if report_link and status != 'ap':
links += '<a class="webcomment_comment_report" href="' + report_link +'">' + _("Report abuse") + '</a>'
# Check if user is a comment moderator
record_primary_collection = guess_primary_collection_of_a_record(recID)
(auth_code, auth_msg) = acc_authorize_action(user_info, 'moderatecomments', collection=record_primary_collection)
if status in ['dm', 'da'] and req:
if not auth_code:
if status == 'dm':
final_body = '<div class="webcomment_deleted_comment_message">(Comment deleted by the moderator) - not visible for users<br /><br />' +\
final_body + '</div>'
else:
final_body = '<div class="webcomment_deleted_comment_message">(Comment deleted by the author) - not visible for users<br /><br />' +\
final_body + '</div>'
links = ''
moderator_links += '<a class="webcomment_deleted_comment_undelete" href="' + undelete_link + '">' + _("Undelete comment") + '</a>'
else:
if status == 'dm':
final_body = '<div class="webcomment_deleted_comment_message">Comment deleted by the moderator</div>'
else:
final_body = '<div class="webcomment_deleted_comment_message">Comment deleted by the author</div>'
links = ''
else:
if not auth_code:
moderator_links += '<a class="webcomment_comment_delete" href="' + delete_links['mod'] +'">' + _("Delete comment") + '</a>'
elif (user_info['uid'] == comment_uid) and CFG_WEBCOMMENT_AUTHOR_DELETE_COMMENT_OPTION:
moderator_links += '<a class="webcomment_comment_delete" href="' + delete_links['auth'] +'">' + _("Delete comment") + '</a>'
if nb_reports >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN:
if not auth_code:
final_body = '<div class="webcomment_reported_comment_message">(Comment reported. Pending approval) - not visible for users<br /><br />' + final_body + '</div>'
links = ''
moderator_links += '<a class="webcomment_reported_comment_unreport" href="' + unreport_link +'">' + _("Unreport comment") + '</a>'
else:
final_body = '<div class="webcomment_comment_pending_approval_message">This comment is pending approval due to user reports</div>'
links = ''
if links and moderator_links:
links = links + moderator_links
elif not links:
links = moderator_links
attached_files_html = ''
if attached_files:
attached_files_html = '<div class="cmtfilesblock"><b>%s:</b><br/>' % (len(attached_files) == 1 and _("Attached file") or _("Attached files"))
for (filename, filepath, fileurl) in attached_files:
attached_files_html += create_html_link(urlbase=fileurl, urlargd={},
link_label=cgi.escape(filename)) + '<br />'
attached_files_html += '</div>'
toggle_visibility_block = ''
if not isGuestUser(user_info['uid']):
toggle_visibility_block = """<div class="webcomment_toggle_visibility"><a id="collapsible_ctr_%(comid)s" class="%(collapse_ctr_class)s" href="%(toggle_url)s" onclick="return toggle_visibility(this, %(comid)i);" title="%(collapse_label)s"><span style="display:none">%(collapse_label)s</span></a></div>""" % \
{'comid': com_id,
'toggle_url': create_url(CFG_SITE_URL + '/' + CFG_SITE_RECORD + '/' + str(recID) + '/comments/toggle', {'comid': com_id, 'ln': ln, 'collapse': collapsed_p and '0' or '1', 'referer': user_info['uri']}),
'collapse_ctr_class': collapsed_p and 'webcomment_collapse_ctr_right' or 'webcomment_collapse_ctr_down',
'collapse_label': collapsed_p and _("Open") or _("Close")}
out += """
<div class="webcomment_comment_box">
%(toggle_visibility_block)s
<div class="webcomment_comment_avatar"><img class="webcomment_comment_avatar_default" src="%(site_url)s/img/user-icon-1-24x24.gif" alt="avatar" /></div>
<div class="webcomment_comment_content">
<div class="webcomment_comment_title">
%(title)s
<div class="webcomment_comment_date">%(date)s</div>
<a class="webcomment_permalink" title="Permalink to this comment" href="#C%(comid)i">¶</a>
</div>
<div class="collapsible_content" id="collapsible_content_%(comid)i" style="%(collapsible_content_style)s">
<blockquote>
%(body)s
</blockquote>
%(attached_files_html)s
<div class="webcomment_comment_options">%(links)s</div>
</div>
<div class="clearer"></div>
</div>
<div class="clearer"></div>
</div>""" % \
{'title' : title,
'body' : final_body,
'links' : links,
'attached_files_html': attached_files_html,
'date': date_creation,
'site_url': CFG_SITE_URL,
'comid': com_id,
'collapsible_content_style': collapsed_p and 'display:none' or '',
'toggle_visibility_block': toggle_visibility_block,
}
return out
def tmpl_get_comment_with_ranking(self, req, ln, nickname, comment_uid, date_creation, body, status, nb_reports, nb_votes_total, nb_votes_yes, star_score, title, report_link=None, delete_links=None, undelete_link=None, unreport_link=None, recID=-1):
"""
private function
@param req: request object to fetch user info
@param ln: language
@param nickname: nickname
@param date_creation: date comment was written
@param body: comment body
@param status: status of the comment
@param nb_reports: number of reports the comment has
@param nb_votes_total: total number of votes for this review
@param nb_votes_yes: number of positive votes for this record
@param star_score: star score for this record
@param title: title of review
@param report_link: if want reply and report, give the http links
        @param undelete_link: http link to undelete the message
        @param delete_links: http links to delete the message
@param unreport_link: http link to unreport the comment
@param recID: recID where the comment is posted
@return: html table of review
"""
from invenio.search_engine import guess_primary_collection_of_a_record
# load the right message language
_ = gettext_set_language(ln)
if star_score > 0:
star_score_img = 'stars-' + str(star_score) + '-0.png'
else:
star_score_img = 'stars-0-0.png'
out = ""
date_creation = convert_datetext_to_dategui(date_creation, ln=ln)
reviewed_label = _("Reviewed by %(x_nickname)s on %(x_date)s") % {'x_nickname': nickname, 'x_date':date_creation}
## FIX
nb_votes_yes = str(nb_votes_yes)
nb_votes_total = str(nb_votes_total)
useful_label = _("%(x_nb_people)s out of %(x_nb_total)s people found this review useful") % {'x_nb_people': nb_votes_yes,
'x_nb_total': nb_votes_total}
links = ''
_body = ''
if body != '':
_body = '''
<blockquote>
%s
</blockquote>''' % email_quoted_txt2html(body, linebreak_html='')
# Check if user is a comment moderator
record_primary_collection = guess_primary_collection_of_a_record(recID)
user_info = collect_user_info(req)
(auth_code, auth_msg) = acc_authorize_action(user_info, 'moderatecomments', collection=record_primary_collection)
if status in ['dm', 'da'] and req:
if not auth_code:
if status == 'dm':
_body = '<div class="webcomment_deleted_review_message">(Review deleted by moderator) - not visible for users<br /><br />' +\
_body + '</div>'
else:
_body = '<div class="webcomment_deleted_review_message">(Review deleted by author) - not visible for users<br /><br />' +\
_body + '</div>'
links = '<a class="webcomment_deleted_review_undelete" href="' + undelete_link + '">' + _("Undelete review") + '</a>'
else:
if status == 'dm':
_body = '<div class="webcomment_deleted_review_message">Review deleted by moderator</div>'
else:
_body = '<div class="webcomment_deleted_review_message">Review deleted by author</div>'
links = ''
else:
if not auth_code:
links += '<a class="webcomment_review_delete" href="' + delete_links['mod'] +'">' + _("Delete review") + '</a>'
if nb_reports >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN:
if not auth_code:
_body = '<div class="webcomment_review_pending_approval_message">(Review reported. Pending approval) - not visible for users<br /><br />' + _body + '</div>'
links += ' | '
links += '<a class="webcomment_reported_review_unreport" href="' + unreport_link +'">' + _("Unreport review") + '</a>'
else:
_body = '<div class="webcomment_review_pending_approval_message">This review is pending approval due to user reports.</div>'
links = ''
out += '''
<div class="webcomment_review_box">
<div class="webcomment_review_box_inner">
<img src="%(baseurl)s/img/%(star_score_img)s" alt="%(star_score)s/>
<div class="webcomment_review_title">%(title)s</div>
<div class="webcomment_review_label_reviewed">%(reviewed_label)s</div>
<div class="webcomment_review_label_useful">%(useful_label)s</div>
%(body)s
</div>
</div>
%(abuse)s''' % {'baseurl' : CFG_BASE_URL,
'star_score_img': star_score_img,
'star_score' : star_score,
'title' : cgi.escape(title),
'reviewed_label': reviewed_label,
'useful_label' : useful_label,
'body' : _body,
'abuse' : links
}
return out
def tmpl_get_comments(self, req, recID, ln,
nb_per_page, page, nb_pages,
display_order, display_since,
CFG_WEBCOMMENT_ALLOW_REVIEWS,
comments, total_nb_comments,
avg_score,
warnings,
border=0, reviews=0,
total_nb_reviews=0,
nickname='', uid=-1, note='',score=5,
can_send_comments=False,
can_attach_files=False,
user_is_subscribed_to_discussion=False,
user_can_unsubscribe_from_discussion=False,
display_comment_rounds=None):
"""
Get table of all comments
@param recID: record id
@param ln: language
@param nb_per_page: number of results per page
@param page: page number
@param display_order: hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param display_since: all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
        @param CFG_WEBCOMMENT_ALLOW_REVIEWS: is ranking enabled, get from config.py/CFG_WEBCOMMENT_ALLOW_REVIEWS
@param comments: tuple as returned from webcomment.py/query_retrieve_comments_or_remarks
@param total_nb_comments: total number of comments for this record
@param avg_score: average score of reviews for this record
@param warnings: list of warning tuples (warning_text, warning_color)
@param border: boolean, active if want to show border around each comment/review
@param reviews: boolean, enabled for reviews, disabled for comments
@param can_send_comments: boolean, if user can send comments or not
@param can_attach_files: boolean, if user can attach file to comment or not
@param user_is_subscribed_to_discussion: True if user already receives new comments by email
        @param user_can_unsubscribe_from_discussion: True if user is allowed to unsubscribe from discussion
"""
# load the right message language
_ = gettext_set_language(ln)
# CERN hack begins: display full ATLAS user name. Check further below too.
current_user_fullname = ""
override_nickname_p = False
if CFG_CERN_SITE:
from invenio.search_engine import get_all_collections_of_a_record
user_info = collect_user_info(uid)
if 'atlas-readaccess-active-members [CERN]' in user_info['group']:
# An ATLAS member is never anonymous to its colleagues
# when commenting inside ATLAS collections
recid_collections = get_all_collections_of_a_record(recID)
if 'ATLAS' in str(recid_collections):
override_nickname_p = True
current_user_fullname = user_info.get('external_fullname', '')
# CERN hack ends
# naming data fields of comments
if reviews:
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_status = 4
c_nb_reports = 5
c_nb_votes_yes = 6
c_nb_votes_total = 7
c_star_score = 8
c_title = 9
c_id = 10
c_round_name = 11
c_restriction = 12
reply_to = 13
c_visibility = 14
discussion = 'reviews'
comments_link = '<a href="%s/%s/%s/comments/">%s</a> (%i)' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, _('Comments'), total_nb_comments)
reviews_link = '<b>%s (%i)</b>' % (_('Reviews'), total_nb_reviews)
add_comment_or_review = self.tmpl_add_comment_form_with_ranking(recID, uid, current_user_fullname or nickname, ln, '', score, note, warnings, show_title_p=True, can_attach_files=can_attach_files)
else:
c_nickname = 0
c_user_id = 1
c_date_creation = 2
c_body = 3
c_status = 4
c_nb_reports = 5
c_id = 6
c_round_name = 7
c_restriction = 8
reply_to = 9
c_visibility = 10
discussion = 'comments'
comments_link = '<b>%s (%i)</b>' % (_('Comments'), total_nb_comments)
reviews_link = '<a href="%s/%s/%s/reviews/">%s</a> (%i)' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, _('Reviews'), total_nb_reviews)
add_comment_or_review = self.tmpl_add_comment_form(recID, uid, nickname, ln, note, warnings, can_attach_files=can_attach_files, user_is_subscribed_to_discussion=user_is_subscribed_to_discussion)
# voting links
useful_dict = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'recID' : recID,
'ln' : ln,
'do' : display_order,
'ds' : display_since,
'nb' : nb_per_page,
'p' : page,
'reviews' : reviews,
'discussion' : discussion
}
useful_yes = '<a href="%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/vote?ln=%(ln)s&comid=%%(comid)s&com_value=1&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/display">' + _("Yes") + '</a>'
useful_yes %= useful_dict
useful_no = '<a href="%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/vote?ln=%(ln)s&comid=%%(comid)s&com_value=-1&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/display">' + _("No") + '</a>'
useful_no %= useful_dict
warnings = self.tmpl_warnings(warnings, ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'module' : 'comments',
'function' : 'index',
'discussion': discussion,
'arguments' : 'do=%s&ds=%s&nb=%s' % (display_order, display_since, nb_per_page),
'arg_page' : '&p=%s' % page,
'page' : page,
'rec_id' : recID}
if not req:
req = None
## comments table
comments_rows = ''
last_comment_round_name = None
comment_round_names = [comment[0] for comment in comments]
if comment_round_names:
last_comment_round_name = comment_round_names[-1]
for comment_round_name, comments_list in comments:
comment_round_style = "display:none;"
comment_round_is_open = False
if comment_round_name in display_comment_rounds:
comment_round_is_open = True
comment_round_style = ""
comments_rows += '<div id="cmtRound%s" class="cmtround">' % (comment_round_name)
if not comment_round_is_open and \
(comment_round_name or len(comment_round_names) > 1):
new_cmtgrp = list(display_comment_rounds)
new_cmtgrp.append(comment_round_name)
comments_rows += '''<img src="/img/right-trans.gif" id="cmtarrowiconright%(grp_id)s" alt="Open group" /><img src="/img/down-trans.gif" id="cmtarrowicondown%(grp_id)s" alt="Close group" style="display:none" />
<a class="cmtgrpswitch" name="cmtgrpLink%(grp_id)s" onclick="var cmtarrowicondown=document.getElementById('cmtarrowicondown%(grp_id)s');var cmtarrowiconright=document.getElementById('cmtarrowiconright%(grp_id)s');var subgrp=document.getElementById('cmtSubRound%(grp_id)s');if (subgrp.style.display==''){subgrp.style.display='none';cmtarrowiconright.style.display='';cmtarrowicondown.style.display='none';}else{subgrp.style.display='';cmtarrowiconright.style.display='none';cmtarrowicondown.style.display='';};return false;"''' % {'grp_id': comment_round_name}
comments_rows += 'href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s' % link_dic
comments_rows += '&' + '&'.join(["cmtgrp=" + grp for grp in new_cmtgrp if grp != 'none']) + \
'#cmtgrpLink%s' % (comment_round_name) + '\">'
comments_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name} + "</a><br/>"
elif comment_round_name or len(comment_round_names) > 1:
new_cmtgrp = list(display_comment_rounds)
new_cmtgrp.remove(comment_round_name)
comments_rows += '''<img src="/img/right-trans.gif" id="cmtarrowiconright%(grp_id)s" alt="Open group" style="display:none" /><img src="/img/down-trans.gif" id="cmtarrowicondown%(grp_id)s" alt="Close group" />
<a class="cmtgrpswitch" name="cmtgrpLink%(grp_id)s" onclick="var cmtarrowicondown=document.getElementById('cmtarrowicondown%(grp_id)s');var cmtarrowiconright=document.getElementById('cmtarrowiconright%(grp_id)s');var subgrp=document.getElementById('cmtSubRound%(grp_id)s');if (subgrp.style.display==''){subgrp.style.display='none';cmtarrowiconright.style.display='';cmtarrowicondown.style.display='none';}else{subgrp.style.display='';cmtarrowiconright.style.display='none';cmtarrowicondown.style.display='';};return false;"''' % {'grp_id': comment_round_name}
comments_rows += 'href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s' % link_dic
comments_rows += '&' + ('&'.join(["cmtgrp=" + grp for grp in new_cmtgrp if grp != 'none']) or 'cmtgrp=none' ) + \
'#cmtgrpLink%s' % (comment_round_name) + '\">'
comments_rows += _('%(x_nb)i comments for round "%(x_name)s"') % {'x_nb': len(comments_list), 'x_name': comment_round_name}+ "</a><br/>"
comments_rows += '<div id="cmtSubRound%s" class="cmtsubround" style="%s">' % (comment_round_name,
comment_round_style)
comments_rows += '''
<script type='text/javascript'>//<![CDATA[
function toggle_visibility(this_link, comid, duration) {
if (duration == null) duration = 0;
var isVisible = $('#collapsible_content_' + comid).is(':visible');
$('#collapsible_content_' + comid).toggle(duration);
$('#collapsible_ctr_' + comid).toggleClass('webcomment_collapse_ctr_down');
$('#collapsible_ctr_' + comid).toggleClass('webcomment_collapse_ctr_right');
if (isVisible){
$('#collapsible_ctr_' + comid).attr('title', '%(open_label)s');
$('#collapsible_ctr_' + comid + ' > span').html('%(open_label)s');
} else {
$('#collapsible_ctr_' + comid).attr('title', '%(close_label)s');
$('#collapsible_ctr_' + comid + ' > span').html('%(close_label)s');
}
$.ajax({
type: 'POST',
url: '%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/toggle',
data: {'comid': comid, 'ln': '%(ln)s', 'collapse': isVisible && 1 || 0}
});
/* Replace our link with a jump to the adequate, in case needed
(default link is for non-Javascript user) */
this_link.href = "#C" + comid
/* Find out if after closing comment we shall scroll a bit to the top,
i.e. go back to main anchor of the comment that we have just set */
var top = $(window).scrollTop();
if ($(window).scrollTop() >= $("#C" + comid).offset().top) {
// Our comment is now above the window: scroll to it
return true;
}
return false;
}
//]]></script>
''' % {'siteurl': CFG_SITE_URL,
'recID': recID,
'ln': ln,
'CFG_SITE_RECORD': CFG_SITE_RECORD,
'open_label': _("Open"),
'close_label': _("Close")}
thread_history = [0]
previous_depth = 0
for comment in comments_list:
if comment[reply_to] not in thread_history:
# Going one level down in the thread
thread_history.append(comment[reply_to])
depth = thread_history.index(comment[reply_to])
else:
depth = thread_history.index(comment[reply_to])
thread_history = thread_history[:depth + 1]
if previous_depth > depth:
comments_rows += ("""</div>""" * (previous_depth-depth))
if previous_depth < depth:
comments_rows += ("""<div class="webcomment_thread_block">""" * (depth-previous_depth))
previous_depth = depth
# CERN hack begins: display full ATLAS user name.
comment_user_fullname = ""
if CFG_CERN_SITE and override_nickname_p:
comment_user_fullname = get_email(comment[c_user_id])
# CERN hack ends
if comment[c_nickname]:
_nickname = comment[c_nickname]
display = _nickname
else:
(uid, _nickname, display) = get_user_info(comment[c_user_id])
messaging_link = self.create_messaging_link(_nickname, comment_user_fullname or display, ln)
from invenio.webcomment import get_attached_files # FIXME
files = get_attached_files(recID, comment[c_id])
# do NOT delete the HTML comment below. It is used for parsing... (I plead unguilty!)
comments_rows += """
<!-- start comment row -->
<div>"""
delete_links = {}
if not reviews:
report_link = '%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/report?ln=%(ln)s&comid=%%(comid)s&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/display' % useful_dict % {'comid':comment[c_id]}
reply_link = '%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/add?ln=%(ln)s&action=REPLY&comid=%%(comid)s' % useful_dict % {'comid':comment[c_id]}
delete_links['mod'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_mod?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
delete_links['auth'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_auth?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
undelete_link = "%s/admin/webcomment/webcommentadmin.py/undel_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
unreport_link = "%s/admin/webcomment/webcommentadmin.py/unreport_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
comments_rows += self.tmpl_get_comment_without_ranking(req, ln, messaging_link, comment[c_user_id], comment[c_date_creation], comment[c_body], comment[c_status], comment[c_nb_reports], reply_link, report_link, undelete_link, delete_links, unreport_link, recID, comment[c_id], files, comment[c_visibility])
else:
report_link = '%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/reviews/report?ln=%(ln)s&comid=%%(comid)s&do=%(do)s&ds=%(ds)s&nb=%(nb)s&p=%(p)s&referer=%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/reviews/display' % useful_dict % {'comid': comment[c_id]}
delete_links['mod'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_mod?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
delete_links['auth'] = "%s/admin/webcomment/webcommentadmin.py/del_single_com_auth?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
undelete_link = "%s/admin/webcomment/webcommentadmin.py/undel_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
unreport_link = "%s/admin/webcomment/webcommentadmin.py/unreport_com?ln=%s&id=%s" % (CFG_SITE_URL, ln, comment[c_id])
comments_rows += self.tmpl_get_comment_with_ranking(req, ln, messaging_link, comment[c_user_id], comment[c_date_creation], comment[c_body], comment[c_status], comment[c_nb_reports], comment[c_nb_votes_total], comment[c_nb_votes_yes], comment[c_star_score], comment[c_title], report_link, delete_links, undelete_link, unreport_link, recID)
helpful_label = _("Was this review helpful?")
report_abuse_label = "(" + _("Report abuse") + ")"
yes_no_separator = '<td> / </td>'
if comment[c_nb_reports] >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN or comment[c_status] in ['dm', 'da']:
report_abuse_label = ""
helpful_label = ""
useful_yes = ""
useful_no = ""
yes_no_separator = ""
comments_rows += """
<table>
<tr>
<td>%(helpful_label)s %(tab)s</td>
<td> %(yes)s </td>
%(yes_no_separator)s
<td> %(no)s </td>
<td class="reportabuse">%(tab)s%(tab)s<a href="%(report)s">%(report_abuse_label)s</a></td>
</tr>
</table>""" \
% {'helpful_label': helpful_label,
'yes' : useful_yes % {'comid':comment[c_id]},
'yes_no_separator': yes_no_separator,
'no' : useful_no % {'comid':comment[c_id]},
'report' : report_link % {'comid':comment[c_id]},
'report_abuse_label': comment[c_nb_reports] >= CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN and '' or report_abuse_label,
'tab' : ' '*2}
# do NOT remove HTML comment below. It is used for parsing...
comments_rows += """
</div>
<!-- end comment row -->"""
comments_rows += '</div></div>'
## page links
page_links = ''
# Previous
if page != 1:
link_dic['arg_page'] = 'p=%s' % (page - 1)
page_links += '<a href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\"><<</a> ' % link_dic
else:
page_links += ' %s ' % (' '*(len(_('Previous'))+7))
# Page Numbers
for i in range(1, nb_pages+1):
link_dic['arg_page'] = 'p=%s' % i
link_dic['page'] = '%s' % i
if i != page:
page_links += '''
<a href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\">%(page)s</a> ''' % link_dic
else:
page_links += ''' <b>%s</b> ''' % i
# Next
if page != nb_pages:
link_dic['arg_page'] = 'p=%s' % (page + 1)
page_links += '''
<a href=\"%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)s/%(discussion)s/%(function)s?%(arguments)s&%(arg_page)s\">>></a> ''' % link_dic
else:
page_links += '%s' % (' '*(len(_('Next'))+7))
## stuff for ranking if enabled
if reviews:
if avg_score > 0:
avg_score_img = 'stars-' + str(avg_score).split('.')[0] + '-' + str(avg_score).split('.')[1] + '.png'
else:
avg_score_img = "stars-0-0.png"
ranking_average = '<br /><b>'
ranking_average += _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '</b><img src="' + CFG_SITE_URL + '/img/' + avg_score_img + '" alt="' + str(avg_score) + '" />',
'x_nb_reviews': str(total_nb_reviews)}
ranking_average += '<br />'
else:
ranking_average = ""
write_button_link = '''%s/%s/%s/%s/add''' % (CFG_SITE_URL, CFG_SITE_RECORD, recID, discussion)
        write_button_form = '<input type="hidden" name="ln" value="%s"/>' % ln
write_button_form = self.createhiddenform(action=write_button_link,
method="get",
text=write_button_form,
button = reviews and _('Write a review') or _('Write a comment'))
if reviews:
total_label = _("There is a total of %s reviews")
else:
total_label = _("There is a total of %s comments")
total_label %= total_nb_comments
review_or_comment_first = ''
if reviews == 0 and total_nb_comments == 0 and can_send_comments:
review_or_comment_first = _("Start a discussion about any aspect of this document.") + '<br />'
elif reviews == 1 and total_nb_reviews == 0 and can_send_comments:
review_or_comment_first = _("Be the first to review this document.") + '<br />'
# do NOT remove the HTML comments below. Used for parsing
body = '''
%(comments_and_review_tabs)s
<!-- start comments table -->
<div class="webcomment_comment_table">
%(comments_rows)s
</div>
<!-- end comments table -->
%(review_or_comment_first)s
<br />''' % \
{ 'record_label': _("Record"),
'back_label': _("Back to search results"),
'total_label': total_label,
'write_button_form' : write_button_form,
'write_button_form_again' : total_nb_comments>3 and write_button_form or "",
'comments_rows' : comments_rows,
'total_nb_comments' : total_nb_comments,
'comments_or_reviews' : reviews and _('review') or _('comment'),
'comments_or_reviews_title' : reviews and _('Review') or _('Comment'),
'siteurl' : CFG_SITE_URL,
'module' : "comments",
'recid' : recID,
'ln' : ln,
#'border' : border,
'ranking_avg' : ranking_average,
'comments_and_review_tabs' : CFG_WEBCOMMENT_ALLOW_REVIEWS and \
CFG_WEBCOMMENT_ALLOW_COMMENTS and \
'%s | %s <br />' % \
(comments_link, reviews_link) or '',
'review_or_comment_first' : review_or_comment_first
}
# form is not currently used. reserved for an eventual purpose
#form = """
# Display <select name="nb" size="1"> per page
# <option value="all">All</option>
# <option value="10">10</option>
# <option value="25">20</option>
# <option value="50">50</option>
# <option value="100" selected="selected">100</option>
# </select>
# comments per page that are <select name="ds" size="1">
# <option value="all" selected="selected">Any age</option>
# <option value="1d">1 day old</option>
# <option value="3d">3 days old</option>
# <option value="1w">1 week old</option>
# <option value="2w">2 weeks old</option>
# <option value="1m">1 month old</option>
# <option value="3m">3 months old</option>
# <option value="6m">6 months old</option>
# <option value="1y">1 year old</option>
# </select>
# and sorted by <select name="do" size="1">
# <option value="od" selected="selected">Oldest first</option>
# <option value="nd">Newest first</option>
# %s
# </select>
# """ % \
# (reviews==1 and '''
# <option value=\"hh\">most helpful</option>
# <option value=\"lh\">least helpful</option>
# <option value=\"hs\">highest star ranking</option>
# <option value=\"ls\">lowest star ranking</option>
# </select>''' or '''
# </select>''')
#
#form_link = "%(siteurl)s/%(module)s/%(function)s" % link_dic
#form = self.createhiddenform(action=form_link, method="get", text=form, button='Go', recid=recID, p=1)
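        # Pagination summary for the items shown on this page, e.g. with
        # 10 comments per page, page 2 of 3 shows items 11-20 (hypothetical
        # numbers).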
pages = """
<div>
%(v_label)s %(comments_or_reviews)s %(results_nb_lower)s-%(results_nb_higher)s <br />
%(page_links)s
</div>
""" % \
{'v_label': _("Viewing"),
'page_links': _("Page:") + page_links ,
'comments_or_reviews': reviews and _('review') or _('comment'),
'results_nb_lower': len(comments)>0 and ((page-1) * nb_per_page)+1 or 0,
'results_nb_higher': page == nb_pages and (((page-1) * nb_per_page) + len(comments)) or (page * nb_per_page)}
if nb_pages > 1:
#body = warnings + body + form + pages
body = warnings + body + pages
else:
body = warnings + body
if reviews == 0:
if not user_is_subscribed_to_discussion:
body += '<div class="comment-subscribe">' + '<img src="%s/img/mail-icon-12x8.gif" border="0" alt="" />' % CFG_SITE_URL + \
' ' + '<b>' + create_html_link(urlbase=CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(recID) + '/comments/subscribe',
urlargd={},
link_label=_('Subscribe')) + \
'</b>' + ' to this discussion. You will then receive all new comments by email.' + '</div>'
body += '<br />'
elif user_can_unsubscribe_from_discussion:
body += '<div class="comment-subscribe">' + '<img src="%s/img/mail-icon-12x8.gif" border="0" alt="" />' % CFG_SITE_URL + \
' ' + '<b>' + create_html_link(urlbase=CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(recID) + '/comments/unsubscribe',
urlargd={},
link_label=_('Unsubscribe')) + \
'</b>' + ' from this discussion. You will no longer receive emails about new comments.' + '</div>'
body += '<br />'
if can_send_comments:
body += add_comment_or_review
else:
body += '<br/><em>' + _("You are not authorized to comment or review.") + '</em>'
return '<div class="webcomment_container">' + body + '</div>'
def create_messaging_link(self, to, display_name, ln=CFG_SITE_LANG):
"""prints a link to the messaging system"""
link = "%s/yourmessages/write?msg_to=%s&ln=%s" % (CFG_SITE_URL, to, ln)
if to:
return '<a href="%s" class="maillink">%s</a>' % (link, display_name)
else:
return display_name
def createhiddenform(self, action="", method="get", text="", button="confirm", cnfrm='', **hidden):
"""
        create a form with hidden inputs and a submit button
@param action: name of the action to perform on submit
@param method: 'get' or 'post'
        @param text: additional text, can also be used to add non-hidden inputs
@param button: value/caption on the submit button
@param cnfrm: if given, must check checkbox to confirm
@param **hidden: dictionary with name=value pairs for hidden input
@return: html form
"""
output = """
<form action="%s" method="%s">""" % (action, method.lower().strip() in ['get', 'post'] and method or 'get')
output += """
<table style="width:90%">
<tr>
<td style="vertical-align: top">
"""
output += text + '\n'
if cnfrm:
output += """
<input type="checkbox" name="confirm" value="1" />"""
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += """
</td>
</tr>
<tr>
<td>"""
output += """
<input class="adminbutton" type="submit" value="%s" />""" % (button, )
output += """
</td>
</tr>
</table>
</form>"""
return output
def create_write_comment_hiddenform(self, action="", method="get", text="", button="confirm", cnfrm='',
enctype='', form_id=None, form_name=None, **hidden):
"""
        create a form with hidden inputs and a submit button
@param action: name of the action to perform on submit
@param method: 'get' or 'post'
        @param text: additional text, can also be used to add non-hidden inputs
@param button: value/caption on the submit button
@param cnfrm: if given, must check checkbox to confirm
@param form_id: HTML 'id' attribute of the form tag
@param form_name: HTML 'name' attribute of the form tag
@param **hidden: dictionary with name=value pairs for hidden input
@return: html form
"""
enctype_attr = ''
if enctype:
enctype_attr = 'enctype="%s"' % enctype
output = """
<form action="%s" method="%s" %s%s%s>""" % \
(action, method.lower().strip() in ['get', 'post'] and method or 'get',
enctype_attr, form_name and ' name="%s"' % form_name or '',
form_id and ' id="%s"' % form_id or '')
if cnfrm:
output += """
<input type="checkbox" name="confirm" value="1" />"""
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += text + '\n'
output += """
</form>"""
return output
def tmpl_warnings(self, warnings=[], ln=CFG_SITE_LANG):
"""
Display len(warnings) warning fields
@param warnings: list of warning tuples (warning_text, warning_color)
        @param ln: language
@return: html output
"""
if type(warnings) is not list:
warnings = [warnings]
warningbox = ""
if warnings:
for i in range(len(warnings)):
warning_text = warnings[i][0]
warning_color = warnings[i][1]
if warning_color == 'green':
span_class = 'exampleleader'
else:
span_class = 'important'
warningbox += '''
<span class="%(span_class)s">%(warning)s</span><br />''' % \
{ 'span_class' : span_class,
'warning' : warning_text }
return warningbox
else:
return ""
def tmpl_error(self, error, ln=CFG_SITE_LANG):
"""
Display error
@param error: string
        @param ln: language
@return: html output
"""
_ = gettext_set_language(ln)
errorbox = ""
if error != "":
errorbox = "<div class=\"errorbox\">\n <b>Error:</b>\n"
errorbox += " <p>"
errorbox += error + " </p>"
errorbox += "</div><br />\n"
return errorbox
def tmpl_add_comment_form(self, recID, uid, nickname, ln, msg,
warnings, textual_msg=None, can_attach_files=False,
user_is_subscribed_to_discussion=False, reply_to=None):
"""
Add form for comments
@param recID: record id
@param uid: user id
@param ln: language
@param msg: comment body contents for when refreshing due to
warning, or when replying to a comment
@param textual_msg: same as 'msg', but contains the textual
version in case user cannot display CKeditor
@param warnings: list of warning tuples (warning_text, warning_color)
@param can_attach_files: if user can upload attach file to record or not
@param user_is_subscribed_to_discussion: True if user already receives new comments by email
@param reply_to: the ID of the comment we are replying to. None if not replying
@return html add comment form
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'module' : 'comments',
'function' : 'add',
'arguments' : 'ln=%s&action=%s' % (ln, 'SUBMIT'),
'recID' : recID}
if textual_msg is None:
textual_msg = msg
# FIXME a cleaner handling of nicknames is needed.
if not nickname:
(uid, nickname, display) = get_user_info(uid)
if nickname:
note = _("Note: Your nickname, %s, will be displayed as author of this comment.") % ('<i>' + nickname + '</i>')
else:
(uid, nickname, display) = get_user_info(uid)
link = '<a href="%s/youraccount/edit">' % CFG_SITE_SECURE_URL
note = _("Note: you have not %(x_url_open)sdefined your nickname%(x_url_close)s. %(x_nickname)s will be displayed as the author of this comment.") % \
{'x_url_open': link,
'x_url_close': '</a>',
'x_nickname': ' <br /><i>' + display + '</i>'}
if not CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR:
note += '<br />' + ' '*10 + cgi.escape('You can use some HTML tags: <a href>, <strong>, <blockquote>, <br />, <p>, <em>, <ul>, <li>, <b>, <i>')
#from invenio.search_engine import print_record
#record_details = print_record(recID=recID, format='hb', ln=ln)
warnings = self.tmpl_warnings(warnings, ln)
# Prepare file upload settings. We must enable file upload in
        # the CKEditor + a simple file upload interface (independent from the editor)
file_upload_url = None
simple_attach_file_interface = ''
if isGuestUser(uid):
simple_attach_file_interface = "<small><em>%s</em></small><br/>" % _("Once logged in, authorized users can also attach files.")
if can_attach_files:
# Note that files can be uploaded only when user is logged in
#file_upload_url = '%s/%s/%i/comments/attachments/put' % \
# (CFG_SITE_URL, CFG_SITE_RECORD, recID)
simple_attach_file_interface = '''
<div id="uploadcommentattachmentsinterface">
<small>%(attach_msg)s: <em>(%(nb_files_limit_msg)s. %(file_size_limit_msg)s)</em></small><br />
<input class="multi max-%(CFG_WEBCOMMENT_MAX_ATTACHED_FILES)s" type="file" name="commentattachment[]"/><br />
<noscript>
<input type="file" name="commentattachment[]" /><br />
</noscript>
</div>
''' % \
{'CFG_WEBCOMMENT_MAX_ATTACHED_FILES': CFG_WEBCOMMENT_MAX_ATTACHED_FILES,
'attach_msg': CFG_WEBCOMMENT_MAX_ATTACHED_FILES == 1 and _("Optionally, attach a file to this comment") or \
_("Optionally, attach files to this comment"),
                 'nb_files_limit_msg': CFG_WEBCOMMENT_MAX_ATTACHED_FILES == 1 and _("Max one file") or \
                                       _("Max %i files") % CFG_WEBCOMMENT_MAX_ATTACHED_FILES,
'file_size_limit_msg': CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE > 0 and _("Max %(x_nb_bytes)s per file") % {'x_nb_bytes': (CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE < 1024*1024 and (str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/1024) + 'KB') or (str(CFG_WEBCOMMENT_MAX_ATTACHMENT_SIZE/(1024*1024)) + 'MB'))} or ''}
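        # Build the comment body editor: a rich-text editor (CKEditor) when
        # CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR is enabled, otherwise a plain
        # textarea pre-filled with the textual version of the message.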
editor = get_html_text_editor(name='msg',
content=msg,
textual_content=textual_msg,
width='100%',
height='400px',
enabled=CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR,
file_upload_url=file_upload_url,
toolbar_set = "WebComment",
ln=ln)
subscribe_to_discussion = ''
if not user_is_subscribed_to_discussion:
# Offer to subscribe to discussion
subscribe_to_discussion = '<small><input type="checkbox" name="subscribe" id="subscribe"/><label for="subscribe">%s</label></small>' % _("Send me an email when a new comment is posted")
form = """<div id="comment-write"><h2>%(add_comment)s</h2>
%(editor)s
<br />
%(simple_attach_file_interface)s
<span class="reportabuse">%(note)s</span>
<div class="submit-area">
%(subscribe_to_discussion)s<br />
<input class="adminbutton" type="submit" value="Add comment" onclick="user_must_confirm_before_leaving_page = false;return true;"/>
%(reply_to)s
</div>
</div>
""" % {'note': note,
'record_label': _("Article") + ":",
'comment_label': _("Comment") + ":",
'add_comment': _('Add comment'),
'editor': editor,
'subscribe_to_discussion': subscribe_to_discussion,
'reply_to': reply_to and '<input type="hidden" name="comid" value="%s"/>' % reply_to or '',
'simple_attach_file_interface': simple_attach_file_interface}
form_link = "%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/comments/%(function)s?%(arguments)s" % link_dic
form = self.create_write_comment_hiddenform(action=form_link, method="post", text=form, button='Add comment',
enctype='multipart/form-data', form_id='cmtForm',
form_name='cmtForm')
return warnings + form + self.tmpl_page_do_not_leave_comment_page_js(ln=ln)
def tmpl_add_comment_form_with_ranking(self, recID, uid, nickname, ln, msg, score, note,
warnings, textual_msg=None, show_title_p=False,
can_attach_files=False):
"""
Add form for reviews
@param recID: record id
@param uid: user id
@param ln: language
@param msg: comment body contents for when refreshing due to warning
        @param textual_msg: the textual version of 'msg' when the user cannot display CKEditor
@param score: review score
@param note: review title
@param warnings: list of warning tuples (warning_text, warning_color)
@param show_title_p: if True, prefix the form with "Add Review" as title
@param can_attach_files: if user can upload attach file to record or not
@return: html add review form
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'module' : 'comments',
'function' : 'add',
'arguments' : 'ln=%s&action=%s' % (ln, 'SUBMIT'),
'recID' : recID}
warnings = self.tmpl_warnings(warnings, ln)
if textual_msg is None:
textual_msg = msg
#from search_engine import print_record
#record_details = print_record(recID=recID, format='hb', ln=ln)
if nickname:
note_label = _("Note: Your nickname, %s, will be displayed as the author of this review.")
note_label %= ('<i>' + nickname + '</i>')
else:
(uid, nickname, display) = get_user_info(uid)
link = '<a href="%s/youraccount/edit">' % CFG_SITE_SECURE_URL
note_label = _("Note: you have not %(x_url_open)sdefined your nickname%(x_url_close)s. %(x_nickname)s will be displayed as the author of this comment.") % \
{'x_url_open': link,
'x_url_close': '</a>',
'x_nickname': ' <br /><i>' + display + '</i>'}
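        # Pre-select the star score previously chosen by the user, e.g. when
        # the form is redisplayed after a validation warning.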
selected0 = ''
selected1 = ''
selected2 = ''
selected3 = ''
selected4 = ''
selected5 = ''
if score == 0:
selected0 = ' selected="selected"'
elif score == 1:
selected1 = ' selected="selected"'
elif score == 2:
selected2 = ' selected="selected"'
elif score == 3:
selected3 = ' selected="selected"'
elif score == 4:
selected4 = ' selected="selected"'
elif score == 5:
selected5 = ' selected="selected"'
## file_upload_url = None
## if can_attach_files:
## file_upload_url = '%s/%s/%i/comments/attachments/put' % \
## (CFG_SITE_URL, CFG_SITE_RECORD, recID)
editor = get_html_text_editor(name='msg',
content=msg,
textual_content=msg,
width='90%',
height='400px',
enabled=CFG_WEBCOMMENT_USE_RICH_TEXT_EDITOR,
# file_upload_url=file_upload_url,
toolbar_set = "WebComment",
ln=ln)
form = """%(add_review)s
<table style="width: 100%%">
<tr>
<td style="padding-bottom: 10px;">%(rate_label)s:
<select name=\"score\" size=\"1\">
<option value=\"0\"%(selected0)s>-%(select_label)s-</option>
<option value=\"5\"%(selected5)s>***** (best)</option>
<option value=\"4\"%(selected4)s>****</option>
<option value=\"3\"%(selected3)s>***</option>
<option value=\"2\"%(selected2)s>**</option>
<option value=\"1\"%(selected1)s>* (worst)</option>
</select>
</td>
</tr>
<tr>
<td>%(title_label)s:</td>
</tr>
<tr>
<td style="padding-bottom: 10px;">
<input type="text" name="note" maxlength="250" style="width:90%%" value="%(note)s" />
</td>
</tr>
<tr>
<td>%(write_label)s:</td>
</tr>
<tr>
<td>
%(editor)s
</td>
</tr>
<tr>
<td class="reportabuse">%(note_label)s</td></tr>
</table>
""" % {'article_label': _('Article'),
'rate_label': _("Rate this article"),
'select_label': _("Select a score"),
'title_label': _("Give a title to your review"),
'write_label': _("Write your review"),
'note_label': note_label,
'note' : note!='' and cgi.escape(note, quote=True) or "",
'msg' : msg!='' and msg or "",
#'record' : record_details
'add_review': show_title_p and ('<h2>'+_('Add review')+'</h2>') or '',
'selected0': selected0,
'selected1': selected1,
'selected2': selected2,
'selected3': selected3,
'selected4': selected4,
'selected5': selected5,
'editor': editor,
}
form_link = "%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/reviews/%(function)s?%(arguments)s" % link_dic
form = self.createhiddenform(action=form_link, method="post", text=form, button=_('Add Review'))
return warnings + form
def tmpl_add_comment_successful(self, recID, ln, reviews, warnings, success):
"""
@param recID: record id
@param ln: language
@return: html page of successfully added comment/review
"""
_ = gettext_set_language(ln)
link_dic = { 'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'module' : 'comments',
'function' : 'display',
'arguments' : 'ln=%s&do=od' % ln,
'recID' : recID,
'discussion': reviews == 1 and 'reviews' or 'comments'}
link = "%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(discussion)s/%(function)s?%(arguments)s" % link_dic
if warnings:
out = self.tmpl_warnings(warnings, ln) + '<br /><br />'
else:
if reviews:
out = _("Your review was successfully added.") + '<br /><br />'
else:
out = _("Your comment was successfully added.") + '<br /><br />'
link += "#C%s" % success
out += '<a href="%s">' % link
out += _('Back to record') + '</a>'
out += '<br/><br/>' \
+ _('You can also view all the comments you have submitted so far on "%(x_url_open)sYour Comments%(x_url_close)s" page.') % \
{'x_url_open': '<a target="_blank" href="%(CFG_SITE_URL)s/yourcomments?ln=%(ln)s">' % {'CFG_SITE_URL': CFG_SITE_URL, 'ln': ln},
'x_url_close': '</a>'}
return out
def tmpl_create_multiple_actions_form(self,
form_name="",
form_action="",
method="get",
action_display={},
action_field_name="",
button_label="",
button_name="",
content="",
**hidden):
""" Creates an HTML form with a multiple choice of actions and a button to select it.
        @param form_action: link to the receiver of the form
        @param form_name: name of the HTML form
@param method: either 'GET' or 'POST'
@param action_display: dictionary of actions.
action is HTML name (name of action)
display is the string provided in the popup
@param action_field_name: html name of action field
@param button_label: what's written on the button
@param button_name: html name of the button
        @param content: what's inside the form
@param **hidden: dictionary of name/value pairs of hidden fields.
"""
output = """
<form action="%s" method="%s">""" % (form_action, method)
output += """
<table>
<tr>
<td style="vertical-align: top" colspan="2">
"""
output += content + '\n'
for key in hidden.keys():
if type(hidden[key]) is list:
for value in hidden[key]:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, value)
else:
output += """
<input type="hidden" name="%s" value="%s" />""" % (key, hidden[key])
output += """
</td>
</tr>
<tr>
<td style="text-align:right;">"""
if type(action_display) is dict and len(action_display.keys()):
output += """
<select name="%s">""" % action_field_name
for (key, value) in action_display.items():
output += """
<option value="%s">%s</option>""" % (key, value)
output += """
</select>"""
output += """
</td>
<td style="text-align:left;">
<input class="adminbutton" type="submit" value="%s" name="%s"/>""" % (button_label, button_name)
output += """
</td>
</tr>
</table>
</form>"""
return output
def tmpl_admin_index(self, ln):
"""
Index page
"""
# load the right message language
_ = gettext_set_language(ln)
out = '<ol>'
if CFG_WEBCOMMENT_ALLOW_COMMENTS or CFG_WEBCOMMENT_ALLOW_REVIEWS:
if CFG_WEBCOMMENT_ALLOW_COMMENTS:
out += '<h3>Comments status</h3>'
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/hot?ln=%(ln)s&comments=1">%(hot_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'hot_cmt_label': _("View most commented records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/latest?ln=%(ln)s&comments=1">%(latest_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'latest_cmt_label': _("View latest commented records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/comments?ln=%(ln)s&reviews=0">%(reported_cmt_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'reported_cmt_label': _("View all comments reported as abuse")}
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
out += '<h3>Reviews status</h3>'
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/hot?ln=%(ln)s&comments=0">%(hot_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'hot_rev_label': _("View most reviewed records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/latest?ln=%(ln)s&comments=0">%(latest_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'latest_rev_label': _("View latest reviewed records")}
out += '<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/comments?ln=%(ln)s&reviews=1">%(reported_rev_label)s</a></li>' % \
{'siteurl': CFG_SITE_URL, 'ln': ln, 'reported_rev_label': _("View all reviews reported as abuse")}
#<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/delete?ln=%(ln)s&comid=-1">%(delete_label)s</a></li>
out +="""
<h3>General</h3>
<li><a href="%(siteurl)s/admin/webcomment/webcommentadmin.py/users?ln=%(ln)s">%(view_users)s</a></li>
<li><a href="%(siteurl)s/help/admin/webcomment-admin-guide">%(guide)s</a></li>
""" % {'siteurl' : CFG_SITE_URL,
#'delete_label': _("Delete/Undelete comment(s) or suppress abuse report(s)"),
'view_users': _("View all users who have been reported"),
'ln' : ln,
'guide' : _("Guide")}
else:
out += _("Comments and reviews are disabled") + '<br />'
out += '</ol>'
from invenio.bibrankadminlib import addadminbox
return addadminbox('<b>%s</b>'% _("Menu"), [out])
def tmpl_admin_delete_form(self, ln, warnings):
"""
Display admin interface to fetch list of records to delete
@param warnings: list of warning tuples (warning_text, warning_color)
see tmpl_warnings, warning_color is optional
"""
# load the right message language
_ = gettext_set_language(ln)
warnings = self.tmpl_warnings(warnings, ln)
out = '''
<br />
%s<br />
<br />'''% _("Please enter the ID of the comment/review so that you can view it before deciding whether to delete it or not")
form = '''
<table>
<tr>
<td>%s</td>
<td><input type=text name="comid" size="10" maxlength="10" value="" /></td>
</tr>
<tr>
<td><br /></td>
<tr>
</table>
<br />
%s <br/>
<br />
<table>
<tr>
<td>%s</td>
<td><input type=text name="recid" size="10" maxlength="10" value="" /></td>
</tr>
<tr>
<td><br /></td>
<tr>
</table>
<br />
''' % (_("Comment ID:"),
_("Or enter a record ID to list all the associated comments/reviews:"),
_("Record ID:"))
form_link = "%s/admin/webcomment/webcommentadmin.py/delete?ln=%s" % (CFG_SITE_URL, ln)
form = self.createhiddenform(action=form_link, method="get", text=form, button=_('View Comment'))
return warnings + out + form
def tmpl_admin_users(self, ln, users_data):
"""
@param users_data: tuple of ct, i.e. (ct, ct, ...)
where ct is a tuple (total_number_reported, total_comments_reported, total_reviews_reported, total_nb_votes_yes_of_reported,
total_nb_votes_total_of_reported, user_id, user_email, user_nickname)
sorted by order of ct having highest total_number_reported
"""
_ = gettext_set_language(ln)
u_reports = 0
u_comment_reports = 1
u_reviews_reports = 2
u_nb_votes_yes = 3
u_nb_votes_total = 4
u_uid = 5
u_email = 6
u_nickname = 7
if not users_data:
return self.tmpl_warnings([(_("There have been no reports so far."), 'green')])
user_rows = ""
for utuple in users_data:
com_label = _("View all %s reported comments") % utuple[u_comment_reports]
com_link = '''<a href="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&uid=%s&reviews=0">%s</a><br />''' % \
(CFG_SITE_URL, ln, utuple[u_uid], com_label)
rev_label = _("View all %s reported reviews") % utuple[u_reviews_reports]
rev_link = '''<a href="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&uid=%s&reviews=1">%s</a>''' % \
(CFG_SITE_URL, ln, utuple[u_uid], rev_label)
if not utuple[u_nickname]:
user_info = get_user_info(utuple[u_uid])
nickname = user_info[2]
else:
nickname = utuple[u_nickname]
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
review_row = """
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>"""
review_row %= (utuple[u_nb_votes_yes],
utuple[u_nb_votes_total] - utuple[u_nb_votes_yes],
utuple[u_nb_votes_total])
else:
review_row = ''
user_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(nickname)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(email)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(uid)s</td>%(review_row)s
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray; font-weight: bold;">%(reports)s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%(com_link)s%(rev_link)s</td>
</tr>""" % { 'nickname' : nickname,
'email' : utuple[u_email],
'uid' : utuple[u_uid],
'reports' : utuple[u_reports],
'review_row': review_row,
'siteurl' : CFG_SITE_URL,
'ln' : ln,
'com_link' : CFG_WEBCOMMENT_ALLOW_COMMENTS and com_link or "",
'rev_link' : CFG_WEBCOMMENT_ALLOW_REVIEWS and rev_link or ""
}
out = "<br />"
out += _("Here is a list, sorted by total number of reports, of all users who have had a comment reported at least once.")
out += """
<br />
<br />
<table class="admin_wvar" style="width: 100%%;">
<thead>
<tr class="adminheaderleft">
<th>"""
out += _("Nickname") + '</th>\n'
out += '<th>' + _("Email") + '</th>\n'
out += '<th>' + _("User ID") + '</th>\n'
if CFG_WEBCOMMENT_ALLOW_REVIEWS > 0:
out += '<th>' + _("Number positive votes") + '</th>\n'
out += '<th>' + _("Number negative votes") + '</th>\n'
out += '<th>' + _("Total number votes") + '</th>\n'
out += '<th>' + _("Total number of reports") + '</th>\n'
out += '<th>' + _("View all user's reported comments/reviews") + '</th>\n'
out += """
</tr>
</thead>
<tbody>%s
</tbody>
</table>
""" % user_rows
return out
def tmpl_admin_select_comment_checkbox(self, cmt_id):
""" outputs a checkbox named "comidXX" where XX is cmt_id """
return '<input type="checkbox" name="comid%i" />' % int(cmt_id)
def tmpl_admin_user_info(self, ln, nickname, uid, email):
""" prepares informations about a user"""
_ = gettext_set_language(ln)
out = """
%(nickname_label)s: %(messaging)s<br />
%(uid_label)s: %(uid)i<br />
%(email_label)s: <a href="mailto:%(email)s">%(email)s</a>"""
out %= {'nickname_label': _("Nickname"),
'messaging': self.create_messaging_link(uid, nickname, ln),
'uid_label': _("User ID"),
'uid': int(uid),
'email_label': _("Email"),
'email': email}
return out
def tmpl_admin_review_info(self, ln, reviews, nb_reports, cmt_id, rec_id, status):
""" outputs information about a review """
_ = gettext_set_language(ln)
if reviews:
reported_label = _("This review has been reported %i times")
else:
reported_label = _("This comment has been reported %i times")
reported_label %= int(nb_reports)
out = """
%(reported_label)s<br />
<a href="%(siteurl)s/%(CFG_SITE_RECORD)s/%(rec_id)i?ln=%(ln)s">%(rec_id_label)s</a><br />
%(cmt_id_label)s"""
out %= {'reported_label': reported_label,
'rec_id_label': _("Record") + ' #' + str(rec_id),
'siteurl': CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'rec_id': int(rec_id),
'cmt_id_label': _("Comment") + ' #' + str(cmt_id),
'ln': ln}
if status in ['dm', 'da']:
out += '<br /><div style="color:red;">Marked as deleted</div>'
return out
def tmpl_admin_latest(self, ln, comment_data, comments, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
which is return by webcommentadminlib.py/query_get_latest i.e.
tuple (nickname, uid, date_creation, body, id) if latest comments or
tuple (nickname, uid, date_creation, body, star_score, id) if latest reviews
"""
_ = gettext_set_language(ln)
out = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
out += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/latest?ln=%s&comments=%s">' % (CFG_SITE_URL, ln, comments)
out += '<input type="hidden" name="ln" value=%s>' % ln
out += '<input type="hidden" name="comments" value=%s>' % comments
out += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
out += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
out += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
out += '</select></div></form><br />'
if error == 1:
out += "<i>User is not authorized to view such collection.</i><br />"
return out
elif error == 2:
out += "<i>There are no %s for this collection.</i><br />" % (comments and 'comments' or 'reviews')
return out
out += """
<ol>
"""
for (cmt_tuple, meta_data) in comment_data:
bibrec_id = meta_data[3]
content = format_record(bibrec_id, "hs")
if not comments:
out += """
                <li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href="%(comment_url)s"> reviewed by %(user)s</a>
                (%(stars)s) \"%(body)s\" on <i> %(date)s </i></span></li> <br/>
""" % {'content': content,
'comment_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(bibrec_id) + '/reviews',
'user':cmt_tuple[0] ,
'stars': '*' * int(cmt_tuple[4]) ,
'body': cmt_tuple[3][:20] + '...',
'date': cmt_tuple[2]}
else:
out += """
                <li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href="%(comment_url)s"> commented by %(user)s</a>,
                \"%(body)s\" on <i> %(date)s </i></span></li> <br/>
""" % {'content': content,
'comment_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(bibrec_id) + '/comments',
'user':cmt_tuple[0] ,
'body': cmt_tuple[3][:20] + '...',
'date': cmt_tuple[2]}
out += """</ol>"""
return out
def tmpl_admin_hot(self, ln, comment_data, comments, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
which is return by webcommentadminlib.py/query_get_hot i.e.
tuple (id_bibrec, date_last_comment, users, count)
"""
_ = gettext_set_language(ln)
out = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
out += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/hot?ln=%s&comments=%s">' % (CFG_SITE_URL, ln, comments)
out += '<input type="hidden" name="ln" value=%s>' % ln
out += '<input type="hidden" name="comments" value=%s>' % comments
out += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
out += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
out += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
out += '</select></div></form><br />'
if error == 1:
out += "<i>User is not authorized to view such collection.</i><br />"
return out
elif error == 2:
out += "<i>There are no %s for this collection.</i><br />" % (comments and 'comments' or 'reviews')
return out
        out += """
        <ol>
        """
        for cmt_tuple in comment_data:
bibrec_id = cmt_tuple[0]
content = format_record(bibrec_id, "hs")
last_comment_date = cmt_tuple[1]
total_users = cmt_tuple[2]
total_comments = cmt_tuple[3]
if comments:
comment_url = CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(bibrec_id) + '/comments'
str_comment = int(total_comments) > 1 and 'comments' or 'comment'
else:
comment_url = CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(bibrec_id) + '/reviews'
str_comment = int(total_comments) > 1 and 'reviews' or 'review'
out += """
                <li> %(content)s <br/> <span class="moreinfo"> <a class="moreinfo" href="%(comment_url)s"> %(total_comments)s
                %(str_comment)s</a>
                (%(total_users)s %(user)s), latest on <i> %(last_comment_date)s </i></span></li> <br/>
""" % {'content': content,
'comment_url': comment_url ,
'total_comments': total_comments,
'str_comment': str_comment,
'total_users': total_users,
'user': int(total_users) > 1 and 'users' or 'user',
'last_comment_date': last_comment_date}
out += """</ol>"""
return out
def tmpl_admin_comments(self, ln, uid, comID, recID, comment_data, reviews, error, user_collections, collection):
"""
@param comment_data: same type of tuple as that
which is returned by webcomment.py/query_retrieve_comments_or_remarks i.e.
tuple of comment where comment is
tuple (nickname,
date_creation,
body,
id) if ranking disabled or
tuple (nickname,
date_creation,
body,
nb_votes_yes,
nb_votes_total,
star_score,
title,
id)
"""
_ = gettext_set_language(ln)
coll_form = """
<script type='text/javascript'>
function collectionChange()
{
document.collection_form.submit();
}
</script>
"""
coll_form += '<form method="get" name="collection_form" action="%s/admin/webcomment/webcommentadmin.py/comments?ln=%s&reviews=%s">' % (CFG_SITE_URL, ln, reviews)
coll_form += '<input type="hidden" name="ln" value=%s>' % ln
coll_form += '<input type="hidden" name="reviews" value=%s>' % reviews
coll_form += '<div> Filter by collection: <select name="collection" onchange="javascript:collectionChange();">'
for collection_name in user_collections:
if collection_name == collection:
coll_form += '<option "SELECTED" value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
else:
coll_form += '<option value="%(collection_name)s">%(collection_name)s</option>' % {'collection_name': cgi.escape(collection_name)}
coll_form += '</select></div></form><br />'
if error == 1:
coll_form += "<i>User is not authorized to view such collection.</i><br />"
return coll_form
elif error == 2:
coll_form += "<i>There are no %s for this collection.</i><br />" % (reviews and 'reviews' or 'comments')
return coll_form
comments = []
comments_info = []
checkboxes = []
users = []
for (cmt_tuple, meta_data) in comment_data:
if reviews:
comments.append(self.tmpl_get_comment_with_ranking(None,#request object
ln,
cmt_tuple[0],#nickname
cmt_tuple[1],#userid
cmt_tuple[2],#date_creation
cmt_tuple[3],#body
cmt_tuple[9],#status
0,
cmt_tuple[5],#nb_votes_total
cmt_tuple[4],#nb_votes_yes
cmt_tuple[6],#star_score
cmt_tuple[7]))#title
else:
comments.append(self.tmpl_get_comment_without_ranking(None,#request object
ln,
cmt_tuple[0],#nickname
cmt_tuple[1],#userid
cmt_tuple[2],#date_creation
cmt_tuple[3],#body
cmt_tuple[5],#status
0,
None, #reply_link
None, #report_link
None, #undelete_link
None, #delete_links
None, #unreport_link
-1, # recid
cmt_tuple[4] # com_id
))
users.append(self.tmpl_admin_user_info(ln,
meta_data[0], #nickname
meta_data[1], #uid
meta_data[2]))#email
if reviews:
status = cmt_tuple[9]
else:
status = cmt_tuple[5]
comments_info.append(self.tmpl_admin_review_info(ln,
reviews,
meta_data[5], # nb abuse reports
meta_data[3], # cmt_id
meta_data[4], # rec_id
status)) # status
checkboxes.append(self.tmpl_admin_select_comment_checkbox(meta_data[3]))
form_link = "%s/admin/webcomment/webcommentadmin.py/del_com?ln=%s" % (CFG_SITE_URL, ln)
out = """
<table class="admin_wvar" style="width:100%%;">
<thead>
<tr class="adminheaderleft">
<th>%(review_label)s</th>
<th>%(written_by_label)s</th>
<th>%(review_info_label)s</th>
<th>%(select_label)s</th>
</tr>
</thead>
<tbody>""" % {'review_label': reviews and _("Review") or _("Comment"),
'written_by_label': _("Written by"),
                              'review_info_label': _("General information"),
'select_label': _("Select")}
for i in range (0, len(comments)):
out += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintd" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (comments[i], users[i], comments_info[i], checkboxes[i])
out += """
</tbody>
</table>"""
if reviews:
action_display = {
'delete': _('Delete selected reviews'),
'unreport': _('Suppress selected abuse report'),
'undelete': _('Undelete selected reviews')
}
else:
action_display = {
'undelete': _('Undelete selected comments'),
'delete': _('Delete selected comments'),
'unreport': _('Suppress selected abuse report')
}
form = self.tmpl_create_multiple_actions_form(form_name="admin_comment",
form_action=form_link,
method="post",
action_display=action_display,
action_field_name='action',
button_label=_("OK"),
button_name="okbutton",
content=out)
if uid > 0:
header = '<br />'
if reviews:
header += _("Here are the reported reviews of user %s") % uid
else:
header += _("Here are the reported comments of user %s") % uid
header += '<br /><br />'
if comID > 0 and recID <= 0 and uid <= 0:
if reviews:
header = '<br />' +_("Here is review %s")% comID + '<br /><br />'
else:
header = '<br />' +_("Here is comment %s")% comID + '<br /><br />'
if uid > 0 and comID > 0 and recID <= 0:
if reviews:
header = '<br />' + _("Here is review %(x_cmtID)s written by user %(x_user)s") % {'x_cmtID': comID, 'x_user': uid}
else:
header = '<br />' + _("Here is comment %(x_cmtID)s written by user %(x_user)s") % {'x_cmtID': comID, 'x_user': uid}
            header += '<br /><br />'
if comID <= 0 and recID <= 0 and uid <= 0:
header = '<br />'
if reviews:
header += _("Here are all reported reviews sorted by the most reported")
else:
header += _("Here are all reported comments sorted by the most reported")
header += "<br /><br />"
elif recID > 0:
header = '<br />'
if reviews:
header += _("Here are all reviews for record %i, sorted by the most reported" % recID)
header += '<br /><a href="%s/admin/webcomment/webcommentadmin.py/delete?comid=&recid=%s&reviews=0">%s</a>' % (CFG_SITE_URL, recID, _("Show comments"))
else:
header += _("Here are all comments for record %i, sorted by the most reported" % recID)
header += '<br /><a href="%s/admin/webcomment/webcommentadmin.py/delete?comid=&recid=%s&reviews=1">%s</a>' % (CFG_SITE_URL, recID, _("Show reviews"))
header += "<br /><br />"
return coll_form + header + form
def tmpl_admin_del_com(self, del_res, ln=CFG_SITE_LANG):
"""
@param del_res: list of the following tuple (comment_id, was_successfully_deleted),
                         was_successfully_deleted is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style="padding-right:10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully deleted"), table_rows)
return out
def tmpl_admin_undel_com(self, del_res, ln=CFG_SITE_LANG):
"""
@param del_res: list of the following tuple (comment_id, was_successfully_undeleted),
                         was_successfully_undeleted is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style="padding-right:10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully undeleted"), table_rows)
return out
def tmpl_admin_suppress_abuse_report(self, del_res, ln=CFG_SITE_LANG):
"""
@param del_res: list of the following tuple (comment_id, was_successfully_deleted),
                         was_successfully_deleted is boolean (0=false, >0=true)
"""
_ = gettext_set_language(ln)
table_rows = ''
for deltuple in del_res:
table_rows += """
<tr>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
<td class="admintdleft" style="padding: 5px; border-bottom: 1px solid lightgray;">%s</td>
</tr>""" % (deltuple[0], deltuple[1]>0 and _("Yes") or "<span class=\"important\">" +_("No") + "</span>")
out = """
<table class="admin_wvar">
<tr class="adminheaderleft">
<td style ="padding-right: 10px;">%s</td>
<td>%s</td>
</tr>%s
<table>""" % (_("comment ID"), _("successfully suppressed abuse report"), table_rows)
return out
def tmpl_mini_review(self, recID, ln=CFG_SITE_LANG, action='SUBMIT',
avg_score=0, nb_comments_total=0):
"""Display the mini version of reviews (only the grading part)"""
_ = gettext_set_language(ln)
url = '%s/%s/%s/reviews/add?ln=%s&action=%s' % (CFG_BASE_URL, CFG_SITE_RECORD, recID, ln, action)
if avg_score > 0:
score = _("Average review score: %(x_nb_score)s based on %(x_nb_reviews)s reviews") % \
{'x_nb_score': '<b>%.1f</b>' % avg_score,
'x_nb_reviews': nb_comments_total}
else:
score = '(' +_("Not yet reviewed") + ')'
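        # Map the average score to five star-image CSS classes ('full',
        # 'half' or empty); e.g. a hypothetical average of 3.7 renders three
        # full stars and one half star.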
if avg_score == 5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', 'full'
elif avg_score >= 4.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', 'half'
elif avg_score >= 4:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'full', ''
elif avg_score >= 3.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', 'half', ''
elif avg_score >= 3:
s1, s2, s3, s4, s5 = 'full', 'full', 'full', '', ''
elif avg_score >= 2.5:
s1, s2, s3, s4, s5 = 'full', 'full', 'half', '', ''
elif avg_score >= 2:
s1, s2, s3, s4, s5 = 'full', 'full', '', '', ''
elif avg_score >= 1.5:
s1, s2, s3, s4, s5 = 'full', 'half', '', '', ''
elif avg_score == 1:
s1, s2, s3, s4, s5 = 'full', '', '', '', ''
else:
s1, s2, s3, s4, s5 = '', '', '', '', ''
out = '''
<small class="detailedRecordActions">%(rate)s:</small><br /><br />
<div style="margin:auto;width:160px;">
<span style="display:none;">Rate this document:</span>
<div class="star %(s1)s" ><a href="%(url)s&score=1">1</a>
<div class="star %(s2)s" ><a href="%(url)s&score=2">2</a>
<div class="star %(s3)s" ><a href="%(url)s&score=3">3</a>
<div class="star %(s4)s" ><a href="%(url)s&score=4">4</a>
<div class="star %(s5)s" ><a href="%(url)s&score=5">5</a></div></div></div></div></div>
<div style="clear:both"> </div>
</div>
<small>%(score)s</small>
''' % {'url': url,
'score': score,
'rate': _("Rate this document"),
's1': s1,
's2': s2,
's3': s3,
's4': s4,
's5': s5
}
return out
def tmpl_email_new_comment_header(self, recID, title, reviews,
comID, report_numbers,
can_unsubscribe=True,
ln=CFG_SITE_LANG, uid=-1):
"""
Prints the email header used to notify subscribers that a new
comment/review was added.
@param recid: the ID of the commented/reviewed record
@param title: the title of the commented/reviewed record
@param reviews: True if it is a review, else if a comment
@param comID: the comment ID
@param report_numbers: the report number(s) of the record
@param can_unsubscribe: True if user can unsubscribe from alert
@param ln: language
"""
# load the right message language
_ = gettext_set_language(ln)
user_info = collect_user_info(uid)
out = _("Hello:") + '\n\n' + \
(reviews and _("The following review was sent to %(CFG_SITE_NAME)s by %(user_nickname)s:") or \
_("The following comment was sent to %(CFG_SITE_NAME)s by %(user_nickname)s:")) % \
{'CFG_SITE_NAME': CFG_SITE_NAME,
'user_nickname': user_info['nickname']}
out += '\n(<%s>)' % (CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recID))
out += '\n\n\n'
return out
def tmpl_email_new_comment_footer(self, recID, title, reviews,
comID, report_numbers,
can_unsubscribe=True,
ln=CFG_SITE_LANG):
"""
Prints the email footer used to notify subscribers that a new
comment/review was added.
@param recid: the ID of the commented/reviewed record
@param title: the title of the commented/reviewed record
@param reviews: True if it is a review, else if a comment
@param comID: the comment ID
@param report_numbers: the report number(s) of the record
@param can_unsubscribe: True if user can unsubscribe from alert
@param ln: language
"""
# load the right message language
_ = gettext_set_language(ln)
out = '\n\n-- \n'
out += _("This is an automatic message, please don't reply to it.")
out += '\n'
out += _("To post another comment, go to <%(x_url)s> instead.") % \
{'x_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recID) + \
(reviews and '/reviews' or '/comments') + '/add'}
out += '\n'
if not reviews:
out += _("To specifically reply to this comment, go to <%(x_url)s>") % \
{'x_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recID) + \
'/comments/add?action=REPLY&comid=' + str(comID)}
out += '\n'
if can_unsubscribe:
out += _("To unsubscribe from this discussion, go to <%(x_url)s>") % \
{'x_url': CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recID) + \
'/comments/unsubscribe'}
out += '\n'
out += _("For any question, please use <%(CFG_SITE_SUPPORT_EMAIL)s>") % \
{'CFG_SITE_SUPPORT_EMAIL': CFG_SITE_SUPPORT_EMAIL}
return out
def tmpl_email_new_comment_admin(self, recID):
"""
Prints the record information used in the email to notify the
system administrator that a new comment has been posted.
@param recID: the ID of the commented/reviewed record
"""
out = ""
title = get_fieldvalues(recID, "245__a")
authors = ', '.join(get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a"))
#res_author = ""
#res_rep_num = ""
#for author in authors:
# res_author = res_author + ' ' + author
dates = get_fieldvalues(recID, "260__c")
report_nums = get_fieldvalues(recID, "037__a")
report_nums += get_fieldvalues(recID, "088__a")
report_nums = ', '.join(report_nums)
#for rep_num in report_nums:
# res_rep_num = res_rep_num + ', ' + rep_num
out += " Title = %s \n" % (title and title[0] or "No Title")
out += " Authors = %s \n" % authors
if dates:
out += " Date = %s \n" % dates[0]
out += " Report number = %s" % report_nums
return out
def tmpl_page_do_not_leave_comment_page_js(self, ln):
"""
Code to ask user confirmation when leaving the page, so that the
comment is not lost if clicking by mistake on links.
@param ln: the user language
"""
# load the right message language
_ = gettext_set_language(ln)
out = '''
<script type="text/javascript" language="JavaScript">//<![CDATA[
var initial_comment_value = document.forms.cmtForm.msg.value;
var user_must_confirm_before_leaving_page = true;
window.onbeforeunload = confirmExit;
function confirmExit() {
var editor_type_field = document.getElementById('%(name)seditortype');
if (editor_type_field && editor_type_field.value == 'ckeditor') {
var oEditor = CKEDITOR.instances.%(name)s;
if (user_must_confirm_before_leaving_page && oEditor.checkDirty()) {
/* Might give false positives, when editor pre-loaded
with content. But is better than the opposite */
return "%(message)s";
}
} else {
if (user_must_confirm_before_leaving_page && document.forms.cmtForm.msg.value != initial_comment_value){
return "%(message)s";
}
}
}
//]]></script>
''' % {'message': _('Your comment will be lost.').replace('"', '\\"'),
'name': 'msg'}
return out
def tmpl_your_comments(self, user_info, comments, page_number=1, selected_order_by_option="lcf", selected_display_number_option="all", selected_display_format_option="rc", nb_total_results=0, nb_total_pages=0, ln=CFG_SITE_LANG):
"""
Display all submitted comments by the user
@param user_info: standard user info object.
@param comments: ordered list of tuples (id_bibrec, comid, date_creation, body, status, in_reply_to_id_cmtRECORDCOMMENT)
@param page_number: page on which the user is.
@type page_number: integer
@param selected_order_by_option: seleccted ordering option. Can be one of:
- ocf: Oldest comment first
- lcf: Latest comment first
- grof: Group by record, oldest commented first
- grlf: Group by record, latest commented first
@type selected_order_by_option: string
@param selected_display_number_option: number of results to show per page. Can be a string-digit or 'all'.
@type selected_display_number_option: string
@param selected_display_format_option: how to show records. Can be one of:
- rc: Records and comments
- ro: Records only
- co: Comments only
@type selected_display_format_option: string
@param nb_total_results: total number of items to display.
@type nb_total_results: integer
@param nb_total_pages: total number of pages.
@type nb_total_pages: integer
        @param ln: language
@type ln: string
"""
# load the right message language
_ = gettext_set_language(ln)
from invenio.search_engine import record_exists
your_comments_order_by_options = (('ocf', _("Oldest comment first")),
('lcf', _("Latest comment first")),
('grof', _("Group by record, oldest commented first")),
('grlf', _("Group by record, latest commented first")),
)
your_comments_display_format_options = (('rc', _("Records and comments")),
('ro', _('Records only')),
('co', _('Comments only')),
)
your_comments_display_number_options = (('20', _("%s items") % 20),
('50', _("%s items") % 50),
('100', _("%s items") % 100),
('500',_("%s items") % 500),
('all', _('All items')),
)
out = ""
out += _("Below is the list of the comments you have submitted so far.") + "<br/>"
if CFG_CERN_SITE:
if nb_total_results == 0:
out = _('You have not yet submitted any comment in the document "discussion" tab.') + "<br/>"
user_roles = acc_get_user_roles_from_user_info(user_info)
if acc_get_role_id('ATLASDraftPublication') in user_roles:
out += _('You might find other comments here: ')
out += create_html_link(urlbase=CFG_SITE_URL + '/search',
urlargd={'ln': ln,
'cc': 'ATLAS Publication Drafts Comments',
'p': user_info['email'],
'f': '859__f'},
link_label='ATLAS Publication Drafts Comments')
elif acc_get_role_id('cmsphysicsmembers') in user_roles:
out += _('You might find other comments here: ')
out += create_html_link(urlbase=CFG_SITE_URL + '/search',
urlargd={'ln': ln,
'cc': '',
'p': user_info['email'],
'f': '859__f'},
link_label='CMS Publication Drafts Comments')
elif acc_get_role_id('LHCbDraftPublication') in user_roles:
out += _('You might find other comments here: ')
out += create_html_link(urlbase=CFG_SITE_URL + '/search',
urlargd={'ln': ln,
'cc': '',
'p': user_info['email'],
'f': '859__f'},
link_label='LHCb Publication Drafts Comments')
out += '<br/>'
if nb_total_results == 0:
return out
else:
if nb_total_results == 0:
return _("You have not yet submitted any comment. Browse documents from the search interface and take part to discussions!")
# Show controls
format_selection = create_html_select(your_comments_display_format_options,
name="format", selected=selected_display_format_option,
attrs={'id': 'format',
'onchange': 'this.form.submit();'})
order_by_selection = create_html_select(your_comments_order_by_options,
name="order_by", selected=selected_order_by_option,
attrs={'id': 'order_by',
'onchange': 'this.form.submit();'})
nb_per_page_selection = create_html_select(your_comments_display_number_options,
name="per_page", selected=selected_display_number_option,
attrs={'id': 'per_page',
'onchange': 'this.form.submit();'})
out += '''
<form method="get" class="yourcommentsdisplayoptionsform">
<fieldset id="yourcommentsdisplayoptions">
<legend>%(display_option_label)s:</legend>
<label for="format">%(format_selection_label)s :</label> %(format_selection)s
<label for="order_by">%(order_selection_label)s :</label> %(order_by_selection)s
<label for="per_page">%(per_page_selection_label)s :</label> %(nb_per_page_selection)s
<noscript><input type="submit" value="%(refresh_label)s" class="formbutton"/></noscript>
</fieldset>
</form>
''' % {'format_selection_label': _("Display"),
'order_selection_label': _("Order by"),
'per_page_selection_label': _("Per page"),
'format_selection': format_selection,
'order_by_selection': order_by_selection,
'nb_per_page_selection': nb_per_page_selection,
'display_option_label': _("Display options"),
'refresh_label': _("Refresh"),
}
# Show comments
last_id_bibrec = None
nb_record_groups = 0
out += '<div id="yourcommentsmaincontent">'
for id_bibrec, comid, date_creation, body, status, in_reply_to_id_cmtRECORDCOMMENT in comments:
if last_id_bibrec != id_bibrec and selected_display_format_option in ('rc', 'ro'):
# We moved to another record. Show some info about
# current record.
if last_id_bibrec:
# Close previous group
out += "</div></div>"
nb_record_groups += 1
                # You might want to hide this information if the user does
                # not have access, though it would make sense that they can
                # at least know on which page their comment appears.
if record_exists(id_bibrec) == -1:
record_info_html = '<em>%s</em>' % _("The record has been deleted.")
else:
record_info_html = format_record(id_bibrec, of="HS")
out += '''<div class="yourcommentsrecordgroup" id="yourcomments-record-group-%(recid)s">
<div class="yourcommentsrecordgroup%(recid)sheader">• ''' % {'recid': id_bibrec} + \
record_info_html + '</div><div style="padding-left: 20px;">'
if selected_display_format_option != 'ro':
final_body = email_quoted_txt2html(body)
title = '<a name="C%s" id="C%s"></a>' % (comid, comid)
if status == "dm":
final_body = '<div class="webcomment_deleted_comment_message">%s</div>' % _("Comment deleted by the moderator")
elif status == "da":
final_body = ('<div class="webcomment_deleted_comment_message">%s<br /><br />' % _("You have deleted this comment: it is not visible by other users")) +\
final_body + '</div>'
links = []
if in_reply_to_id_cmtRECORDCOMMENT:
links.append(create_html_link(urlbase=CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(id_bibrec) + '/comments/',
urlargd={'ln': ln},
link_label=_('(in reply to a comment)'),
urlhash=str(in_reply_to_id_cmtRECORDCOMMENT)))
links.append(create_html_link(urlbase=CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + \
str(id_bibrec) + '/comments/',
urlargd={'ln': ln},
link_label=_('See comment on discussion page'),
urlhash='C' + str(comid)))
out += '''
<div class="webcomment_comment_box">
<div class="webcomment_comment_avatar"><img class="webcomment_comment_avatar_default" src="%(site_url)s/img/user-icon-1-24x24.gif" alt="avatar" /></div>
<div class="webcomment_comment_content">
<div class="webcomment_comment_title">
%(title)s
<div class="webcomment_comment_date">%(date)s</div>
<a class="webcomment_permalink" title="Permalink to this comment" href="#C%(comid)i">¶</a>
</div>
<div class="collapsible_content">
<blockquote>
%(body)s
</blockquote>
<div class="webcomment_comment_options">%(links)s</div>
</div>
<div class="clearer"></div>
</div>
<div class="clearer"></div>
</div>''' % \
{'title' : title,
'body' : final_body,
'links' : " ".join(links),
'date' : date_creation,
'site_url' : CFG_SITE_URL,
'comid' : comid,
}
last_id_bibrec = id_bibrec
out += '</div>' # end 'yourcommentsmaincontent'
# Show page navigation
page_links = ''
if selected_display_format_option == 'ro' and \
selected_order_by_option in ('ocf', 'lcf'):
# We just have an approximation here (we count by
# comments, not record...)
page_links += (_("%i comments found in total (not shown on this page)") % nb_total_results) + ' '
else:
page_links += (_("%i items found in total") % nb_total_results) + ' '
if selected_display_number_option != 'all':
# Previous
if page_number != 1:
page_links += create_html_link(CFG_SITE_URL + '/yourcomments/',
{'page': page_number - 1,
'order_by': selected_order_by_option,
'per_page': selected_display_number_option,
'format': selected_display_format_option,
'ln': ln},
_("Previous"))
# Page Numbers
for i in range(1, nb_total_pages + 1):
if i != page_number:
page_links += ' ' + \
create_html_link(CFG_SITE_URL + '/yourcomments/',
{'page': i,
'order_by': selected_order_by_option,
'per_page': selected_display_number_option,
'format': selected_display_format_option,
'ln': ln},
str(i)) + \
' '
elif nb_total_pages > 1:
page_links += ''' <b>%s</b> ''' % i
# Next
if page_number != nb_total_pages:
page_links += create_html_link(CFG_SITE_URL + '/yourcomments/',
{'page': page_number + 1,
'order_by': selected_order_by_option,
'per_page': selected_display_number_option,
'format': selected_display_format_option,
'ln': ln},
_("Next"))
out += '<br/><div id="yourcommentsnavigationlinks">' + page_links + '</div>'
return out
| gpl-2.0 | -5,415,654,279,626,427,000 | 51.491173 | 575 | 0.491009 | false |
TetraAsh/baruwa2 | baruwa/lib/auth/ldapauth.py | 1 | 7566 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Baruwa LDAP auth"""
import ldap
from zope.interface import implements
from repoze.who.utils import resolveDotted
from sqlalchemy.orm.exc import NoResultFound
from repoze.who.interfaces import IAuthenticator
from ldap.filter import escape_filter_chars, filter_format
from repoze.who.plugins.ldap.plugins import make_ldap_connection
from baruwa.lib.auth import check_param, check_failed_logins
# def build_search_filters(kwds, search_scope,
# searchfilter, domain,
# login, username):
# "Build LDAP filter"
# kwds['search_scope'] = search_scope
# if searchfilter != '':
# params = []
# domaindn = ','.join(['dc=' + part
# for part in domain.split('.')])
# mapping = {
# '%n':login,
# '%u':username,
# '%d':domain,
# '%D': domaindn
# }
# searchfilter = escape_filter_chars(searchfilter)
# for key in ['%n', '%u', '%d', '%D']:
# for run in searchfilter.count(key):
# searchfilter = searchfilter.replace(key, '%s', 1)
# params.append(mapping[run])
# searchfilter = filter_format(searchfilter, params)
# kwds['filterstr'] = searchfilter
def make_ldap_uri(addr, portno):
"Return LDAP URI"
nport = ''
if (portno and portno != 636 and portno != 389):
nport = ":%s" % str(portno)
scheme = 'ldaps://' if portno == 636 else 'ldap://'
ldapdict = dict(address=addr, port=nport, scheme=scheme,)
return "%(scheme)s%(address)s%(port)s" % ldapdict
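# For illustration (the host names below are made up), the URI shapes produced above:
#   make_ldap_uri('ldap.example.com', 389)   -> 'ldap://ldap.example.com'
#   make_ldap_uri('ldap.example.com', 636)   -> 'ldaps://ldap.example.com'
#   make_ldap_uri('ldap.example.com', 10389) -> 'ldap://ldap.example.com:10389'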
class BaruwaLDAPAuthPlugin(object):
"""Baruwa LDAP auth plugin
Hooks into repoze.who.plugin.ldap
"""
implements(IAuthenticator)
name = 'ldap'
def __init__(self, dbsession, lsm, asm, dommodel, dam,
returned_id='login'):
"init"
self.dbsession = dbsession
self.lsm = lsm
self.asm = asm
self.dommodel = dommodel
self.dam = dam
self.naming_attribute = 'uid'
self.returned_id = returned_id
def __repr__(self):
"Repr"
return '<%s %s>' % (self.__class__.__name__, id(self))
def authenticate(self, environ, identity):
"Authenticate identity"
try:
if check_failed_logins(environ):
raise TypeError
login = identity['login']
username, domain = login.split('@')
try:
dma = self.dbsession.query(self.dommodel.name)\
.join(self.dam)\
.filter(self.dam.name == domain).one()
domain = dma.name
except NoResultFound:
pass
ldapsettings = self.dbsession.query(self.lsm,
self.asm.address,
self.asm.port,
self.asm.split_address)\
.join(self.asm)\
.join(self.dommodel)\
.filter(self.asm.enabled == True)\
.filter(self.dommodel.status == True)\
.filter(self.dommodel.name == domain)\
.one()
settings, address, port, split_address = ldapsettings
ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, 5)
ldap_uri = make_ldap_uri(address, port)
ldap_connection = make_ldap_connection(ldap_uri)
kwargs = dict(naming_attribute=settings.nameattribute,
returned_id=self.returned_id,
bind_dn=settings.binddn,
bind_pass=settings.bindpw,
start_tls=settings.usetls)
# if domain != domain_name:
# # override alias domain
# domain = domain_name
if settings.usesearch:
ldap_module = 'LDAPSearchAuthenticatorPlugin'
# build_search_filters(kwargs, settings.search_scope,
# settings.searchfilter, domain,
# login, username)
kwargs['search_scope'] = settings.search_scope
if settings.searchfilter != '':
params = []
domaindn = ','.join(['dc=' + part
for part in domain.split('.')])
mapping = {
'%n':login,
'%u':username,
'%d':domain,
'%D': domaindn
}
searchfilter = escape_filter_chars(settings.searchfilter)
for key in ['%n', '%u', '%d', '%D']:
for _ in xrange(searchfilter.count(key)):
searchfilter = searchfilter.replace(key, '%s', 1)
params.append(mapping[key])
searchfilter = filter_format(searchfilter, params)
kwargs['filterstr'] = searchfilter
else:
ldap_module = 'LDAPAuthenticatorPlugin'
if split_address:
identity['login'] = username
else:
# use main domain name not alias reset above
identity['login'] = "%s@%s" % (username, domain)
auth = resolveDotted('repoze.who.plugins.ldap:%s' % ldap_module)
ldap_auth = auth(ldap_connection, settings.basedn, **kwargs)
userid = ldap_auth.authenticate(environ, identity)
fulladdr = "%s@%s" % (username, domain)
return userid if userid is None or '@' in userid else fulladdr
except (KeyError, TypeError, ValueError, AttributeError,
NoResultFound, IndexError, ldap.LDAPError):
return None
def make_ldap_authenticator(dbsession, lsm, asm, dommodel, dam):
"return ldap authenticator"
for param in [('dbsession', dbsession),
('lsm', lsm),
('asm', asm),
('dommodel', dommodel),
('dam', dam)]:
check_param(param[0], param[1])
session = resolveDotted(dbsession)
ldapmodel = resolveDotted(lsm)
authmodel = resolveDotted(asm)
dmodel = resolveDotted(dommodel)
damodel = resolveDotted(dam)
authenticator = BaruwaLDAPAuthPlugin(session,
ldapmodel,
authmodel,
dmodel,
damodel)
return authenticator
| gpl-3.0 | -2,175,941,710,540,414,500 | 38 | 77 | 0.52194 | false |
robertwb/incubator-beam | sdks/python/apache_beam/runners/dataflow/dataflow_metrics.py | 5 | 12979 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
DataflowRunner implementation of MetricResults. It is in charge of
responding to queries of current metrics by going to the dataflow
service.
"""
# pytype: skip-file
import argparse
import logging
import numbers
import sys
from collections import defaultdict
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import DistributionResult
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricResult
from apache_beam.metrics.metric import MetricResults
from apache_beam.metrics.metricbase import MetricName
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
_LOGGER = logging.getLogger(__name__)
def _get_match(proto, filter_fn):
"""Finds and returns the first element that matches a query.
If no element matches the query, it throws ValueError.
If more than one element matches the query, it returns only the first.
"""
query = [elm for elm in proto if filter_fn(elm)]
if len(query) == 0:
raise ValueError('Could not find element')
elif len(query) > 1:
raise ValueError('Too many matches')
return query[0]
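# For example (illustrative, mirroring how the helper is used further below),
# picking the 'sum' property out of a distribution payload:
#   _get_match(metric.distribution.object_value.properties,
#              lambda x: x.key == 'sum')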
# V1b3 MetricStructuredName keys to accept and copy to the MetricKey labels.
STEP_LABEL = 'step'
STRUCTURED_NAME_LABELS = set(
['execution_step', 'original_name', 'output_user_name'])
class DataflowMetrics(MetricResults):
"""Implementation of MetricResults class for the Dataflow runner."""
def __init__(self, dataflow_client=None, job_result=None, job_graph=None):
"""Initialize the Dataflow metrics object.
Args:
dataflow_client: apiclient.DataflowApplicationClient to interact with the
dataflow service.
job_result: DataflowPipelineResult with the state and id information of
the job.
job_graph: apiclient.Job instance to be able to translate between internal
step names (e.g. "s2"), and user step names (e.g. "split").
"""
super(DataflowMetrics, self).__init__()
self._dataflow_client = dataflow_client
self.job_result = job_result
self._queried_after_termination = False
self._cached_metrics = None
self._job_graph = job_graph
@staticmethod
def _is_counter(metric_result):
return isinstance(metric_result.attempted, numbers.Number)
@staticmethod
def _is_distribution(metric_result):
return isinstance(metric_result.attempted, DistributionResult)
def _translate_step_name(self, internal_name):
"""Translate between internal step names (e.g. "s1") and user step names."""
if not self._job_graph:
raise ValueError(
'Could not translate the internal step name %r since job graph is '
'not available.' % internal_name)
user_step_name = None
if (self._job_graph and internal_name in
self._job_graph.proto_pipeline.components.transforms.keys()):
# Dataflow Runner v2 with portable job submission uses proto transform map
# IDs for step names. Also PTransform.unique_name maps to user step names.
# Hence we lookup user step names based on the proto.
user_step_name = self._job_graph.proto_pipeline.components.transforms[
internal_name].unique_name
else:
try:
step = _get_match(
self._job_graph.proto.steps, lambda x: x.name == internal_name)
user_step_name = _get_match(
step.properties.additionalProperties,
lambda x: x.key == 'user_name').value.string_value
except ValueError:
pass # Exception is handled below.
if not user_step_name:
raise ValueError(
'Could not translate the internal step name %r.' % internal_name)
return user_step_name
def _get_metric_key(self, metric):
"""Populate the MetricKey object for a queried metric result."""
step = ""
name = metric.name.name # Always extract a name
labels = dict()
try: # Try to extract the user step name.
# If ValueError is thrown within this try-block, it is because of
# one of the following:
# 1. Unable to translate the step name. Only happening with improperly
# formatted job graph (unlikely), or step name not being the internal
# step name (only happens for unstructured-named metrics).
# 2. Unable to unpack [step] or [namespace]; which should only happen
# for unstructured names.
step = _get_match(
metric.name.context.additionalProperties,
lambda x: x.key == STEP_LABEL).value
step = self._translate_step_name(step)
except ValueError:
pass
namespace = "dataflow/v1b3" # Try to extract namespace or add a default.
try:
namespace = _get_match(
metric.name.context.additionalProperties,
lambda x: x.key == 'namespace').value
except ValueError:
pass
for kv in metric.name.context.additionalProperties:
if kv.key in STRUCTURED_NAME_LABELS:
labels[kv.key] = kv.value
    # Package everything besides namespace and name into the labels as well,
    # including unmodified step names, so that integrations can rely on the exact
    # unmodified values which come from dataflow.
return MetricKey(step, MetricName(namespace, name), labels=labels)
def _populate_metrics(self, response, result, user_metrics=False):
"""Move metrics from response to results as MetricResults."""
if user_metrics:
metrics = [
metric for metric in response.metrics if metric.name.origin == 'user'
]
else:
metrics = [
metric for metric in response.metrics
if metric.name.origin == 'dataflow/v1b3'
]
# Get the tentative/committed versions of every metric together.
metrics_by_name = defaultdict(lambda: {})
for metric in metrics:
if (metric.name.name.endswith('_MIN') or
metric.name.name.endswith('_MAX') or
metric.name.name.endswith('_MEAN') or
metric.name.name.endswith('_COUNT')):
# The Dataflow Service presents distribution metrics in two ways:
# One way is as a single distribution object with all its fields, and
# another way is as four different scalar metrics labeled as _MIN,
# _MAX, _COUNT_, _MEAN.
# TODO(pabloem) remove these when distributions are not being broken up
# in the service.
# The second way is only useful for the UI, and should be ignored.
continue
is_tentative = [
prop for prop in metric.name.context.additionalProperties
if prop.key == 'tentative' and prop.value == 'true'
]
tentative_or_committed = 'tentative' if is_tentative else 'committed'
metric_key = self._get_metric_key(metric)
if metric_key is None:
continue
metrics_by_name[metric_key][tentative_or_committed] = metric
# Now we create the MetricResult elements.
for metric_key, metric in metrics_by_name.items():
attempted = self._get_metric_value(metric['tentative'])
committed = self._get_metric_value(metric['committed'])
result.append(
MetricResult(metric_key, attempted=attempted, committed=committed))
def _get_metric_value(self, metric):
"""Get a metric result object from a MetricUpdate from Dataflow API."""
if metric is None:
return None
if metric.scalar is not None:
return metric.scalar.integer_value
elif metric.distribution is not None:
dist_count = _get_match(
metric.distribution.object_value.properties,
lambda x: x.key == 'count').value.integer_value
dist_min = _get_match(
metric.distribution.object_value.properties,
lambda x: x.key == 'min').value.integer_value
dist_max = _get_match(
metric.distribution.object_value.properties,
lambda x: x.key == 'max').value.integer_value
dist_sum = _get_match(
metric.distribution.object_value.properties,
lambda x: x.key == 'sum').value.integer_value
if not dist_sum:
        # Distribution metrics are not meant to be used with large values, but in
        # case one is, the value can overflow and become a double_value, so the
        # correctness of the sum or mean value may not be guaranteed.
_LOGGER.info(
"Distribution metric sum value seems to have "
"overflowed integer_value range, the correctness of sum or mean "
"value may not be guaranteed: %s" % metric.distribution)
dist_sum = int(
_get_match(
metric.distribution.object_value.properties,
lambda x: x.key == 'sum').value.double_value)
return DistributionResult(
DistributionData(dist_sum, dist_count, dist_min, dist_max))
else:
return None
def _get_metrics_from_dataflow(self, job_id=None):
"""Return cached metrics or query the dataflow service."""
if not job_id:
try:
job_id = self.job_result.job_id()
except AttributeError:
job_id = None
if not job_id:
raise ValueError('Can not query metrics. Job id is unknown.')
if self._cached_metrics:
return self._cached_metrics
job_metrics = self._dataflow_client.get_job_metrics(job_id)
    # Once the job is known to have reached a terminal state, the metrics can no
    # longer change, so it is safe to cache them.
if self.job_result and self.job_result.is_in_terminal_state():
self._cached_metrics = job_metrics
return job_metrics
def all_metrics(self, job_id=None):
"""Return all user and system metrics from the dataflow service."""
metric_results = []
response = self._get_metrics_from_dataflow(job_id=job_id)
self._populate_metrics(response, metric_results, user_metrics=True)
self._populate_metrics(response, metric_results, user_metrics=False)
return metric_results
def query(self, filter=None):
metric_results = []
response = self._get_metrics_from_dataflow()
self._populate_metrics(response, metric_results, user_metrics=True)
return {
self.COUNTERS: [
elm for elm in metric_results if self.matches(filter, elm.key) and
DataflowMetrics._is_counter(elm)
],
self.DISTRIBUTIONS: [
elm for elm in metric_results if self.matches(filter, elm.key) and
DataflowMetrics._is_distribution(elm)
],
self.GAUGES: []
} # TODO(pabloem): Add Gauge support for dataflow.
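# Illustrative use of the class above (a sketch, not part of this module): on the
# Dataflow runner, pipeline.run() returns a PipelineResult whose metrics() method
# yields a DataflowMetrics instance; 'my_counter' below is an assumed metric name.
#   from apache_beam.metrics.metric import MetricsFilter
#   result = pipeline.run()
#   result.wait_until_finish()
#   query_result = result.metrics().query(MetricsFilter().with_name('my_counter'))
#   for counter in query_result['counters']:
#     print(counter.key, counter.committed)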
def main(argv):
  """Print the metric results for the dataflow --job_id and --project.
Instead of running an entire pipeline which takes several minutes, use this
main method to display MetricResults for a specific --job_id and --project
which takes only a few seconds.
"""
# TODO(BEAM-6833): The MetricResults do not show translated step names as the
# job_graph is not provided to DataflowMetrics.
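  # Illustrative invocation (the module path is assumed from this file's location
  # in the tree; the job id and project values are placeholders):
  #   python -m apache_beam.runners.dataflow.dataflow_metrics -j <job_id> -p <project>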
# Import here to avoid adding the dependency for local running scenarios.
try:
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.runners.dataflow.internal import apiclient
except ImportError:
raise ImportError(
'Google Cloud Dataflow runner not available, '
'please install apache_beam[gcp]')
if argv[0] == __file__:
argv = argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument(
'-j', '--job_id', type=str, help='The job id to query metrics for.')
parser.add_argument(
'-p',
'--project',
type=str,
help='The project name to query metrics for.')
flags = parser.parse_args(argv)
# Get a Dataflow API client and set its project and job_id in the options.
options = PipelineOptions()
gcloud_options = options.view_as(GoogleCloudOptions)
gcloud_options.project = flags.project
dataflow_client = apiclient.DataflowApplicationClient(options)
df_metrics = DataflowMetrics(dataflow_client)
all_metrics = df_metrics.all_metrics(job_id=flags.job_id)
_LOGGER.info(
'Printing all MetricResults for %s in %s', flags.job_id, flags.project)
for metric_result in all_metrics:
_LOGGER.info(metric_result)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
| apache-2.0 | 1,315,063,799,518,377,000 | 38.570122 | 80 | 0.682718 | false |
Titosoft/ferry-boat | web2py/gluon/tests/test_utils.py | 18 | 1269 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit tests for utils.py """
import sys
import os
import unittest
def fix_sys_path():
"""
    logic to always have the correct sys.path
'', web2py/gluon, web2py/site-packages, web2py/ ...
"""
def add_path_first(path):
sys.path = [path] + [p for p in sys.path if (
not p == path and not p == (path + '/'))]
path = os.path.dirname(os.path.abspath(__file__))
if not os.path.isfile(os.path.join(path,'web2py.py')):
i = 0
while i<10:
i += 1
if os.path.exists(os.path.join(path,'web2py.py')):
break
path = os.path.abspath(os.path.join(path, '..'))
paths = [path,
os.path.abspath(os.path.join(path, 'site-packages')),
os.path.abspath(os.path.join(path, 'gluon')),
'']
[add_path_first(path) for path in paths]
fix_sys_path()
from utils import md5_hash
class TestUtils(unittest.TestCase):
""" Tests the utils.py module """
def test_md5_hash(self):
""" Tests the md5_hash function """
data = md5_hash("web2py rocks")
self.assertEqual(data, '79509f3246a2824dee64635303e99204')
if __name__ == '__main__':
unittest.main()
| mit | -4,610,418,095,572,131,300 | 23.403846 | 66 | 0.55792 | false |
cysuncn/python | spark/crm/PROC_A_R_INCOME_TOP.py | 1 | 7897 | #coding=UTF-8
from pyspark import SparkContext, SparkConf, SQLContext, Row, HiveContext
from pyspark.sql.types import *
from datetime import date, datetime, timedelta
import sys, re, os
st = datetime.now()
conf = SparkConf().setAppName('PROC_A_R_INCOME_TOP').setMaster(sys.argv[2])
sc = SparkContext(conf = conf)
sc.setLogLevel('WARN')
if len(sys.argv) > 5:
if sys.argv[5] == "hive":
sqlContext = HiveContext(sc)
else:
sqlContext = SQLContext(sc)
hdfs = sys.argv[3]
dbname = sys.argv[4]
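#Illustrative invocation (argument order inferred from the sys.argv reads above;
#the master, HDFS path and database name are placeholders):
#  spark-submit PROC_A_R_INCOME_TOP.py 20160701 yarn-client hdfs://ns1/crm acrm hive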
#Dates needed for processing
etl_date = sys.argv[1]
#ETL date
V_DT = etl_date
#Previous day's date
V_DT_LD = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8])) + timedelta(-1)).strftime("%Y%m%d")
#First day of the current month
V_DT_FMD = date(int(etl_date[0:4]), int(etl_date[4:6]), 1).strftime("%Y%m%d")
#Last day of the previous month
V_DT_LMD = (date(int(etl_date[0:4]), int(etl_date[4:6]), 1) + timedelta(-1)).strftime("%Y%m%d")
#Date in 10-character form (YYYY-MM-DD)
V_DT10 = (date(int(etl_date[0:4]), int(etl_date[4:6]), int(etl_date[6:8]))).strftime("%Y-%m-%d")
V_STEP = 0
#----------------------------------------------Business logic starts here----------------------------------------------------------
#Source tables
OCRM_F_CI_CUST_DESC = sqlContext.read.parquet(hdfs+'/OCRM_F_CI_CUST_DESC/*')
OCRM_F_CI_CUST_DESC.registerTempTable("OCRM_F_CI_CUST_DESC")
ACRM_A_INOUTCOME = sqlContext.read.parquet(hdfs+'/ACRM_A_INOUTCOME/*')
ACRM_A_INOUTCOME.registerTempTable("ACRM_A_INOUTCOME")
ADMIN_AUTH_ORG = sqlContext.read.parquet(hdfs+'/ADMIN_AUTH_ORG/*')
ADMIN_AUTH_ORG.registerTempTable("ADMIN_AUTH_ORG")
#Target table
#ACRM_A_INCOME_TOP full-refresh (snapshot) table
#Task [21] 001-01::
V_STEP = V_STEP + 1
sql = """
SELECT ROW_NUMBER() OVER(
PARTITION BY A.ORG_ID
,C.CUST_TYP
ORDER BY ABS(COALESCE(COALESCE(SUM(A.BAL), 0), 0)) DESC) AS RANK
,A.ORG_ID AS ORG_ID
,B.ORG_NAME AS ORG_NAME
,A.CUST_ID AS CUST_ID
,C.CUST_ZH_NAME AS CUST_ZH_NAME
,C.CUST_TYP AS CUST_TYP
,C.CERT_TYPE AS CERT_TYPE
,C.CERT_NUM AS CERT_NUM
,ABS(COALESCE(SUM(A.BAL), 0)) AS BAL
,A.FR_ID AS FR_ID
FROM ACRM_A_INOUTCOME A --
INNER JOIN ADMIN_AUTH_ORG B --
ON A.ORG_ID = B.ORG_ID
AND A.FR_ID = B.FR_ID
LEFT JOIN OCRM_F_CI_CUST_DESC C --
ON A.CUST_ID = C.CUST_ID
AND C.FR_ID = A.FR_ID
WHERE A.BAL < 0
AND A.ODS_DATE = V_DT
GROUP BY A.ORG_ID
,B.ORG_NAME
,A.CUST_ID
,C.CUST_ZH_NAME
,C.CUST_TYP
,C.CERT_TYPE
,C.CERT_NUM
,A.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_INCOME_TOP_01 = sqlContext.sql(sql)
ACRM_A_INCOME_TOP_01.registerTempTable("ACRM_A_INCOME_TOP_01")
#dfn="ACRM_A_INCOME_TOP_01/"+V_DT+".parquet"
#ACRM_A_INCOME_TOP_01.cache()
nrows = ACRM_A_INCOME_TOP_01.count()
#ACRM_A_INCOME_TOP_01.write.save(path=hdfs + '/' + dfn, mode='overwrite')
#ACRM_A_INCOME_TOP_01.unpersist()
#ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_INCOME_TOP_01/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_INCOME_TOP_01 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
#Task [21] 001-02::
V_STEP = V_STEP + 1
sql = """
SELECT ROW_NUMBER() OVER(
PARTITION BY A.ORG_ID
,C.CUST_TYP
ORDER BY ABS(COALESCE(COALESCE(SUM(A.BAL), 0), 0)) DESC) AS RANK
,A.ORG_ID AS ORG_ID
,B.ORG_NAME AS ORG_NAME
,A.CUST_ID AS CUST_ID
,C.CUST_ZH_NAME AS CUST_ZH_NAME
,C.CUST_TYP AS CUST_TYP
,C.CERT_TYPE AS CERT_TYPE
,C.CERT_NUM AS CERT_NUM
,ABS(COALESCE(SUM(A.BAL), 0)) AS BAL
,A.FR_ID AS FR_ID
FROM ACRM_A_INOUTCOME A --
INNER JOIN ADMIN_AUTH_ORG B --
ON B.UP_ORG_ID = '320000000'
AND A.FR_ID = B.FR_ID
LEFT JOIN OCRM_F_CI_CUST_DESC C --
ON A.CUST_ID = C.CUST_ID
AND C.FR_ID = A.FR_ID
WHERE A.BAL < 0
AND A.ODS_DATE = V_DT
GROUP BY A.ORG_ID
,B.ORG_NAME
,A.CUST_ID
,C.CUST_ZH_NAME
,C.CUST_TYP
,C.CERT_TYPE
,C.CERT_NUM
,A.FR_ID """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_INCOME_TOP_02 = sqlContext.sql(sql)
ACRM_A_INCOME_TOP_02.registerTempTable("ACRM_A_INCOME_TOP_02")
#dfn="ACRM_A_INCOME_TOP_02/"+V_DT+".parquet"
#ACRM_A_INCOME_TOP_02.cache()
nrows = ACRM_A_INCOME_TOP_02.count()
#ACRM_A_INCOME_TOP_02.write.save(path=hdfs + '/' + dfn, mode='overwrite')
#ACRM_A_INCOME_TOP_02.unpersist()
#ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_INCOME_TOP_02/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_INCOME_TOP_02 lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
#Task [21] 001-03::
V_STEP = V_STEP + 1
sql = """
SELECT monotonically_increasing_id() AS ID
,ORG_ID AS ORG_ID
,ORG_NAME AS ORG_NAME
,CUST_ID AS CUST_ID
,CUST_ZH_NAME AS CUST_NAME
,ABS(A.BAL) AS CRE_BAL
,V_DT AS ODS_DATE
,CERT_TYPE AS CERT_TYPE
,CERT_NUM AS CERT_NUM
,CUST_TYP AS CUST_TYPE
   FROM ACRM_A_INCOME_TOP_01 A                                --Temp table 01: TOP 10 net capital inflow by organization
WHERE RANK <= 10 """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_INCOME_TOP = sqlContext.sql(sql)
ACRM_A_INCOME_TOP.registerTempTable("ACRM_A_INCOME_TOP")
dfn="ACRM_A_INCOME_TOP/"+V_DT+".parquet"
ACRM_A_INCOME_TOP.cache()
nrows = ACRM_A_INCOME_TOP.count()
ACRM_A_INCOME_TOP.write.save(path=hdfs + '/' + dfn, mode='overwrite')
ACRM_A_INCOME_TOP.unpersist()
#Full-refresh table: delete the previous day's data
ret = os.system("hdfs dfs -rm -r /"+dbname+"/ACRM_A_INCOME_TOP/"+V_DT_LD+".parquet")
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_INCOME_TOP lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
#Task [11] 001-04::
V_STEP = V_STEP + 1
sql = """
SELECT monotonically_increasing_id() AS ID
,ORG_ID AS ORG_ID
,ORG_NAME AS ORG_NAME
,CUST_ID AS CUST_ID
,CUST_ZH_NAME AS CUST_NAME
,ABS(A.BAL) AS CRE_BAL
,V_DT AS ODS_DATE
,CERT_TYPE AS CERT_TYPE
,CERT_NUM AS CERT_NUM
,CUST_TYP AS CUST_TYPE
   FROM ACRM_A_INCOME_TOP_02 A                                --Temp table 02: TOP 10 net capital inflow by organization
WHERE RANK <= 10 """
sql = re.sub(r"\bV_DT\b", "'"+V_DT10+"'", sql)
ACRM_A_INCOME_TOP = sqlContext.sql(sql)
ACRM_A_INCOME_TOP.registerTempTable("ACRM_A_INCOME_TOP")
dfn="ACRM_A_INCOME_TOP/"+V_DT+".parquet"
ACRM_A_INCOME_TOP.cache()
nrows = ACRM_A_INCOME_TOP.count()
ACRM_A_INCOME_TOP.write.save(path=hdfs + '/' + dfn, mode='append')
ACRM_A_INCOME_TOP.unpersist()
et = datetime.now()
print("Step %d start[%s] end[%s] use %d seconds, insert ACRM_A_INCOME_TOP lines %d") % (V_STEP, st.strftime("%H:%M:%S"), et.strftime("%H:%M:%S"), (et-st).seconds, nrows)
| gpl-3.0 | 4,919,568,110,676,388,000 | 38.871134 | 172 | 0.523465 | false |
romain-dartigues/ansible | lib/ansible/modules/system/service_facts.py | 7 | 9295 | #!/usr/bin/python
# (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# originally copied from AWX's scan_services module to bring this functionality
# into Core
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: service_facts
short_description: Return service state information as fact data
description:
- Return service state information as fact data for various service management utilities
version_added: "2.5"
requirements: ["Any of the following supported init systems: systemd, sysv, upstart"]
notes:
- When accessing the C(ansible_facts.services) facts collected by this module,
it is recommended to not use "dot notation" because services can have a C(-)
character in their name which would result in invalid "dot notation", such as
C(ansible_facts.services.zuul-gateway). It is instead recommended to
    use the string value of the service name as the key in order to obtain
the fact data value like C(ansible_facts.services['zuul-gateway'])
author:
- Adam Miller (@maxamillion)
'''
EXAMPLES = '''
- name: populate service facts
service_facts:
- debug:
var: ansible_facts.services
'''
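# Illustrative follow-up task (not part of the module's EXAMPLES; the service name
# 'sshd.service' is an assumption) showing the bracket-notation access recommended
# in the notes above:
#   - debug:
#       msg: sshd is running
#     when: ansible_facts.services['sshd.service'].state == 'running'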
RETURN = '''
ansible_facts:
description: Facts to add to ansible_facts about the services on the system
returned: always
type: complex
contains:
services:
description: States of the services with service name as key.
returned: always
type: complex
contains:
source:
description: Init system of the service. One of C(systemd), C(sysv), C(upstart).
returned: always
type: string
sample: sysv
state:
description: State of the service. Either C(running) or C(stopped).
returned: always
type: string
sample: running
name:
description: Name of the service.
returned: always
type: string
sample: arp-ethers.service
'''
import re
from ansible.module_utils.basic import AnsibleModule
class BaseService(object):
def __init__(self, module):
self.module = module
self.incomplete_warning = False
class ServiceScanService(BaseService):
def gather_services(self):
services = {}
service_path = self.module.get_bin_path("service")
if service_path is None:
return None
initctl_path = self.module.get_bin_path("initctl")
chkconfig_path = self.module.get_bin_path("chkconfig")
# sysvinit
if service_path is not None and chkconfig_path is None:
rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) < 4:
continue # Skipping because we expected more data
service_name = " ".join(line_data[3:])
if line_data[1] == "+":
service_state = "running"
else:
service_state = "stopped"
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
# Upstart
if initctl_path is not None and chkconfig_path is None:
p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
real_stdout = stdout.replace("\r", "")
for line in real_stdout.split("\n"):
m = p.match(line)
if not m:
continue
service_name = m.group('name')
service_goal = m.group('goal')
service_state = m.group('state')
if m.group('pid'):
pid = m.group('pid')
else:
pid = None # NOQA
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
services[service_name] = payload
# RH sysvinit
elif chkconfig_path is not None:
# print '%s --status-all | grep -E "is (running|stopped)"' % service_path
p = re.compile(
r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
# Check for special cases where stdout does not fit pattern
match_any = False
for line in stdout.split('\n'):
if p.match(line):
match_any = True
if not match_any:
p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
match_any = False
for line in stdout.split('\n'):
if p_simple.match(line):
match_any = True
if match_any:
# Try extra flags " -l --allservices" needed for SLES11
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
elif '--list' in stderr:
# Extra flag needed for RHEL5
rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
for line in stdout.split('\n'):
m = p.match(line)
if m:
service_name = m.group('service')
service_state = 'stopped'
if m.group('rl3') == 'on':
rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
service_state = rc
if rc in (0,):
service_state = 'running'
# elif rc in (1,3):
else:
if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
self.incomplete_warning = True
continue
else:
service_state = 'stopped'
service_data = {"name": service_name, "state": service_state, "source": "sysv"}
services[service_name] = service_data
return services
class SystemctlScanService(BaseService):
def systemd_enabled(self):
# Check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError:
# If comm doesn't exist, old kernel, no systemd
return False
for line in f:
if 'systemd' in line:
return True
return False
def gather_services(self):
services = {}
if not self.systemd_enabled():
return None
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
if systemctl_path is None:
return None
rc, stdout, stderr = self.module.run_command("%s list-units --no-pager --type service --all" % systemctl_path, use_unsafe_shell=True)
for line in [svc_line for svc_line in stdout.split('\n') if '.service' in svc_line and 'not-found' not in svc_line]:
service_name = line.split()[0]
if "running" in line:
state_val = "running"
else:
if 'failed' in line:
service_name = line.split()[1]
state_val = "stopped"
services[service_name] = {"name": service_name, "state": state_val, "source": "systemd"}
return services
def main():
module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
module.run_command_environ_update = dict(LANG="C", LC_ALL="C")
service_modules = (ServiceScanService, SystemctlScanService)
all_services = {}
incomplete_warning = False
for svc_module in service_modules:
svcmod = svc_module(module)
svc = svcmod.gather_services()
if svc is not None:
all_services.update(svc)
if svcmod.incomplete_warning:
incomplete_warning = True
if len(all_services) == 0:
results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
else:
results = dict(ansible_facts=dict(services=all_services))
if incomplete_warning:
results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,845,405,428,721,974,000 | 38.892704 | 150 | 0.555675 | false |
jbenden/ansible | lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py | 8 | 5674 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_mon_notification_plan
short_description: Create or delete a Rackspace Cloud Monitoring notification
plan.
description:
- Create or delete a Rackspace Cloud Monitoring notification plan by
associating existing rax_mon_notifications with severity levels. Rackspace
monitoring module flow | rax_mon_entity -> rax_mon_check ->
rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
version_added: "2.0"
options:
state:
description:
- Ensure that the notification plan with this C(label) exists or does not
exist.
choices: ['present', 'absent']
label:
description:
- Defines a friendly name for this notification plan. String between 1 and
255 characters long.
required: true
critical_state:
description:
- Notification list to use when the alarm state is CRITICAL. Must be an
array of valid rax_mon_notification ids.
warning_state:
description:
- Notification list to use when the alarm state is WARNING. Must be an array
of valid rax_mon_notification ids.
ok_state:
description:
- Notification list to use when the alarm state is OK. Must be an array of
valid rax_mon_notification ids.
author: Ash Wilson
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Example notification plan
gather_facts: False
hosts: local
connection: local
tasks:
- name: Establish who gets called when.
rax_mon_notification_plan:
credentials: ~/.rax_pub
state: present
label: defcon1
critical_state:
- "{{ everyone['notification']['id'] }}"
warning_state:
- "{{ opsfloor['notification']['id'] }}"
register: defcon1
'''
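# Illustrative follow-up (an assumption, not part of the module docs): per the flow
# described above, the registered plan is typically consumed by a rax_mon_alarm
# task, which would reference the plan id as:
#   "{{ defcon1['notification_plan']['id'] }}"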
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
def notification_plan(module, state, label, critical_state, warning_state, ok_state):
if len(label) < 1 or len(label) > 255:
module.fail_json(msg='label must be between 1 and 255 characters long')
changed = False
notification_plan = None
cm = pyrax.cloud_monitoring
if not cm:
module.fail_json(msg='Failed to instantiate client. This typically '
'indicates an invalid region or an incorrectly '
'capitalized region name.')
existing = []
for n in cm.list_notification_plans():
if n.label == label:
existing.append(n)
if existing:
notification_plan = existing[0]
if state == 'present':
should_create = False
should_delete = False
if len(existing) > 1:
module.fail_json(msg='%s notification plans are labelled %s.' %
(len(existing), label))
if notification_plan:
should_delete = (critical_state and critical_state != notification_plan.critical_state) or \
(warning_state and warning_state != notification_plan.warning_state) or \
(ok_state and ok_state != notification_plan.ok_state)
if should_delete:
notification_plan.delete()
should_create = True
else:
should_create = True
if should_create:
notification_plan = cm.create_notification_plan(label=label,
critical_state=critical_state,
warning_state=warning_state,
ok_state=ok_state)
changed = True
else:
for np in existing:
np.delete()
changed = True
if notification_plan:
notification_plan_dict = {
"id": notification_plan.id,
"critical_state": notification_plan.critical_state,
"warning_state": notification_plan.warning_state,
"ok_state": notification_plan.ok_state,
"metadata": notification_plan.metadata
}
module.exit_json(changed=changed, notification_plan=notification_plan_dict)
else:
module.exit_json(changed=changed)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
label=dict(required=True),
critical_state=dict(type='list'),
warning_state=dict(type='list'),
ok_state=dict(type='list')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
state = module.params.get('state')
label = module.params.get('label')
critical_state = module.params.get('critical_state')
warning_state = module.params.get('warning_state')
ok_state = module.params.get('ok_state')
setup_rax_module(module, pyrax)
notification_plan(module, state, label, critical_state, warning_state, ok_state)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,263,496,684,988,052,700 | 31.056497 | 104 | 0.61491 | false |
insiderr/insiderr-app | ios-patches/basemodules/twisted/names/test/test_names.py | 4 | 37675 | # -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.names.
"""
import socket, operator, copy
from StringIO import StringIO
from functools import partial, reduce
from twisted.trial import unittest
from twisted.internet import reactor, defer, error
from twisted.internet.defer import succeed
from twisted.names import client, server, common, authority, dns
from twisted.names.dns import Message
from twisted.names.error import DomainError
from twisted.names.client import Resolver
from twisted.names.secondary import (
SecondaryAuthorityService, SecondaryAuthority)
from twisted.test.proto_helpers import StringTransport, MemoryReactorClock
def justPayload(results):
return [r.payload for r in results[0]]
class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# Yes, skip FileAuthority
common.ResolverBase.__init__(self)
self.soa, self.records = soa, records
soa_record = dns.Record_SOA(
mname = 'test-domain.com',
rname = 'root.test-domain.com',
serial = 100,
refresh = 1234,
minimum = 7654,
expire = 19283784,
retry = 15,
ttl=1
)
reverse_soa = dns.Record_SOA(
mname = '93.84.28.in-addr.arpa',
rname = '93.84.28.in-addr.arpa',
serial = 120,
refresh = 54321,
minimum = 382,
expire = 11193983,
retry = 30,
ttl=3
)
my_soa = dns.Record_SOA(
mname = 'my-domain.com',
rname = 'postmaster.test-domain.com',
serial = 130,
refresh = 12345,
minimum = 1,
expire = 999999,
retry = 100,
)
test_domain_com = NoFileAuthority(
soa = ('test-domain.com', soa_record),
records = {
'test-domain.com': [
soa_record,
dns.Record_A('127.0.0.1'),
dns.Record_NS('39.28.189.39'),
dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all'),
dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid'),
dns.Record_MX(10, 'host.test-domain.com'),
dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know'),
dns.Record_CNAME('canonical.name.com'),
dns.Record_MB('mailbox.test-domain.com'),
dns.Record_MG('mail.group.someplace'),
dns.Record_TXT('A First piece of Text', 'a SecoNd piece'),
dns.Record_A6(0, 'ABCD::4321', ''),
dns.Record_A6(12, '0:0069::0', 'some.network.tld'),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net'),
dns.Record_TXT('Some more text, haha! Yes. \0 Still here?'),
dns.Record_MR('mail.redirect.or.whatever'),
dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box'),
dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com'),
dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text'),
dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP,
'\x12\x01\x16\xfe\xc1\x00\x01'),
dns.Record_NAPTR(100, 10, "u", "sip+E2U",
"!^.*$!sip:[email protected]!"),
dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF')],
'http.tcp.test-domain.com': [
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool')
],
'host.test-domain.com': [
dns.Record_A('123.242.1.5'),
dns.Record_A('0.255.0.255'),
],
'host-two.test-domain.com': [
#
# Python bug
# dns.Record_A('255.255.255.255'),
#
dns.Record_A('255.255.255.254'),
dns.Record_A('0.0.0.0')
],
'cname.test-domain.com': [
dns.Record_CNAME('test-domain.com')
],
'anothertest-domain.com': [
dns.Record_A('1.2.3.4')],
}
)
reverse_domain = NoFileAuthority(
soa = ('93.84.28.in-addr.arpa', reverse_soa),
records = {
'123.93.84.28.in-addr.arpa': [
dns.Record_PTR('test.host-reverse.lookup.com'),
reverse_soa
]
}
)
my_domain_com = NoFileAuthority(
soa = ('my-domain.com', my_soa),
records = {
'my-domain.com': [
my_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')
]
}
)
class ServerDNSTestCase(unittest.TestCase):
"""
Test cases for DNS server and client.
"""
def setUp(self):
self.factory = server.DNSServerFactory([
test_domain_com, reverse_domain, my_domain_com
], verbose=2)
p = dns.DNSDatagramProtocol(self.factory)
while 1:
listenerTCP = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
# It's simpler to do the stop listening with addCleanup,
# even though we might not end up using this TCP port in
# the test (if the listenUDP below fails). Cleaning up
# this TCP port sooner than "cleanup time" would mean
# adding more code to keep track of the Deferred returned
# by stopListening.
self.addCleanup(listenerTCP.stopListening)
port = listenerTCP.getHost().port
try:
listenerUDP = reactor.listenUDP(port, p, interface="127.0.0.1")
except error.CannotListenError:
pass
else:
self.addCleanup(listenerUDP.stopListening)
break
self.listenerTCP = listenerTCP
self.listenerUDP = listenerUDP
self.resolver = client.Resolver(servers=[('127.0.0.1', port)])
def tearDown(self):
"""
Clean up any server connections associated with the
L{DNSServerFactory} created in L{setUp}
"""
# It'd be great if DNSServerFactory had a method that
# encapsulated this task. At least the necessary data is
# available, though.
for conn in self.factory.connections[:]:
conn.transport.loseConnection()
def namesTest(self, querying, expectedRecords):
"""
Assert that the DNS response C{querying} will eventually fire with
contains exactly a certain collection of records.
@param querying: A L{Deferred} returned from one of the DNS client
I{lookup} methods.
@param expectedRecords: A L{list} of L{IRecord} providers which must be
in the response or the test will be failed.
@return: A L{Deferred} that fires when the assertion has been made. It
fires with a success result if the assertion succeeds and with a
L{Failure} if it fails.
"""
def checkResults(response):
receivedRecords = justPayload(response)
self.assertEqual(set(expectedRecords), set(receivedRecords))
querying.addCallback(checkResults)
return querying
def testAddressRecord1(self):
"""Test simple DNS 'A' record queries"""
return self.namesTest(
self.resolver.lookupAddress('test-domain.com'),
[dns.Record_A('127.0.0.1', ttl=19283784)]
)
def testAddressRecord2(self):
"""Test DNS 'A' record queries with multiple answers"""
return self.namesTest(
self.resolver.lookupAddress('host.test-domain.com'),
[dns.Record_A('123.242.1.5', ttl=19283784), dns.Record_A('0.255.0.255', ttl=19283784)]
)
def testAddressRecord3(self):
"""Test DNS 'A' record queries with edge cases"""
return self.namesTest(
self.resolver.lookupAddress('host-two.test-domain.com'),
[dns.Record_A('255.255.255.254', ttl=19283784), dns.Record_A('0.0.0.0', ttl=19283784)]
)
def testAuthority(self):
"""Test DNS 'SOA' record queries"""
return self.namesTest(
self.resolver.lookupAuthority('test-domain.com'),
[soa_record]
)
def test_mailExchangeRecord(self):
"""
The DNS client can issue an MX query and receive a response including
an MX record as well as any A record hints.
"""
return self.namesTest(
self.resolver.lookupMailExchange(b"test-domain.com"),
[dns.Record_MX(10, b"host.test-domain.com", ttl=19283784),
dns.Record_A(b"123.242.1.5", ttl=19283784),
dns.Record_A(b"0.255.0.255", ttl=19283784)])
def testNameserver(self):
"""Test DNS 'NS' record queries"""
return self.namesTest(
self.resolver.lookupNameservers('test-domain.com'),
[dns.Record_NS('39.28.189.39', ttl=19283784)]
)
def testHINFO(self):
"""Test DNS 'HINFO' record queries"""
return self.namesTest(
self.resolver.lookupHostInfo('test-domain.com'),
[dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know', ttl=19283784)]
)
def testPTR(self):
"""Test DNS 'PTR' record queries"""
return self.namesTest(
self.resolver.lookupPointer('123.93.84.28.in-addr.arpa'),
[dns.Record_PTR('test.host-reverse.lookup.com', ttl=11193983)]
)
def testCNAME(self):
"""Test DNS 'CNAME' record queries"""
return self.namesTest(
self.resolver.lookupCanonicalName('test-domain.com'),
[dns.Record_CNAME('canonical.name.com', ttl=19283784)]
)
def testMB(self):
"""Test DNS 'MB' record queries"""
return self.namesTest(
self.resolver.lookupMailBox('test-domain.com'),
[dns.Record_MB('mailbox.test-domain.com', ttl=19283784)]
)
def testMG(self):
"""Test DNS 'MG' record queries"""
return self.namesTest(
self.resolver.lookupMailGroup('test-domain.com'),
[dns.Record_MG('mail.group.someplace', ttl=19283784)]
)
def testMR(self):
"""Test DNS 'MR' record queries"""
return self.namesTest(
self.resolver.lookupMailRename('test-domain.com'),
[dns.Record_MR('mail.redirect.or.whatever', ttl=19283784)]
)
def testMINFO(self):
"""Test DNS 'MINFO' record queries"""
return self.namesTest(
self.resolver.lookupMailboxInfo('test-domain.com'),
[dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box', ttl=19283784)]
)
def testSRV(self):
"""Test DNS 'SRV' record queries"""
return self.namesTest(
self.resolver.lookupService('http.tcp.test-domain.com'),
[dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl=19283784)]
)
def testAFSDB(self):
"""Test DNS 'AFSDB' record queries"""
return self.namesTest(
self.resolver.lookupAFSDatabase('test-domain.com'),
[dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com', ttl=19283784)]
)
def testRP(self):
"""Test DNS 'RP' record queries"""
return self.namesTest(
self.resolver.lookupResponsibility('test-domain.com'),
[dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text', ttl=19283784)]
)
def testTXT(self):
"""Test DNS 'TXT' record queries"""
return self.namesTest(
self.resolver.lookupText('test-domain.com'),
[dns.Record_TXT('A First piece of Text', 'a SecoNd piece', ttl=19283784),
dns.Record_TXT('Some more text, haha! Yes. \0 Still here?', ttl=19283784)]
)
def test_spf(self):
"""
L{DNSServerFactory} can serve I{SPF} resource records.
"""
return self.namesTest(
self.resolver.lookupSenderPolicy('test-domain.com'),
[dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all', ttl=19283784),
dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid', ttl=19283784)]
)
def testWKS(self):
"""Test DNS 'WKS' record queries"""
return self.namesTest(
self.resolver.lookupWellKnownServices('test-domain.com'),
[dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP, '\x12\x01\x16\xfe\xc1\x00\x01', ttl=19283784)]
)
def testSomeRecordsWithTTLs(self):
result_soa = copy.copy(my_soa)
result_soa.ttl = my_soa.expire
return self.namesTest(
self.resolver.lookupAllRecords('my-domain.com'),
[result_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')]
)
def testAAAA(self):
"""Test DNS 'AAAA' record queries (IPv6)"""
return self.namesTest(
self.resolver.lookupIPV6Address('test-domain.com'),
[dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF', ttl=19283784)]
)
def testA6(self):
"""Test DNS 'A6' record queries (IPv6)"""
return self.namesTest(
self.resolver.lookupAddress6('test-domain.com'),
[dns.Record_A6(0, 'ABCD::4321', '', ttl=19283784),
dns.Record_A6(12, '0:0069::0', 'some.network.tld', ttl=19283784),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net', ttl=19283784)]
)
def test_zoneTransfer(self):
"""
Test DNS 'AXFR' queries (Zone transfer)
"""
default_ttl = soa_record.expire
results = [copy.copy(r) for r in reduce(operator.add, test_domain_com.records.values())]
for r in results:
if r.ttl is None:
r.ttl = default_ttl
return self.namesTest(
self.resolver.lookupZone('test-domain.com').addCallback(lambda r: (r[0][:-1],)),
results
)
def testSimilarZonesDontInterfere(self):
"""Tests that unrelated zones don't mess with each other."""
return self.namesTest(
self.resolver.lookupAddress("anothertest-domain.com"),
[dns.Record_A('1.2.3.4', ttl=19283784)]
)
def test_NAPTR(self):
"""
Test DNS 'NAPTR' record queries.
"""
return self.namesTest(
self.resolver.lookupNamingAuthorityPointer('test-domain.com'),
[dns.Record_NAPTR(100, 10, "u", "sip+E2U",
"!^.*$!sip:[email protected]!",
ttl=19283784)])
class DNSServerFactoryTests(unittest.TestCase):
"""
Tests for L{server.DNSServerFactory}.
"""
def _messageReceivedTest(self, methodName, message):
"""
Assert that the named method is called with the given message when
it is passed to L{DNSServerFactory.messageReceived}.
"""
# Make it appear to have some queries so that
# DNSServerFactory.allowQuery allows it.
message.queries = [None]
receivedMessages = []
def fakeHandler(message, protocol, address):
receivedMessages.append((message, protocol, address))
class FakeProtocol(object):
def writeMessage(self, message):
pass
protocol = FakeProtocol()
factory = server.DNSServerFactory(None)
setattr(factory, methodName, fakeHandler)
factory.messageReceived(message, protocol)
self.assertEqual(receivedMessages, [(message, protocol, None)])
def test_notifyMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode
of C{OP_NOTIFY} on to L{DNSServerFactory.handleNotify}.
"""
# RFC 1996, section 4.5
opCode = 4
self._messageReceivedTest('handleNotify', Message(opCode=opCode))
def test_updateMessageReceived(self):
"""
L{DNSServerFactory.messageReceived} passes messages with an opcode
of C{OP_UPDATE} on to L{DNSServerFactory.handleOther}.
This may change if the implementation ever covers update messages.
"""
# RFC 2136, section 1.3
opCode = 5
self._messageReceivedTest('handleOther', Message(opCode=opCode))
def test_connectionTracking(self):
"""
The C{connectionMade} and C{connectionLost} methods of
L{DNSServerFactory} cooperate to keep track of all
L{DNSProtocol} objects created by a factory which are
connected.
"""
protoA, protoB = object(), object()
factory = server.DNSServerFactory()
factory.connectionMade(protoA)
self.assertEqual(factory.connections, [protoA])
factory.connectionMade(protoB)
self.assertEqual(factory.connections, [protoA, protoB])
factory.connectionLost(protoA)
self.assertEqual(factory.connections, [protoB])
factory.connectionLost(protoB)
self.assertEqual(factory.connections, [])
class HelperTestCase(unittest.TestCase):
def testSerialGenerator(self):
f = self.mktemp()
a = authority.getSerial(f)
for i in range(20):
b = authority.getSerial(f)
self.failUnless(a < b)
a = b
class AXFRTest(unittest.TestCase):
def setUp(self):
self.results = None
self.d = defer.Deferred()
self.d.addCallback(self._gotResults)
self.controller = client.AXFRController('fooby.com', self.d)
self.soa = dns.RRHeader(name='fooby.com', type=dns.SOA, cls=dns.IN, ttl=86400, auth=False,
payload=dns.Record_SOA(mname='fooby.com',
rname='hooj.fooby.com',
serial=100,
refresh=200,
retry=300,
expire=400,
minimum=500,
ttl=600))
self.records = [
self.soa,
dns.RRHeader(name='fooby.com', type=dns.NS, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_NS(name='ns.twistedmatrix.com', ttl=700)),
dns.RRHeader(name='fooby.com', type=dns.MX, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_MX(preference=10, exchange='mail.mv3d.com', ttl=700)),
dns.RRHeader(name='fooby.com', type=dns.A, cls=dns.IN, ttl=700, auth=False,
payload=dns.Record_A(address='64.123.27.105', ttl=700)),
self.soa
]
def _makeMessage(self):
# hooray they all have the same message format
return dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1, rCode=0, trunc=0, maxSize=0)
def testBindAndTNamesStyle(self):
# Bind style = One big single message
m = self._makeMessage()
m.queries = [dns.Query('fooby.com', dns.AXFR, dns.IN)]
m.answers = self.records
self.controller.messageReceived(m, None)
self.assertEqual(self.results, self.records)
def _gotResults(self, result):
self.results = result
def testDJBStyle(self):
# DJB style = message per record
records = self.records[:]
while records:
m = self._makeMessage()
m.queries = [] # DJB *doesn't* specify any queries.. hmm..
m.answers = [records.pop(0)]
self.controller.messageReceived(m, None)
self.assertEqual(self.results, self.records)
class ResolvConfHandling(unittest.TestCase):
def testMissing(self):
resolvConf = self.mktemp()
r = client.Resolver(resolv=resolvConf)
self.assertEqual(r.dynServers, [('127.0.0.1', 53)])
r._parseCall.cancel()
def testEmpty(self):
resolvConf = self.mktemp()
fObj = file(resolvConf, 'w')
fObj.close()
r = client.Resolver(resolv=resolvConf)
self.assertEqual(r.dynServers, [('127.0.0.1', 53)])
r._parseCall.cancel()
class AuthorityTests(unittest.TestCase):
"""
Tests for the basic response record selection code in L{FileAuthority}
(independent of its fileness).
"""
def test_domainErrorForNameWithCommonSuffix(self):
"""
L{FileAuthority} lookup methods errback with L{DomainError} if
the requested C{name} shares a common suffix with its zone but
is not actually a descendant of its zone, in terms of its
sequence of DNS name labels. eg www.the-example.com has
nothing to do with the zone example.com.
"""
testDomain = test_domain_com
testDomainName = 'nonexistent.prefix-' + testDomain.soa[0]
f = self.failureResultOf(testDomain.lookupAddress(testDomainName))
self.assertIsInstance(f.value, DomainError)
def test_recordMissing(self):
"""
If a L{FileAuthority} has a zone which includes an I{NS} record for a
particular name and that authority is asked for another record for the
same name which does not exist, the I{NS} record is not included in the
authority section of the response.
"""
authority = NoFileAuthority(
soa=(str(soa_record.mname), soa_record),
records={
str(soa_record.mname): [
soa_record,
dns.Record_NS('1.2.3.4'),
]})
d = authority.lookupAddress(str(soa_record.mname))
result = []
d.addCallback(result.append)
answer, authority, additional = result[0]
self.assertEqual(answer, [])
self.assertEqual(
authority, [
dns.RRHeader(
str(soa_record.mname), soa_record.TYPE,
ttl=soa_record.expire, payload=soa_record,
auth=True)])
self.assertEqual(additional, [])
def _referralTest(self, method):
"""
Create an authority and make a request against it. Then verify that the
result is a referral, including no records in the answers or additional
sections, but with an I{NS} record in the authority section.
"""
subdomain = 'example.' + str(soa_record.mname)
nameserver = dns.Record_NS('1.2.3.4')
authority = NoFileAuthority(
soa=(str(soa_record.mname), soa_record),
records={
subdomain: [
nameserver,
]})
d = getattr(authority, method)(subdomain)
answer, authority, additional = self.successResultOf(d)
self.assertEqual(answer, [])
self.assertEqual(
authority, [dns.RRHeader(
subdomain, dns.NS, ttl=soa_record.expire,
payload=nameserver, auth=False)])
self.assertEqual(additional, [])
def test_referral(self):
"""
When an I{NS} record is found for a child zone, it is included in the
authority section of the response. It is marked as non-authoritative if
the authority is not also authoritative for the child zone (RFC 2181,
section 6.1).
"""
self._referralTest('lookupAddress')
def test_allRecordsReferral(self):
"""
A referral is also generated for a request of type C{ALL_RECORDS}.
"""
self._referralTest('lookupAllRecords')
class AdditionalProcessingTests(unittest.TestCase):
"""
Tests for L{FileAuthority}'s additional processing for those record types
which require it (MX, CNAME, etc).
"""
_A = dns.Record_A(b"10.0.0.1")
_AAAA = dns.Record_AAAA(b"f080::1")
def _lookupSomeRecords(self, method, soa, makeRecord, target, addresses):
"""
Perform a DNS lookup against a L{FileAuthority} configured with records
as defined by C{makeRecord} and C{addresses}.
@param method: The name of the lookup method to use; for example,
C{"lookupNameservers"}.
@type method: L{str}
@param soa: A L{Record_SOA} for the zone for which the L{FileAuthority}
is authoritative.
@param makeRecord: A one-argument callable which accepts a name and
returns an L{IRecord} provider. L{FileAuthority} is constructed
with this record. The L{FileAuthority} is queried for a record of
the resulting type with the given name.
@param target: The extra name which the record returned by
C{makeRecord} will be pointed at; this is the name which might
require extra processing by the server so that all the available,
useful information is returned. For example, this is the target of
a CNAME record or the mail exchange host pointed to by an MX record.
@type target: L{bytes}
@param addresses: A L{list} of records giving addresses of C{target}.
@return: A L{Deferred} that fires with the result of the resolver
method give by C{method}.
"""
authority = NoFileAuthority(
soa=(soa.mname.name, soa),
records={
soa.mname.name: [makeRecord(target)],
target: addresses,
},
)
return getattr(authority, method)(soa_record.mname.name)
def assertRecordsMatch(self, expected, computed):
"""
Assert that the L{RRHeader} instances given by C{expected} and
C{computed} carry all the same information but without requiring the
records appear in the same order.
@param expected: A L{list} of L{RRHeader} instances giving the expected
records.
@param computed: A L{list} of L{RRHeader} instances giving the records
computed by the scenario under test.
@raise self.failureException: If the two collections of records disagree.
"""
# RRHeader instances aren't inherently ordered. Impose an ordering
# that's good enough for the purposes of these tests - in which we
# never have more than one record of a particular type.
key = lambda rr: rr.type
self.assertEqual(sorted(expected, key=key), sorted(computed, key=key))
def _additionalTest(self, method, makeRecord, addresses):
"""
Verify that certain address records are included in the I{additional}
section of a response generated by L{FileAuthority}.
@param method: See L{_lookupSomeRecords}
@param makeRecord: See L{_lookupSomeRecords}
@param addresses: A L{list} of L{IRecord} providers which the
I{additional} section of the response is required to match
(ignoring order).
@raise self.failureException: If the I{additional} section of the
response consists of different records than those given by
C{addresses}.
"""
target = b"mail." + soa_record.mname.name
d = self._lookupSomeRecords(
method, soa_record, makeRecord, target, addresses)
answer, authority, additional = self.successResultOf(d)
self.assertRecordsMatch(
[dns.RRHeader(
target, address.TYPE, ttl=soa_record.expire, payload=address,
auth=True)
for address in addresses],
additional)
def _additionalMXTest(self, addresses):
"""
Verify that a response to an MX query has certain records in the
I{additional} section.
@param addresses: See C{_additionalTest}
"""
self._additionalTest(
"lookupMailExchange", partial(dns.Record_MX, 10), addresses)
def test_mailExchangeAdditionalA(self):
"""
If the name of the MX response has A records, they are included in the
additional section of the response.
"""
self._additionalMXTest([self._A])
def test_mailExchangeAdditionalAAAA(self):
"""
If the name of the MX response has AAAA records, they are included in
the additional section of the response.
"""
self._additionalMXTest([self._AAAA])
def test_mailExchangeAdditionalBoth(self):
"""
If the name of the MX response has both A and AAAA records, they are
all included in the additional section of the response.
"""
self._additionalMXTest([self._A, self._AAAA])
def _additionalNSTest(self, addresses):
"""
Verify that a response to an NS query has certain records in the
I{additional} section.
@param addresses: See C{_additionalTest}
"""
self._additionalTest(
"lookupNameservers", dns.Record_NS, addresses)
def test_nameserverAdditionalA(self):
"""
If the name of the NS response has A records, they are included in the
additional section of the response.
"""
self._additionalNSTest([self._A])
def test_nameserverAdditionalAAAA(self):
"""
If the name of the NS response has AAAA records, they are included in
the additional section of the response.
"""
self._additionalNSTest([self._AAAA])
def test_nameserverAdditionalBoth(self):
"""
If the name of the NS response has both A and AAAA records, they are
all included in the additional section of the response.
"""
self._additionalNSTest([self._A, self._AAAA])
def _answerCNAMETest(self, addresses):
"""
Verify that a response to a CNAME query has certain records in the
I{answer} section.
@param addresses: See C{_additionalTest}
"""
target = b"www." + soa_record.mname.name
d = self._lookupSomeRecords(
"lookupCanonicalName", soa_record, dns.Record_CNAME, target,
addresses)
answer, authority, additional = self.successResultOf(d)
alias = dns.RRHeader(
soa_record.mname.name, dns.CNAME, ttl=soa_record.expire,
payload=dns.Record_CNAME(target), auth=True)
self.assertRecordsMatch(
[dns.RRHeader(
target, address.TYPE, ttl=soa_record.expire, payload=address,
auth=True)
for address in addresses] + [alias],
answer)
def test_canonicalNameAnswerA(self):
"""
If the name of the CNAME response has A records, they are included in
the answer section of the response.
"""
self._answerCNAMETest([self._A])
def test_canonicalNameAnswerAAAA(self):
"""
If the name of the CNAME response has AAAA records, they are included
in the answer section of the response.
"""
self._answerCNAMETest([self._AAAA])
def test_canonicalNameAnswerBoth(self):
"""
If the name of the CNAME response has both A and AAAA records, they are
all included in the answer section of the response.
"""
self._answerCNAMETest([self._A, self._AAAA])
class NoInitialResponseTestCase(unittest.TestCase):
def test_no_answer(self):
"""
If a request returns a L{dns.NS} response, but we can't connect to the
given server, the request fails with the error returned at connection.
"""
def query(self, *args):
# Pop from the message list, so that it blows up if more queries
# are run than expected.
return succeed(messages.pop(0))
def queryProtocol(self, *args, **kwargs):
return defer.fail(socket.gaierror("Couldn't connect"))
resolver = Resolver(servers=[('0.0.0.0', 0)])
resolver._query = query
messages = []
# Let's patch dns.DNSDatagramProtocol.query, as there is no easy way to
# customize it.
self.patch(dns.DNSDatagramProtocol, "query", queryProtocol)
records = [
dns.RRHeader(name='fooba.com', type=dns.NS, cls=dns.IN, ttl=700,
auth=False,
payload=dns.Record_NS(name='ns.twistedmatrix.com',
ttl=700))]
m = dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1,
rCode=0, trunc=0, maxSize=0)
m.answers = records
messages.append(m)
return self.assertFailure(
resolver.getHostByName("fooby.com"), socket.gaierror)
class SecondaryAuthorityServiceTests(unittest.TestCase):
"""
Tests for L{SecondaryAuthorityService}, a service which keeps one or more
authorities up to date by doing zone transfers from a master.
"""
def test_constructAuthorityFromHost(self):
"""
L{SecondaryAuthorityService} can be constructed with a C{str} giving a
master server address and several domains, causing the creation of a
secondary authority for each domain and that master server address and
the default DNS port.
"""
primary = '192.168.1.2'
service = SecondaryAuthorityService(
primary, ['example.com', 'example.org'])
self.assertEqual(service.primary, primary)
self.assertEqual(service._port, 53)
self.assertEqual(service.domains[0].primary, primary)
self.assertEqual(service.domains[0]._port, 53)
self.assertEqual(service.domains[0].domain, 'example.com')
self.assertEqual(service.domains[1].primary, primary)
self.assertEqual(service.domains[1]._port, 53)
self.assertEqual(service.domains[1].domain, 'example.org')
def test_constructAuthorityFromHostAndPort(self):
"""
L{SecondaryAuthorityService.fromServerAddressAndDomains} constructs a
new L{SecondaryAuthorityService} from a C{str} giving a master server
address and DNS port and several domains, causing the creation of a secondary
authority for each domain and that master server address and the given
DNS port.
"""
primary = '192.168.1.3'
port = 5335
service = SecondaryAuthorityService.fromServerAddressAndDomains(
(primary, port), ['example.net', 'example.edu'])
self.assertEqual(service.primary, primary)
self.assertEqual(service._port, 5335)
self.assertEqual(service.domains[0].primary, primary)
self.assertEqual(service.domains[0]._port, port)
self.assertEqual(service.domains[0].domain, 'example.net')
self.assertEqual(service.domains[1].primary, primary)
self.assertEqual(service.domains[1]._port, port)
self.assertEqual(service.domains[1].domain, 'example.edu')
class SecondaryAuthorityTests(unittest.TestCase):
"""
L{twisted.names.secondary.SecondaryAuthority} correctly constructs objects
with a specified IP address and optionally specified DNS port.
"""
def test_defaultPort(self):
"""
When constructed using L{SecondaryAuthority.__init__}, the default port
of 53 is used.
"""
secondary = SecondaryAuthority('192.168.1.1', 'inside.com')
self.assertEqual(secondary.primary, '192.168.1.1')
self.assertEqual(secondary._port, 53)
self.assertEqual(secondary.domain, 'inside.com')
def test_explicitPort(self):
"""
When constructed using L{SecondaryAuthority.fromServerAddressAndDomain},
the specified port is used.
"""
secondary = SecondaryAuthority.fromServerAddressAndDomain(
('192.168.1.1', 5353), 'inside.com')
self.assertEqual(secondary.primary, '192.168.1.1')
self.assertEqual(secondary._port, 5353)
self.assertEqual(secondary.domain, 'inside.com')
def test_transfer(self):
"""
An attempt is made to transfer the zone for the domain the
L{SecondaryAuthority} was constructed with from the server address it
was constructed with when L{SecondaryAuthority.transfer} is called.
"""
secondary = SecondaryAuthority.fromServerAddressAndDomain(
('192.168.1.2', 1234), 'example.com')
secondary._reactor = reactor = MemoryReactorClock()
secondary.transfer()
# Verify a connection attempt to the server address above
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop(0)
self.assertEqual(host, '192.168.1.2')
self.assertEqual(port, 1234)
# See if a zone transfer query is issued.
proto = factory.buildProtocol((host, port))
transport = StringTransport()
proto.makeConnection(transport)
msg = Message()
# DNSProtocol.writeMessage length encodes the message by prepending a
# 2 byte message length to the buffered value.
msg.decode(StringIO(transport.value()[2:]))
self.assertEqual(
[dns.Query('example.com', dns.AXFR, dns.IN)], msg.queries)
| gpl-3.0 | 8,037,446,241,315,203,000 | 35.087165 | 110 | 0.593842 | false |
gamechanger/kafka-python | test/test_client_async.py | 1 | 8085 | import time
import pytest
from kafka.client_async import KafkaClient
from kafka.common import BrokerMetadata
import kafka.common as Errors
from kafka.conn import ConnectionStates
from kafka.future import Future
from kafka.protocol.metadata import MetadataResponse, MetadataRequest
from kafka.protocol.produce import ProduceRequest
@pytest.mark.parametrize("bootstrap,expected_hosts", [
(None, [('localhost', 9092)]),
('foobar:1234', [('foobar', 1234)]),
('fizzbuzz', [('fizzbuzz', 9092)]),
('foo:12,bar:34', [('foo', 12), ('bar', 34)]),
(['fizz:56', 'buzz'], [('fizz', 56), ('buzz', 9092)]),
])
def test_bootstrap_servers(mocker, bootstrap, expected_hosts):
mocker.patch.object(KafkaClient, '_bootstrap')
if bootstrap is None:
KafkaClient()
else:
KafkaClient(bootstrap_servers=bootstrap)
# host order is randomized internally, so resort before testing
(hosts,), _ = KafkaClient._bootstrap.call_args # pylint: disable=no-member
assert sorted(hosts) == sorted(expected_hosts)
@pytest.fixture
def conn(mocker):
conn = mocker.patch('kafka.client_async.BrokerConnection')
conn.return_value = conn
conn.state = ConnectionStates.CONNECTED
conn.send.return_value = Future().success(
MetadataResponse(
[(0, 'foo', 12), (1, 'bar', 34)], # brokers
[])) # topics
conn.blacked_out.return_value = False
conn.connect.return_value = conn.state
return conn
def test_bootstrap_success(conn):
conn.state = ConnectionStates.CONNECTED
cli = KafkaClient()
conn.assert_called_once_with('localhost', 9092, **cli.config)
conn.connect.assert_called_with()
conn.send.assert_called_once_with(MetadataRequest([]))
assert cli._bootstrap_fails == 0
assert cli.cluster.brokers() == set([BrokerMetadata(0, 'foo', 12),
BrokerMetadata(1, 'bar', 34)])
def test_bootstrap_failure(conn):
conn.state = ConnectionStates.DISCONNECTED
cli = KafkaClient()
conn.assert_called_once_with('localhost', 9092, **cli.config)
conn.connect.assert_called_with()
conn.close.assert_called_with()
assert cli._bootstrap_fails == 1
assert cli.cluster.brokers() == set()
def test_can_connect(conn):
cli = KafkaClient()
# Node is not in broker metadata - cant connect
assert not cli._can_connect(2)
# Node is in broker metadata but not in _conns
assert 0 not in cli._conns
assert cli._can_connect(0)
# Node is connected, can't reconnect
cli._initiate_connect(0)
assert not cli._can_connect(0)
# Node is disconnected, can connect
cli._conns[0].state = ConnectionStates.DISCONNECTED
assert cli._can_connect(0)
# Node is disconnected, but blacked out
conn.blacked_out.return_value = True
assert not cli._can_connect(0)
def test_initiate_connect(conn):
cli = KafkaClient()
try:
# Node not in metadata, raises AssertionError
cli._initiate_connect(2)
except AssertionError:
pass
else:
assert False, 'Exception not raised'
assert 0 not in cli._conns
state = cli._initiate_connect(0)
assert cli._conns[0] is conn
assert state is conn.state
def test_finish_connect(conn):
cli = KafkaClient()
try:
# Node not in metadata, raises AssertionError
cli._initiate_connect(2)
except AssertionError:
pass
else:
assert False, 'Exception not raised'
assert 0 not in cli._conns
cli._initiate_connect(0)
conn.connect.return_value = ConnectionStates.CONNECTING
state = cli._finish_connect(0)
assert 0 in cli._connecting
assert state is ConnectionStates.CONNECTING
conn.connect.return_value = ConnectionStates.CONNECTED
state = cli._finish_connect(0)
assert 0 not in cli._connecting
assert state is ConnectionStates.CONNECTED
# Failure to connect should trigger metadata update
assert not cli.cluster._need_update
cli._connecting.add(0)
conn.connect.return_value = ConnectionStates.DISCONNECTED
state = cli._finish_connect(0)
assert 0 not in cli._connecting
assert state is ConnectionStates.DISCONNECTED
assert cli.cluster._need_update
def test_ready(conn):
cli = KafkaClient()
# Node not in metadata
assert not cli.ready(2)
# Node in metadata will connect
assert 0 not in cli._conns
assert cli.ready(0)
assert 0 in cli._conns
assert cli._conns[0].state is ConnectionStates.CONNECTED
# metadata refresh blocks ready nodes
assert cli.ready(0)
assert cli.ready(1)
cli._metadata_refresh_in_progress = True
assert not cli.ready(0)
assert not cli.ready(1)
# requesting metadata update also blocks ready nodes
cli._metadata_refresh_in_progress = False
assert cli.ready(0)
assert cli.ready(1)
cli.cluster.request_update()
cli.cluster.config['retry_backoff_ms'] = 0
assert not cli._metadata_refresh_in_progress
assert not cli.ready(0)
assert not cli.ready(1)
cli.cluster._need_update = False
# if connection can't send more, not ready
assert cli.ready(0)
assert cli.ready(1)
conn.can_send_more.return_value = False
assert not cli.ready(0)
conn.can_send_more.return_value = True
# disconnected nodes, not ready
assert cli.ready(0)
assert cli.ready(1)
conn.connected.return_value = False
assert not cli.ready(0)
conn.connected.return_value = True
# connecting node connects
cli._connecting.add(0)
conn.connected.return_value = False
cli.ready(0)
assert 0 not in cli._connecting
assert cli._conns[0].connect.called_with()
def test_close(conn):
cli = KafkaClient()
# Unknown node - silent
cli.close(2)
# Single node close
cli._initiate_connect(0)
assert not conn.close.call_count
cli.close(0)
assert conn.close.call_count == 1
# All node close
cli._initiate_connect(1)
cli.close()
assert conn.close.call_count == 3
def test_is_disconnected(conn):
cli = KafkaClient()
# False if not connected yet
conn.state = ConnectionStates.DISCONNECTED
assert not cli.is_disconnected(0)
cli._initiate_connect(0)
assert cli.is_disconnected(0)
conn.state = ConnectionStates.CONNECTING
assert not cli.is_disconnected(0)
conn.state = ConnectionStates.CONNECTED
assert not cli.is_disconnected(0)
def test_send(conn):
cli = KafkaClient()
try:
cli.send(2, None)
except Errors.NodeNotReadyError:
pass
else:
assert False, 'NodeNotReadyError not raised'
cli._initiate_connect(0)
# ProduceRequest w/ 0 required_acks -> no response
request = ProduceRequest(0, 0, [])
ret = cli.send(0, request)
assert conn.send.called_with(request, expect_response=False)
assert isinstance(ret, Future)
request = MetadataRequest([])
cli.send(0, request)
assert conn.send.called_with(request, expect_response=True)
def test_poll(mocker):
mocker.patch.object(KafkaClient, '_bootstrap')
metadata = mocker.patch.object(KafkaClient, '_maybe_refresh_metadata')
_poll = mocker.patch.object(KafkaClient, '_poll')
cli = KafkaClient()
tasks = mocker.patch.object(cli._delayed_tasks, 'next_at')
# metadata timeout wins
metadata.return_value = 1000
tasks.return_value = 2
cli.poll()
_poll.assert_called_with(1.0)
# user timeout wins
cli.poll(250)
_poll.assert_called_with(0.25)
# tasks timeout wins
tasks.return_value = 0
cli.poll(250)
_poll.assert_called_with(0)
# default is request_timeout_ms
metadata.return_value = 1000000
tasks.return_value = 10000
cli.poll()
_poll.assert_called_with(cli.config['request_timeout_ms'] / 1000.0)
def test__poll():
pass
def test_in_flight_request_count():
pass
def test_least_loaded_node():
pass
def test_set_topics():
pass
def test_maybe_refresh_metadata():
pass
def test_schedule():
pass
def test_unschedule():
pass
| apache-2.0 | -6,498,337,683,652,845,000 | 25.95 | 79 | 0.671614 | false |
tadeo/django-localflavor-uy | tests/tests.py | 14 | 2030 | from __future__ import unicode_literals
from django.contrib.localflavor.uy.forms import UYDepartamentSelect, UYCIField
from django.contrib.localflavor.uy.util import get_validation_digit
from django.test import SimpleTestCase
class UYLocalFlavorTests(SimpleTestCase):
def test_UYDepartmentSelect(self):
f = UYDepartamentSelect()
out = '''<select name="departamentos">
<option value="G">Artigas</option>
<option value="A">Canelones</option>
<option value="E">Cerro Largo</option>
<option value="L">Colonia</option>
<option value="Q">Durazno</option>
<option value="N">Flores</option>
<option value="O">Florida</option>
<option value="P">Lavalleja</option>
<option value="B">Maldonado</option>
<option value="S" selected="selected">Montevideo</option>
<option value="I">Paysand\xfa</option>
<option value="J">R\xedo Negro</option>
<option value="F">Rivera</option>
<option value="C">Rocha</option>
<option value="H">Salto</option>
<option value="M">San Jos\xe9</option>
<option value="K">Soriano</option>
<option value="R">Tacuaremb\xf3</option>
<option value="D">Treinta y Tres</option>
</select>'''
self.assertHTMLEqual(f.render('departamentos', 'S'), out)
def test_UYCIField(self):
error_format = ['Enter a valid CI number in X.XXX.XXX-X,XXXXXXX-X or XXXXXXXX format.']
error_invalid = ['Enter a valid CI number.']
valid = {
'4098053': '4098053',
'409805-3': '409805-3',
'409.805-3': '409.805-3',
'10054112': '10054112',
'1005411-2': '1005411-2',
'1.005.411-2': '1.005.411-2',
}
invalid = {
'foo': ['Enter a valid CI number in X.XXX.XXX-X,XXXXXXX-X or XXXXXXXX format.'],
'409805-2': ['Enter a valid CI number.'],
'1.005.411-5': ['Enter a valid CI number.'],
}
self.assertFieldOutput(UYCIField, valid, invalid)
self.assertEqual(get_validation_digit(409805), 3)
self.assertEqual(get_validation_digit(1005411), 2)
| bsd-3-clause | 1,419,349,010,218,286,300 | 37.301887 | 95 | 0.648768 | false |
usc-isi/nova | nova/network/minidns.py | 19 | 6184 | # Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
from nova import exception
from nova import flags
class MiniDNS(object):
""" Trivial DNS driver. This will read/write to a local, flat file
and have no effect on your actual DNS system. This class is
strictly for testing purposes, and should keep you out of dependency
hell.
Note that there is almost certainly a race condition here that
will manifest anytime instances are rapidly created and deleted.
A proper implementation will need some manner of locking."""
def __init__(self):
if flags.FLAGS.logdir:
self.filename = os.path.join(flags.FLAGS.logdir, "dnstest.txt")
else:
self.filename = "dnstest.txt"
if not os.path.exists(self.filename):
f = open(self.filename, "w+")
f.write("# minidns\n\n\n")
f.close()
def get_domains(self):
entries = []
infile = open(self.filename, 'r')
for line in infile:
entry = self.parse_line(line)
if entry and entry['address'].lower() == 'domain'.lower():
entries.append(entry['name'])
infile.close()
return entries
def qualify(self, name, domain):
if domain:
qualified = "%s.%s" % (name, domain)
else:
qualified = name
return qualified
def create_entry(self, name, address, type, domain):
if type.lower() != 'a':
raise exception.InvalidInput(_("This driver only supports "
"type 'a'"))
if self.get_entries_by_name(name, domain):
raise exception.FloatingIpDNSExists(name=name, domain=domain)
outfile = open(self.filename, 'a+')
outfile.write("%s %s %s\n" %
(address, self.qualify(name, domain), type))
outfile.close()
def parse_line(self, line):
vals = line.split()
if len(vals) < 3:
return None
else:
entry = {}
entry['address'] = vals[0]
entry['name'] = vals[1]
entry['type'] = vals[2]
if entry['address'] == 'domain':
entry['domain'] = entry['name']
else:
entry['domain'] = entry['name'].partition('.')[2]
return entry
def delete_entry(self, name, domain):
deleted = False
infile = open(self.filename, 'r')
outfile = tempfile.NamedTemporaryFile('w', delete=False)
for line in infile:
entry = self.parse_line(line)
if ((not entry) or
                    entry['name'].lower() != self.qualify(name, domain).lower()):
outfile.write(line)
else:
deleted = True
infile.close()
outfile.close()
shutil.move(outfile.name, self.filename)
if not deleted:
raise exception.NotFound
def modify_address(self, name, address, domain):
if not self.get_entries_by_name(name, domain):
raise exception.NotFound
infile = open(self.filename, 'r')
outfile = tempfile.NamedTemporaryFile('w', delete=False)
for line in infile:
entry = self.parse_line(line)
if (entry and
entry['name'].lower() == self.qualify(name, domain).lower()):
outfile.write("%s %s %s\n" %
(address, self.qualify(name, domain), entry['type']))
else:
outfile.write(line)
infile.close()
outfile.close()
shutil.move(outfile.name, self.filename)
def get_entries_by_address(self, address, domain):
entries = []
infile = open(self.filename, 'r')
for line in infile:
entry = self.parse_line(line)
if entry and entry['address'].lower() == address.lower():
if entry['name'].lower().endswith(domain.lower()):
domain_index = entry['name'].lower().find(domain.lower())
entries.append(entry['name'][0:domain_index - 1])
infile.close()
return entries
def get_entries_by_name(self, name, domain):
entries = []
infile = open(self.filename, 'r')
for line in infile:
entry = self.parse_line(line)
if (entry and
entry['name'].lower() == self.qualify(name, domain).lower()):
entries.append(entry['address'])
infile.close()
return entries
def delete_dns_file(self):
os.remove(self.filename)
def create_domain(self, fqdomain):
if self.get_entries_by_name(fqdomain, ''):
raise exception.FloatingIpDNSExists(name=fqdomain, domain='')
outfile = open(self.filename, 'a+')
outfile.write("%s %s %s\n" %
('domain', fqdomain, 'domain'))
outfile.close()
def delete_domain(self, fqdomain):
deleted = False
infile = open(self.filename, 'r')
outfile = tempfile.NamedTemporaryFile('w', delete=False)
for line in infile:
entry = self.parse_line(line)
if ((not entry) or
entry['domain'] != fqdomain):
outfile.write(line)
else:
print "deleted %s" % entry
deleted = True
infile.close()
outfile.close()
shutil.move(outfile.name, self.filename)
if not deleted:
raise exception.NotFound
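# Illustrative usage sketch (hypothetical names and addresses):
#   driver = MiniDNS()
#   driver.create_domain('example.org')
#   driver.create_entry('www', '10.0.0.5', 'a', 'example.org')
#   driver.get_entries_by_name('www', 'example.org')   # -> ['10.0.0.5']
#   driver.delete_entry('www', 'example.org')
#   driver.delete_domain('example.org')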
| apache-2.0 | 5,953,969,610,956,320,000 | 33.741573 | 78 | 0.560317 | false |
bdyetton/prettychart | framework/sessions/__init__.py | 5 | 5928 | # -*- coding: utf-8 -*-
import furl
import urllib
import urlparse
import bson.objectid
import httplib as http
import itsdangerous
from werkzeug.local import LocalProxy
from weakref import WeakKeyDictionary
from flask import request, make_response
from framework.flask import redirect
from website import settings
from .model import Session
def add_key_to_url(url, scheme, key):
"""Redirects the user to the requests URL with the given key appended
to the query parameters.
"""
query = request.args.to_dict()
query['view_only'] = key
replacements = {'query': urllib.urlencode(query)}
if scheme:
replacements['scheme'] = scheme
parsed_url = urlparse.urlparse(url)
if parsed_url.fragment:
        # Fragments shouldn't exist server side, so this means someone set up a '#' in the URL
# WSGI sucks and auto unescapes it so we just shove it back into the path with the escaped hash
replacements['path'] = '{}%23{}'.format(parsed_url.path, parsed_url.fragment)
replacements['fragment'] = ''
parsed_redirect_url = parsed_url._replace(**replacements)
return urlparse.urlunparse(parsed_redirect_url)
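# Illustrative sketch (hypothetical values): if the current request is for
# http://localhost/project/abc12/?foo=1, then
#   add_key_to_url(request.url, 'http', 'somekey')
# rebuilds that URL with view_only=somekey merged into the existing query
# parameters, which is how prepare_private_key() keeps the key across redirects.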
def prepare_private_key():
"""`before_request` handler that checks the Referer header to see if the user
is requesting from a view-only link. If so, reappend the view-only key.
NOTE: In order to ensure the execution order of the before_request callbacks,
this is attached in website.app.init_app rather than using
@app.before_request.
"""
# Done if not GET request
if request.method != 'GET':
return
# Done if private_key in args
key_from_args = request.args.get('view_only', '')
if key_from_args:
return
    # grab the view-only key from the previous request for users who are not logged in
if request.referrer:
referrer_parsed = urlparse.urlparse(request.referrer)
scheme = referrer_parsed.scheme
key = urlparse.parse_qs(
urlparse.urlparse(request.referrer).query
).get('view_only')
if key:
key = key[0]
else:
scheme = None
key = None
# Update URL and redirect
if key and not session.is_authenticated:
new_url = add_key_to_url(request.url, scheme, key)
return redirect(new_url, code=http.TEMPORARY_REDIRECT)
def get_session():
session = sessions.get(request._get_current_object())
if not session:
session = Session()
set_session(session)
return session
def set_session(session):
sessions[request._get_current_object()] = session
def create_session(response, data=None):
current_session = get_session()
if current_session:
current_session.data.update(data or {})
current_session.save()
cookie_value = itsdangerous.Signer(settings.SECRET_KEY).sign(current_session._id)
else:
session_id = str(bson.objectid.ObjectId())
session = Session(_id=session_id, data=data or {})
session.save()
cookie_value = itsdangerous.Signer(settings.SECRET_KEY).sign(session_id)
set_session(session)
if response is not None:
response.set_cookie(settings.COOKIE_NAME, value=cookie_value)
return response
sessions = WeakKeyDictionary()
session = LocalProxy(get_session)
# Request callbacks
# NOTE: This gets attached in website.app.init_app to ensure correct callback
# order
def before_request():
from framework import sentry
from framework.auth import cas
from framework.auth.core import User
from framework.auth import authenticate
from framework.routing import json_renderer
# Central Authentication Server Ticket Validation and Authentication
ticket = request.args.get('ticket')
if ticket:
service_url = furl.furl(request.url)
service_url.args.pop('ticket')
        # Attempt auth with CAS, and return a proper redirect response
return cas.make_response_from_ticket(ticket=ticket, service_url=service_url.url)
# Central Authentication Server OAuth Bearer Token
authorization = request.headers.get('Authorization')
if authorization and authorization.startswith('Bearer '):
client = cas.get_client()
try:
access_token = cas.parse_auth_header(authorization)
cas_resp = client.profile(access_token)
except cas.CasError as err:
sentry.log_exception()
# NOTE: We assume that the request is an AJAX request
return json_renderer(err)
if cas_resp.authenticated:
user = User.load(cas_resp.user)
return authenticate(user, access_token=access_token, response=None)
return make_response('', http.UNAUTHORIZED)
if request.authorization:
# TODO: Fix circular import
from framework.auth.core import get_user
user = get_user(
email=request.authorization.username,
password=request.authorization.password
)
# Create empty session
        # TODO: Shouldn't need to create a session for Basic Auth
session = Session()
if user:
session.data['auth_user_username'] = user.username
session.data['auth_user_id'] = user._primary_key
session.data['auth_user_fullname'] = user.fullname
else:
# Invalid key: Not found in database
session.data['auth_error_code'] = http.FORBIDDEN
set_session(session)
return
cookie = request.cookies.get(settings.COOKIE_NAME)
if cookie:
try:
session_id = itsdangerous.Signer(settings.SECRET_KEY).unsign(cookie)
session = Session.load(session_id) or Session(_id=session_id)
set_session(session)
return
except:
pass
def after_request(response):
if session.data.get('auth_user_id'):
session.save()
return response
| apache-2.0 | -9,171,792,291,571,134,000 | 30.870968 | 103 | 0.659413 | false |
BBN-Q/QGL | QGL/mm.py | 1 | 1200 | '''
Taken from Guido van Rossum's Five-Minute MultiMethods in Python
http://www.artima.com/weblogs/viewpost.jsp?thread=101605
Examples:
from mm import multimethod
@multimethod(int, int)
def foo(a, b):
...code for two ints...
@multimethod(float, float)
def foo(a, b):
...code for two floats...
@multimethod(str, str)
def foo(a, b):
...code for two strings...
'''
registry = {}
class MultiMethod(object):
def __init__(self, name):
self.name = name
self.typemap = {}
def __call__(self, *args):
types = tuple(arg.__class__ for arg in args) # a generator expression!
function = self.typemap.get(types)
if function is None:
raise TypeError("no match")
return function(*args)
def register(self, types, function):
if types in self.typemap:
raise TypeError("duplicate registration")
self.typemap[types] = function
def multimethod(*types):
def register(function):
name = function.__name__
mm = registry.get(name)
if mm is None:
mm = registry[name] = MultiMethod(name)
mm.register(types, function)
return mm
return register
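# Minimal usage sketch (illustrative; 'describe' and its arguments are made up):
# each call is dispatched to the implementation registered for the exact
# argument types, and an unregistered combination raises TypeError("no match").
if __name__ == '__main__':
    @multimethod(int, int)
    def describe(a, b):
        return 'two ints'
    @multimethod(str, str)
    def describe(a, b):
        return 'two strings'
    assert describe(1, 2) == 'two ints'
    assert describe('a', 'b') == 'two strings'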
| apache-2.0 | -2,178,630,334,399,662,300 | 22.529412 | 79 | 0.611667 | false |
beav/pulp | server/pulp/server/webservices/serialization/link.py | 3 | 2631 | # -*- coding: utf-8 -*-
#
# Copyright © 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
"""
Generation of link objects for REST object serialization.
link object:
{
"_href": <uri path to resource or collection>
}
"""
import copy
from pulp.server.webservices import http
_LINK_OBJ_SKEL = {
'_href': None,
}
def link_obj(href):
"""
Create a link object for an arbitrary path.
@param href: uri path
@type href: str
@return: link object
@rtype: dict
"""
link = copy.copy(_LINK_OBJ_SKEL)
link['_href'] = href
return link
def current_link_obj():
"""
Create a link object for the path for the current request.
@return: link object
@rtype: dict
"""
link = copy.copy(_LINK_OBJ_SKEL)
link['_href'] = http.uri_path()
return link
def child_link_obj(*path_elements):
"""
Create a link object that appends the given elements to the path of the
current request.
Example: current request path = '/foo/bar/baz/'
path elements = ['fee', 'fie']
returned path = '/foo/bar/baz/fee/fie/'
@param path_elements: path elements to append
@type path_elements: *str
@return: link object
@rtype: dict
"""
suffix = '/'.join(path_elements)
link = copy.copy(_LINK_OBJ_SKEL)
link['_href'] = http.extend_uri_path(suffix)
return link
def search_safe_link_obj(resource_id):
"""
Like child_link_obj, except this can be used with search results. If the
current request URL ends with 'search/', that gets stripped off before
creating the object link. If it does not end with 'search/', this acts just
like child_link_obj.
@param resource_id: id of the resource to which you need a link
@type resource_id: basestring
@return: dict with '_href' key and corresponding value
@rtype: dict
"""
search_suffix = 'search/'
uri_path = http.uri_path()
if uri_path.endswith(search_suffix):
uri_path = uri_path[:-len(search_suffix)]
link = copy.copy(_LINK_OBJ_SKEL)
link['_href'] = http.extend_uri_path(resource_id, uri_path)
return link
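# Illustrative examples (hypothetical paths): link_obj('/v2/tasks/123/') returns
# {'_href': '/v2/tasks/123/'}; for a request to /v2/repositories/search/,
# search_safe_link_obj('repo-1') strips the trailing 'search/' before appending
# the resource id, so the link points at the resource rather than the search view.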
| gpl-2.0 | -6,191,462,820,066,760,000 | 27.27957 | 80 | 0.656274 | false |
vmrob/needy | needy/filesystem.py | 3 | 4963 | import os
import shutil
import signal
import tempfile
import time
import json
import hashlib
from contextlib import contextmanager
O_BINARY = getattr(os, 'O_BINARY', 0)
class TempDir:
def __enter__(self):
self.__path = tempfile.mkdtemp()
return self.__path
def __exit__(self, etype, value, traceback):
shutil.rmtree(self.__path)
class SignalTimeout:
def __init__(self, seconds):
self.__seconds = seconds
@staticmethod
def timeout_handler(signum, frame):
pass
def __enter__(self):
self.__previous_handler = signal.signal(signal.SIGALRM, SignalTimeout.timeout_handler)
signal.alarm(self.__seconds)
def __exit__(self, etype, value, traceback):
signal.alarm(0)
signal.signal(signal.SIGALRM, self.__previous_handler)
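# Illustrative usage (POSIX only, since SIGALRM does not exist on Windows): the
# alarm interrupts a blocking call after roughly the given number of seconds,
# which is how __fcntl_lock_fd below bounds a blocking flock.
#   with SignalTimeout(5):
#       fcntl.flock(fd, fcntl.LOCK_EX)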
def __win32_lock_fd(fd, timeout=None):
'''returns True if the file descriptor is successfully locked'''
import pywintypes
import win32con
import win32file
import winerror
try:
handle = win32file._get_osfhandle(fd)
if timeout is None:
win32file.LockFileEx(handle, win32con.LOCKFILE_EXCLUSIVE_LOCK, 0, -0x10000, pywintypes.OVERLAPPED())
return True
if timeout > 0:
start = time.time()
while True:
try:
win32file.LockFileEx(handle, win32con.LOCKFILE_EXCLUSIVE_LOCK | win32con.LOCKFILE_FAIL_IMMEDIATELY, 0, -0x10000, pywintypes.OVERLAPPED())
return True
except pywintypes.error as e:
if e.winerror != winerror.ERROR_LOCK_VIOLATION:
break
time.sleep(0.05)
if time.time() > start + timeout:
break
else:
win32file.LockFileEx(handle, win32con.LOCKFILE_EXCLUSIVE_LOCK | win32con.LOCKFILE_FAIL_IMMEDIATELY, 0, -0x10000, pywintypes.OVERLAPPED())
return True
except pywintypes.error:
pass
return False
def __fcntl_lock_fd(fd, timeout=None):
'''returns True if the file descriptor is successfully locked'''
import fcntl
try:
if timeout is None:
fcntl.flock(fd, fcntl.LOCK_EX)
elif timeout > 0:
with SignalTimeout(timeout):
fcntl.flock(fd, fcntl.LOCK_EX)
else:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
return False
return True
def lock_fd(fd, timeout=None):
'''returns True if the file descriptor is successfully locked'''
try:
return __win32_lock_fd(fd, timeout)
except ImportError:
return __fcntl_lock_fd(fd, timeout)
return False
def lock_file(file_path, timeout=None):
'''returns file descriptor to newly locked file or None if file couldn't be locked'''
fd = os.open(file_path, os.O_RDWR | os.O_CREAT | O_BINARY)
try:
if lock_fd(fd, timeout):
return fd
except:
os.close(fd)
raise
os.close(fd)
return None
def clean_file(file_path):
parent_dir = os.path.dirname(file_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
elif os.path.exists(file_path):
os.remove(file_path)
def clean_directory(directory_path):
if os.path.exists(directory_path):
shutil.rmtree(directory_path)
os.makedirs(directory_path)
def os_file(path, flags, mode):
fd = os.open(path, flags)
return os.fdopen(fd, mode)
def copy_if_changed(src, dst):
''' Wrapper around shutil.copy2 that only performs a copy if src and dst differ in content'''
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.split(src)[1])
if os.path.exists(dst):
with open(src, 'rb') as f:
src_file_hash = file_hash(f, hashlib.sha256())
with open(dst, 'rb') as f:
dst_file_hash = file_hash(f, hashlib.sha256())
if src_file_hash == dst_file_hash:
return
shutil.copy2(src, dst)
# from http://stackoverflow.com/questions/3431825
def file_hash(afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.digest()
@contextmanager
def dict_file(path):
d = dict()
if os.path.exists(path):
with open(path, 'r') as f:
contents = f.read()
if contents:
d = json.loads(contents)
yield d
with open(path, 'w') as f:
json.dump(d, f)
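# Illustrative usage (hypothetical file name): dict_file loads the JSON dict if
# the file exists, yields it for mutation, and writes it back when the block exits.
#   with dict_file('state.json') as state:
#       state['runs'] = state.get('runs', 0) + 1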
def force_rmtree(path):
def rmtree_onerror(func, path, exc_info):
''' from http://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied '''
import stat
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
shutil.rmtree(path, onerror=rmtree_onerror)
| mit | -5,261,998,540,726,239,000 | 27.198864 | 157 | 0.603869 | false |
ryanmockabee/golfr | flask/lib/python3.6/site-packages/py/_path/common.py | 4 | 14844 | """
"""
import os, sys, posixpath
import fnmatch
import py
# Moved from local.py.
iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
try:
from os import fspath
except ImportError:
def fspath(path):
"""
Return the string representation of the path.
If str or bytes is passed in, it is returned unchanged.
This code comes from PEP 519, modified to support earlier versions of
python.
This is required for python < 3.6.
"""
if isinstance(path, (py.builtin.text, py.builtin.bytes)):
return path
# Work from the object's type to match method resolution of other magic
# methods.
path_type = type(path)
try:
return path_type.__fspath__(path)
except AttributeError:
if hasattr(path_type, '__fspath__'):
raise
try:
import pathlib
except ImportError:
pass
else:
if isinstance(path, pathlib.PurePath):
return py.builtin.text(path)
raise TypeError("expected str, bytes or os.PathLike object, not "
+ path_type.__name__)
class Checkers:
_depend_on_existence = 'exists', 'link', 'dir', 'file'
def __init__(self, path):
self.path = path
def dir(self):
raise NotImplementedError
def file(self):
raise NotImplementedError
def dotfile(self):
return self.path.basename.startswith('.')
def ext(self, arg):
if not arg.startswith('.'):
arg = '.' + arg
return self.path.ext == arg
def exists(self):
raise NotImplementedError
def basename(self, arg):
return self.path.basename == arg
def basestarts(self, arg):
return self.path.basename.startswith(arg)
def relto(self, arg):
return self.path.relto(arg)
def fnmatch(self, arg):
return self.path.fnmatch(arg)
def endswith(self, arg):
return str(self.path).endswith(arg)
def _evaluate(self, kw):
for name, value in kw.items():
invert = False
meth = None
try:
meth = getattr(self, name)
except AttributeError:
if name[:3] == 'not':
invert = True
try:
meth = getattr(self, name[3:])
except AttributeError:
pass
if meth is None:
raise TypeError(
"no %r checker available for %r" % (name, self.path))
try:
if py.code.getrawcode(meth).co_argcount > 1:
if (not meth(value)) ^ invert:
return False
else:
if bool(value) ^ bool(meth()) ^ invert:
return False
except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
# EBUSY feels not entirely correct,
            # but it's kind of necessary since ENOMEDIUM
# is not accessible in python
for name in self._depend_on_existence:
if name in kw:
if kw.get(name):
return False
name = 'not' + name
if name in kw:
if not kw.get(name):
return False
return True
class NeverRaised(Exception):
pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
Checkers = Checkers
def __div__(self, other):
return self.join(fspath(other))
__truediv__ = __div__ # py3k
def basename(self):
""" basename part of path. """
return self._getbyspec('basename')[0]
basename = property(basename, None, None, basename.__doc__)
def dirname(self):
""" dirname part of path. """
return self._getbyspec('dirname')[0]
dirname = property(dirname, None, None, dirname.__doc__)
def purebasename(self):
""" pure base name of the path."""
return self._getbyspec('purebasename')[0]
purebasename = property(purebasename, None, None, purebasename.__doc__)
def ext(self):
""" extension of the path (including the '.')."""
return self._getbyspec('ext')[0]
ext = property(ext, None, None, ext.__doc__)
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
return self.new(basename='').join(*args, **kwargs)
def read_binary(self):
""" read and return a bytestring from reading the path. """
with self.open('rb') as f:
return f.read()
def read_text(self, encoding):
""" read and return a Unicode string from reading the path. """
with self.open("r", encoding=encoding) as f:
return f.read()
def read(self, mode='r'):
""" read and return a bytestring from reading the path. """
with self.open(mode) as f:
return f.read()
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
if sys.version_info < (3, ):
mode = 'rU'
else: # python 3 deprecates mode "U" in favor of "newline" option
mode = 'r'
if not cr:
content = self.read(mode)
return content.split('\n')
else:
f = self.open(mode)
try:
return f.readlines()
finally:
f.close()
def load(self):
""" (deprecated) return object unpickled from self.read() """
f = self.open('rb')
try:
return py.error.checked_call(py.std.pickle.load, f)
finally:
f.close()
def move(self, target):
""" move this path to target. """
if target.relto(self):
raise py.error.EINVAL(target,
"cannot move path into a subdirectory of itself")
try:
self.rename(target)
except py.error.EXDEV: # invalid cross-device link
self.copy(target)
self.remove()
def __repr__(self):
""" return a string representation of this path. """
return repr(str(self))
def check(self, **kw):
""" check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
if not kw:
kw = {'exists' : 1}
return self.Checkers(self)._evaluate(kw)
def fnmatch(self, pattern):
"""return true if the basename/fullname matches the glob-'pattern'.
valid pattern characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
If the pattern contains a path-separator then the full path
is used for pattern matching and a '*' is prepended to the
pattern.
if the pattern doesn't contain a path-separator the pattern
is only matched against the basename.
"""
return FNMatcher(pattern)(self)
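    # Illustrative examples (POSIX-style paths, hypothetical files):
    #   py.path.local('/tmp/pkg/mod.py').fnmatch('*.py')      -> True (basename match)
    #   py.path.local('/tmp/pkg/mod.py').fnmatch('pkg/*.py')  -> True (full-path match)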
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, PathBase)):
raise TypeError("%r: not a string or path object" %(relpath,))
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
#assert strrelpath[-1] == self.sep
#assert strrelpath[-2] != self.sep
strself = self.strpath
if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
if os.path.normcase(strself).startswith(
os.path.normcase(strrelpath)):
return strself[len(strrelpath):]
elif strself.startswith(strrelpath):
return strself[len(strrelpath):]
return ""
def ensure_dir(self, *args):
""" ensure the path joined with args is a directory. """
return self.ensure(*args, **{"dir": True})
def bestrelpath(self, dest):
""" return a string which is a relative path from self
(assumed to be a directory) to dest such that
        self.join(bestrelpath) == dest. If no such
        path can be determined, return dest.
"""
try:
if self == dest:
return os.curdir
base = self.common(dest)
if not base: # can be the case on windows
return str(dest)
self2base = self.relto(base)
reldest = dest.relto(base)
if self2base:
n = self2base.count(self.sep) + 1
else:
n = 0
l = [os.pardir] * n
if reldest:
l.append(reldest)
target = dest.sep.join(l)
return target
except AttributeError:
return str(dest)
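    # Illustrative example (POSIX-style paths): with self at /a/b and dest at
    # /a/c/d the common base is /a, so bestrelpath returns '../c/d' and
    # self.join('../c/d') resolves to dest.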
def exists(self):
return self.check()
def isdir(self):
return self.check(dir=1)
def isfile(self):
return self.check(file=1)
def parts(self, reverse=False):
""" return a root-first list of all ancestor directories
plus the path itself.
"""
current = self
l = [self]
while 1:
last = current
current = current.dirpath()
if last == current:
break
l.append(current)
if not reverse:
l.reverse()
return l
def common(self, other):
""" return the common part shared with the other path
or None if there is no common part.
"""
last = None
for x, y in zip(self.parts(), other.parts()):
if x != y:
return last
last = x
return last
def __add__(self, other):
""" return new path object with 'other' added to the basename"""
return self.new(basename=self.basename+str(other))
def __cmp__(self, other):
""" return sort value (-1, 0, +1). """
try:
return cmp(self.strpath, other.strpath)
except AttributeError:
return cmp(str(self), str(other)) # self.path, other.path)
def __lt__(self, other):
try:
return self.strpath < other.strpath
except AttributeError:
return str(self) < str(other)
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
""" yields all paths below the current one
fil is a filter (glob pattern or callable), if not matching the
path will not be yielded, defaulting to None (everything is
returned)
rec is a filter (glob pattern or callable) that controls whether
a node is descended, defaulting to None
            ignore is an Exception class that is ignored when calling listdir()
on any of the paths (by default, all exceptions are reported)
bf if True will cause a breadthfirst search instead of the
default depthfirst. Default: False
sort if True will sort entries within each directory level.
"""
for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
yield x
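    # Illustrative example (hypothetical tree): p.visit(fil='*.py',
    # rec=lambda x: x.basename != '.git') yields every path below p matching
    # '*.py' while never descending into directories named '.git'.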
def _sortlist(self, res, sort):
if sort:
if hasattr(sort, '__call__'):
res.sort(sort)
else:
res.sort()
def samefile(self, other):
""" return True if other refers to the same stat object as self. """
return self.strpath == str(other)
def __fspath__(self):
return self.strpath
class Visitor:
def __init__(self, fil, rec, ignore, bf, sort):
if isinstance(fil, py.builtin._basestring):
fil = FNMatcher(fil)
if isinstance(rec, py.builtin._basestring):
self.rec = FNMatcher(rec)
elif not hasattr(rec, '__call__') and rec:
self.rec = lambda path: True
else:
self.rec = rec
self.fil = fil
self.ignore = ignore
self.breadthfirst = bf
self.optsort = sort and sorted or (lambda x: x)
def gen(self, path):
try:
entries = path.listdir()
except self.ignore:
return
rec = self.rec
dirs = self.optsort([p for p in entries
if p.check(dir=1) and (rec is None or rec(p))])
if not self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
for p in self.optsort(entries):
if self.fil is None or self.fil(p):
yield p
if self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
class FNMatcher:
def __init__(self, pattern):
self.pattern = pattern
def __call__(self, path):
pattern = self.pattern
if (pattern.find(path.sep) == -1 and
iswin32 and
pattern.find(posixpath.sep) != -1):
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posixpath.sep, path.sep)
if pattern.find(path.sep) == -1:
name = path.basename
else:
name = str(path) # path.strpath # XXX svn?
if not os.path.isabs(pattern):
pattern = '*' + path.sep + pattern
return fnmatch.fnmatch(name, pattern)
| mit | 7,387,646,999,960,025,000 | 31.357303 | 82 | 0.518795 | false |
rmdort/clipper | integration-tests/deploy_pyspark_pipeline_models.py | 2 | 7133 | from __future__ import absolute_import, print_function
import os
import sys
import requests
import json
import numpy as np
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/.." % cur_dir))
from clipper_admin import Clipper
import time
import subprocess32 as subprocess
import pprint
import random
import socket
import findspark
findspark.init()
import pyspark
from pyspark.ml import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
headers = {'Content-type': 'application/json'}
app_name = "pyspark_pipeline_test"
model_name = "pyspark_pipeline"
class BenchmarkException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
# range of ports where available ports can be found
PORT_RANGE = [34256, 40000]
def find_unbound_port():
"""
Returns an unbound port number on 127.0.0.1.
"""
while True:
port = random.randint(*PORT_RANGE)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(("127.0.0.1", port))
return port
except socket.error:
print("randomly generated port %d is bound. Trying again." % port)
def init_clipper():
clipper = Clipper("localhost", redis_port=find_unbound_port())
clipper.stop_all()
clipper.start()
time.sleep(1)
return clipper
columns = ["id", "text"]
def json_to_dataframe(spark_session, xs):
tuples = [tuple(json.loads(x)) for x in xs]
df = spark_session.createDataFrame(tuples, columns)
return df
def predict(spark, pipeline, xs):
df = json_to_dataframe(spark, xs)
preds = pipeline.transform(df)
selected = preds.select("probability", "prediction")
outputs = []
for row in selected.collect():
prob, prediction = row
outputs.append(
json.dumps({
"prob": str(prob),
"prediction": prediction
}))
return outputs
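# Illustrative call (mirrors the request payloads built below): each input is a
# JSON-encoded (id, text) pair and each output is a JSON string carrying "prob"
# and "prediction" fields.
#   predict(spark, model, [json.dumps((0, "spark abcd"))])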
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("clipper-pyspark")\
.getOrCreate()
training = spark.createDataFrame([(0, "a b c d e spark", 1.0), (
1, "b d", 0.0), (2, "spark f g h", 1.0), (3, "hadoop mapreduce", 0.0)],
columns + ["label"])
# Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr.
tokenizer = Tokenizer(inputCol="text", outputCol="words")
hashingTF = HashingTF(
inputCol=tokenizer.getOutputCol(), outputCol="features")
lr = LogisticRegression(maxIter=10, regParam=0.001)
pipeline = Pipeline(stages=[tokenizer, hashingTF, lr])
# Fit the pipeline to training documents.
model = pipeline.fit(training)
# Prepare test documents, which are unlabeled (id, text) tuples.
test = spark.createDataFrame([(4, "spark i j k"), (5, "l m n"), (
6, "spark hadoop spark"), (7, "apache hadoop")], columns)
# Make predictions on test documents and print columns of interest.
prediction = model.transform(test)
selected = prediction.select("id", "text", "probability", "prediction")
for row in selected.collect():
rid, text, prob, prediction = row
print("(%d, %s) --> prob=%s, prediction=%f" % (rid, text, str(prob),
prediction))
# test predict function
print(predict(spark, model,
[json.dumps((np.random.randint(1000), "spark abcd"))]))
try:
clipper = init_clipper()
try:
clipper.register_application(app_name, model_name, "strings",
"default_pred", 10000000)
time.sleep(1)
response = requests.post(
"http://localhost:1337/%s/predict" % app_name,
headers=headers,
data=json.dumps({
'input':
json.dumps((np.random.randint(1000), "spark abcd"))
}))
result = response.json()
if response.status_code != requests.codes.ok:
print("Error: %s" % response.text)
raise BenchmarkException("Error creating app %s" % app_name)
version = 1
clipper.deploy_pyspark_model(model_name, version, predict, model,
spark.sparkContext, "strings")
time.sleep(10)
num_preds = 25
num_defaults = 0
for i in range(num_preds):
response = requests.post(
"http://localhost:1337/%s/predict" % app_name,
headers=headers,
data=json.dumps({
'input':
json.dumps((np.random.randint(1000), "spark abcd"))
}))
result = response.json()
if response.status_code == requests.codes.ok and result["default"] == True:
num_defaults += 1
if num_defaults > 0:
print("Error: %d/%d predictions were default" % (num_defaults,
num_preds))
if num_defaults > num_preds / 2:
raise BenchmarkException("Error querying APP %s, MODEL %s:%d" %
(app_name, model_name, version))
version += 1
clipper.deploy_pyspark_model(model_name, version, predict, model,
spark.sparkContext, "strings")
time.sleep(10)
num_preds = 25
num_defaults = 0
for i in range(num_preds):
response = requests.post(
"http://localhost:1337/%s/predict" % app_name,
headers=headers,
data=json.dumps({
'input':
json.dumps((np.random.randint(1000), "spark abcd"))
}))
result = response.json()
if response.status_code == requests.codes.ok and result["default"] == True:
num_defaults += 1
if num_defaults > 0:
print("Error: %d/%d predictions were default" % (num_defaults,
num_preds))
if num_defaults > num_preds / 2:
raise BenchmarkException("Error querying APP %s, MODEL %s:%d" %
(app_name, model_name, version))
except BenchmarkException as e:
print(e)
clipper.stop_all()
spark.stop()
sys.exit(1)
else:
spark.stop()
clipper.stop_all()
print("ALL TESTS PASSED")
except Exception as e:
print(e)
clipper = Clipper("localhost")
clipper.stop_all()
spark.stop()
sys.exit(1)
| apache-2.0 | -7,384,065,327,407,390,000 | 33.795122 | 93 | 0.537922 | false |
pschmitt/home-assistant | tests/components/deconz/test_binary_sensor.py | 7 | 6569 | """deCONZ binary sensor platform tests."""
from copy import deepcopy
from homeassistant.components import deconz
import homeassistant.components.binary_sensor as binary_sensor
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DEVICE_CLASS_VIBRATION,
)
from homeassistant.setup import async_setup_component
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
SENSORS = {
"1": {
"id": "Presence sensor id",
"name": "Presence sensor",
"type": "ZHAPresence",
"state": {"dark": False, "presence": False},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"id": "Temperature sensor id",
"name": "Temperature sensor",
"type": "ZHATemperature",
"state": {"temperature": False},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
"3": {
"id": "CLIP presence sensor id",
"name": "CLIP presence sensor",
"type": "CLIPPresence",
"state": {},
"config": {},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"4": {
"id": "Vibration sensor id",
"name": "Vibration sensor",
"type": "ZHAVibration",
"state": {
"orientation": [1, 2, 3],
"tiltangle": 36,
"vibration": True,
"vibrationstrength": 10,
},
"config": {"on": True, "reachable": True, "temperature": 10},
"uniqueid": "00:00:00:00:00:00:00:03-00",
},
}
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a gateway."""
assert (
await async_setup_component(
hass, binary_sensor.DOMAIN, {"binary_sensor": {"platform": deconz.DOMAIN}}
)
is True
)
assert deconz.DOMAIN not in hass.data
async def test_no_binary_sensors(hass):
"""Test that no sensors in deconz results in no sensor entities."""
gateway = await setup_deconz_integration(hass)
assert len(gateway.deconz_ids) == 0
assert len(hass.states.async_all()) == 0
async def test_binary_sensors(hass):
"""Test successful creation of binary sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
gateway = await setup_deconz_integration(hass, get_state_response=data)
assert "binary_sensor.presence_sensor" in gateway.deconz_ids
assert "binary_sensor.temperature_sensor" not in gateway.deconz_ids
assert "binary_sensor.clip_presence_sensor" not in gateway.deconz_ids
assert "binary_sensor.vibration_sensor" in gateway.deconz_ids
assert len(hass.states.async_all()) == 3
presence_sensor = hass.states.get("binary_sensor.presence_sensor")
assert presence_sensor.state == "off"
assert presence_sensor.attributes["device_class"] == DEVICE_CLASS_MOTION
temperature_sensor = hass.states.get("binary_sensor.temperature_sensor")
assert temperature_sensor is None
clip_presence_sensor = hass.states.get("binary_sensor.clip_presence_sensor")
assert clip_presence_sensor is None
vibration_sensor = hass.states.get("binary_sensor.vibration_sensor")
assert vibration_sensor.state == "on"
assert vibration_sensor.attributes["device_class"] == DEVICE_CLASS_VIBRATION
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"presence": True},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
presence_sensor = hass.states.get("binary_sensor.presence_sensor")
assert presence_sensor.state == "on"
await gateway.async_reset()
assert len(hass.states.async_all()) == 0
async def test_allow_clip_sensor(hass):
"""Test that CLIP sensors can be allowed."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
gateway = await setup_deconz_integration(
hass,
options={deconz.gateway.CONF_ALLOW_CLIP_SENSOR: True},
get_state_response=data,
)
assert "binary_sensor.presence_sensor" in gateway.deconz_ids
assert "binary_sensor.temperature_sensor" not in gateway.deconz_ids
assert "binary_sensor.clip_presence_sensor" in gateway.deconz_ids
assert "binary_sensor.vibration_sensor" in gateway.deconz_ids
assert len(hass.states.async_all()) == 4
presence_sensor = hass.states.get("binary_sensor.presence_sensor")
assert presence_sensor.state == "off"
temperature_sensor = hass.states.get("binary_sensor.temperature_sensor")
assert temperature_sensor is None
clip_presence_sensor = hass.states.get("binary_sensor.clip_presence_sensor")
assert clip_presence_sensor.state == "off"
vibration_sensor = hass.states.get("binary_sensor.vibration_sensor")
assert vibration_sensor.state == "on"
hass.config_entries.async_update_entry(
gateway.config_entry, options={deconz.gateway.CONF_ALLOW_CLIP_SENSOR: False}
)
await hass.async_block_till_done()
assert "binary_sensor.presence_sensor" in gateway.deconz_ids
assert "binary_sensor.temperature_sensor" not in gateway.deconz_ids
assert "binary_sensor.clip_presence_sensor" not in gateway.deconz_ids
assert "binary_sensor.vibration_sensor" in gateway.deconz_ids
assert len(hass.states.async_all()) == 3
hass.config_entries.async_update_entry(
gateway.config_entry, options={deconz.gateway.CONF_ALLOW_CLIP_SENSOR: True}
)
await hass.async_block_till_done()
assert "binary_sensor.presence_sensor" in gateway.deconz_ids
assert "binary_sensor.temperature_sensor" not in gateway.deconz_ids
assert "binary_sensor.clip_presence_sensor" in gateway.deconz_ids
assert "binary_sensor.vibration_sensor" in gateway.deconz_ids
assert len(hass.states.async_all()) == 4
async def test_add_new_binary_sensor(hass):
"""Test that adding a new binary sensor works."""
gateway = await setup_deconz_integration(hass)
assert len(gateway.deconz_ids) == 0
state_added_event = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": deepcopy(SENSORS["1"]),
}
gateway.api.event_handler(state_added_event)
await hass.async_block_till_done()
assert "binary_sensor.presence_sensor" in gateway.deconz_ids
presence_sensor = hass.states.get("binary_sensor.presence_sensor")
assert presence_sensor.state == "off"
| apache-2.0 | 9,143,226,111,737,400,000 | 34.701087 | 86 | 0.658091 | false |
ctogle/chunktagger | model.py | 1 | 10733 | import torch
import numpy,functools,os,time,pdb
import util
class MultiTagger(torch.nn.Module):
'''Tagger applies an RNN (LSTM or GRU) module and a Linear
module in sequence to an embedded sentence.
It uses a concatenated hidden layer to perform multiple tagging operations
that share some dependence (e.g. part of speech and chunking).
'''
def report_architecture(self):
c = self.config
print('-'*20+'\nMultiTagger Architecture:')
print('\tSaved At: %s' % c.modelcache)
print('\tEmbedding Size (n_embed,d_embed): %d,%d' % (c.n_embed,c.d_embed))
print('\tEmbedding Dropout Ratio: %.2f' % c.emb_dp_ratio)
rnntype = self.config.rnn
if self.config.birnn:rnntype = 'Bidirectional '+rnntype
rnndims = (self.rnn.input_size,self.rnn.hidden_size,self.rnn.num_layers)
print('\tRNN Class: %s' % rnntype)
print('\tRNN Dimensions (d_embed,d_hidden,n_layers): %d,%d,%d' % rnndims)
hidstr = ','.join([str(d) for d in c.d_hidden])
print(('\tRNN d_hidden breakdown: (%s)') % hidstr)
print('\tRNN Dropout Ratio: %.2f' % self.config.rnn_dp_ratio)
lineardims = (self.decoder.in_features,self.decoder.out_features)
print('\tLinear Decoder (d_hidden,d_out): %d,%d' % lineardims)
taskdout = ','.join(['d_out_'+str(x) for x in range(len(c.d_out))])
taskdims = ','.join([str(x) for x in c.d_out])
print(('\tTasks (%s): (%s) \n'+'-'*20) % (taskdout,taskdims))
def init_hidden(self,bsize):
n_l,d_h = self.config.n_layers,sum(self.config.d_hidden)
if self.config.birnn:n_l *= 2
weight = next(self.parameters()).data
if self.config.rnn == 'GRU':
return torch.autograd.Variable(weight.new(n_l,bsize,d_h).zero_())
else:
return (torch.autograd.Variable(weight.new(n_l,bsize,d_h).zero_()),
torch.autograd.Variable(weight.new(n_l,bsize,d_h).zero_()))
def init_weights(self,initrange = 0.1):
self.encoder.weight.data.uniform_(-initrange,initrange)
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange,initrange)
def __init__(self,c):
super(MultiTagger,self).__init__()
if type(c.d_hidden) == type(''):
c.d_hidden = tuple(int(v) for v in c.d_hidden.split(','))
if not len(c.d_hidden) == c.n_taggers:
amsg = '... received %d d_hidden entries; require %d ...'
raise ValueError(amsg % (len(c.d_hidden),c.n_taggers))
self.config = c
self.dropout = torch.nn.Dropout(p = c.emb_dp_ratio)
self.encoder = torch.nn.Embedding(c.n_embed,c.d_embed)
if c.rnn == 'LSTM':rnnclass = torch.nn.LSTM
elif c.rnn == 'GRU':rnnclass = torch.nn.GRU
else:raise ValueError('... unknown RNN class: %s ...' % c.rnn)
self.rnn = rnnclass(
input_size = c.d_embed,
hidden_size = sum(c.d_hidden),
num_layers = c.n_layers,
dropout = c.rnn_dp_ratio,
bidirectional = c.birnn)
decoder_isize = sum(c.d_hidden)*2 if c.birnn else sum(c.d_hidden)
self.decoder = torch.nn.Linear(decoder_isize,sum(c.d_out))
self.init_weights()
c.training_accuracy = -1.0
def forward(self,i):
'''Input i is a padded sentence LongTensor (time,batchsize)'''
emb = self.dropout(self.encoder(i))
o,h = self.rnn(emb,self.init_hidden(emb.size()[1]))
decoded = self.decoder(o.view(o.size(0)*o.size(1),o.size(2)))
scores = decoded.view(o.size(0),o.size(1),decoded.size(1))
output = []
u,w = 0,0
for j in range(len(self.config.d_out)):
v,z = self.config.d_out[j],self.config.d_hidden[j]
if self.config.rnn == 'GRU':
output.append((scores[:,:,u:u+v],h[:,:,w:w+z]))
elif self.config.rnn == 'LSTM':
output.append((scores[:,:,u:u+v],h[0][:,:,w:w+z],h[1][:,:,w:w+z]))
else:raise ValueError
u += v;w += z
return output
def work(self,s):
'''Perform tagging of a raw sentence.'''
inputs,answers = self.config.fields
t = (s,'.',',','?','\'')
t = functools.reduce(lambda s,c : s.replace(c,' '+c),t).split(' ')
n = numpy.array([inputs.vocab.stoi[w] for w in t])
i = torch.from_numpy(n).view(n.shape[0],1)
o = self.forward(torch.autograd.Variable(i))
o = [torch.max(x[0],2)[1].view(n.shape[0]).data for x in o]
o = [[a.vocab.itos[y] for y in x] for a,x in zip(answers,o)]
o = dict([(k,x) for k,x in zip(self.config.output_fields,o)])
o[self.config.target_field] = t
return o
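# Hedged usage sketch (not part of the original file): tagging one raw sentence with a
# MultiTagger produced by newmodel() below.  The sentence literal is an illustrative
# assumption, and work() relies on tagger.config.fields, which newmodel() attaches
# before returning; the helper is never called at import time.
def _example_tag_sentence(tagger,sentence = 'The cat sat on the mat .'):
    '''Return the per-task tag lists produced by MultiTagger.work for one sentence.'''
    tagger.eval()                 # inference mode: disables the embedding dropout
    tags = tagger.work(sentence)  # dict keyed by the configured output fields plus the tokens
    return tags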
'''For summing losses across sentences'''
sloss = lambda c,a,b : sum([c(a[:,i],b[:,i]) for i in range(a.size()[1])])
def train_batch(tagger,crit,opt,batch,bdataf):
'''Perform training on a single batch of examples,
returning the number of correct answers'''
opt.zero_grad()
i = batch.__getattribute__(tagger.config.target_field)
tdata,bdata = tagger(i),bdataf(batch)
loss = sum([sloss(crit,o[0],c) for o,c in zip(tdata,bdata)])
loss.backward();opt.step()
return [util.ncorrect(o[0],c) for o,c in zip(tdata,bdata)]
def train_epoch(tagger,crit,opt,batcher,epoch,stime,bdataf,prog_f):
'''Perform a training epoch given an iterator of training batches,
returning the accuracy of the model on the data set.'''
if prog_f:prog_f(epoch,-1,len(batcher),[-1]*tagger.config.n_taggers,0,stime)
batcher.init_epoch()
corrects,total = [0]*tagger.config.n_taggers,0
for j,batch in enumerate(batcher):
newcorrects = train_batch(tagger,crit,opt,batch,bdataf)
corrects = [x+y for x,y in zip(corrects,newcorrects)]
total += batch.batch_size*bdataf(batch)[0].size()[0]
if prog_f:prog_f(epoch,j,len(batcher),corrects,total,stime)
if time.time()-stime > tagger.config.timeout:raise KeyboardInterrupt
return 100.0*sum(corrects)/(total*tagger.config.n_taggers)
def train(tagger,train_i,test_i,bdataf,prog_h,prog_f):
'''Perform training of the model given an iterator of training batches.
Exit the training process early on KeyboardInterrupt, or if accuarcy
improvement is sufficiently slow.
Save the model between training epochs or upon early exit.
Test the accuracy of the model on an iterator of test batches when
training is complete.'''
config = tagger.config
lossf = torch.nn.CrossEntropyLoss()
if hasattr(torch.optim,config.optimizer):
optclass = torch.optim.__getattribute__(config.optimizer)
opt = optclass(tagger.parameters(),lr = config.lr)
else:raise ValueError('... unavailable optimizer: %s ...' % config.optimizer)
test_required = False
improvement_threshold = 0.0
if not config.training_accuracy < config.targetaccuracy and prog_f:
print(prog_h)
stime = time.time()
for j in range(config.epochs):
try:
if not config.training_accuracy < config.targetaccuracy:
print('... target accuracy has been met ... ending training ...')
break
accuracy = train_epoch(tagger,lossf,opt,train_i,j,stime,bdataf,prog_f)
improvement = accuracy-config.training_accuracy
config.training_accuracy = accuracy
test_required = True
torch.save(tagger,config.modelcache)
if improvement < improvement_threshold:
print('... improvement is quite low ... ending training ...')
break
except KeyboardInterrupt:
if prog_f:prog_f(j,0,1,[-1]*config.n_taggers,0,stime)
print('... training forcefully exited ...')
torch.save(tagger,config.modelcache)
break
return test_required
def test(tagger,batcher,bdataf,prog_f):
'''Perform testing given an iterator of testing batches,
returning the accuracy of the model on the data set.'''
config = tagger.config
stime = time.time()
prog_f(0,-1,len(batcher),[-1]*config.n_taggers,0,stime)
tagger.eval();batcher.init_epoch()
corrects,total = [0]*config.n_taggers,0
for j,batch in enumerate(batcher):
i = batch.__getattribute__(tagger.config.target_field)
tdata,bdata = tagger(i),bdataf(batch)
newcorrects = [util.ncorrect(o[0],c) for o,c in zip(tdata,bdata)]
corrects = [x+y for x,y in zip(corrects,newcorrects)]
total += batch.batch_size*bdataf(batch)[0].size()[0]
prog_f(0,j,len(batcher),corrects,total,stime)
return 100.0*sum(corrects)/(total*config.n_taggers)
def newmodel(config,data):
'''Create or load, train, and test an instance of a MultiTagger.'''
train_i,test_i = data['dataset_iters']
inputs,answers = data['fields']
fkeys = train_i.dataset.fields.keys()
bkeys = [k for k in fkeys if train_i.dataset.fields[k] in answers]
bdataf = lambda b : tuple(b.__getattribute__(s) for s in bkeys)
config.output_fields = tuple(bkeys)
config.n_taggers = len(bkeys)
if not config.fresh and os.path.exists(config.modelcache):
if config.gpu >= 0:
            map_location = lambda storage,location : storage.cuda(config.gpu)

else:map_location = lambda storage,location : None
tagger = torch.load(config.modelcache,map_location = map_location)
tagger.config.targetaccuracy = config.targetaccuracy
print('... loaded cached model: %s ...' % config.modelcache)
else:
tagger = MultiTagger(config)
if config.word_vectors:
tagger.encoder.weight.data = inputs.vocab.vectors
if config.gpu >= 0 and torch.cuda.is_available():tagger.cuda()
print('... created new model (%s) ...' % config.modelcache)
if config.epochs == 0:
print('... new model requires training ...')
print('... resetting epochs from 0 to 100 ...')
config.epochs = 100
tagger.report_architecture()
prog_h,prog_f = util.get_progress_function(bkeys)
if train_i and config.epochs:
print('... training model ...')
tagger.train()
test_required = train(tagger,train_i,test_i,bdataf,prog_h,prog_f)
tagger.eval()
if test_i and test_required:
print('... testing model ...')
if prog_f:print(prog_h)
accuracy = test(tagger,test_i,bdataf,prog_f)
tagger.config.testing_accuracy = accuracy
torch.save(tagger,config.modelcache)
print('... final model task-averaged accuracy: %.2f ...' % accuracy)
tagger.config.fields = (inputs,answers)
return tagger
| mit | 7,804,109,910,510,477,000 | 42.453441 | 82 | 0.611665 | false |
Aravinthu/odoo | addons/account_asset/wizard/asset_depreciation_confirmation_wizard.py | 16 | 1186 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class AssetDepreciationConfirmationWizard(models.TransientModel):
_name = "asset.depreciation.confirmation.wizard"
_description = "asset.depreciation.confirmation.wizard"
date = fields.Date('Account Date', required=True, help="Choose the period for which you want to automatically post the depreciation lines of running assets", default=fields.Date.context_today)
@api.multi
def asset_compute(self):
self.ensure_one()
context = self._context
created_move_ids = self.env['account.asset.asset'].compute_generated_entries(self.date, asset_type=context.get('asset_type'))
return {
'name': _('Created Asset Moves') if context.get('asset_type') == 'purchase' else _('Created Revenue Moves'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move',
'view_id': False,
'domain': "[('id','in',[" + ','.join(str(id) for id in created_move_ids) + "])]",
'type': 'ir.actions.act_window',
}
| agpl-3.0 | -2,867,502,352,904,528,000 | 42.925926 | 196 | 0.633221 | false |
Orav/kbengine | kbe/res/scripts/common/Lib/codecs.py | 2 | 37066 | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import builtins, sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
"""Codec details when looking up the codec registry"""
# Private API to allow Python 3.4 to blacklist the known non-Unicode
# codecs in the standard library. A more general mechanism to
# reliably distinguish test encodings from other codecs will hopefully
# be defined for Python 3.5
#
# See http://bugs.python.org/issue19619
_is_text_encoding = True # Assume codecs are text encodings by default
def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
incrementalencoder=None, incrementaldecoder=None, name=None,
*, _is_text_encoding=None):
self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter))
self.name = name
self.encode = encode
self.decode = decode
self.incrementalencoder = incrementalencoder
self.incrementaldecoder = incrementaldecoder
self.streamwriter = streamwriter
self.streamreader = streamreader
if _is_text_encoding is not None:
self._is_text_encoding = _is_text_encoding
return self
def __repr__(self):
return "<%s.%s object for encoding %s at 0x%x>" % \
(self.__class__.__module__, self.__class__.__name__,
self.name, id(self))
class Codec:
""" Defines the interface for stateless encoders/decoders.
The .encode()/.decode() methods may use different error
handling schemes by providing the errors argument. These
string values are predefined:
'strict' - raise a ValueError error (or a subclass)
'ignore' - ignore the character and continue with the next
'replace' - replace with a suitable replacement character;
Python will use the official U+FFFD REPLACEMENT
CHARACTER for the builtin Unicode codecs on
decoding and '?' on encoding.
'surrogateescape' - replace with private codepoints U+DCnn.
'xmlcharrefreplace' - Replace with the appropriate XML
character reference (only for encoding).
'backslashreplace' - Replace with backslashed escape sequences
(only for encoding).
The set of allowed values can be extended via register_error.
"""
def encode(self, input, errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
def decode(self, input, errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling.
The method may not store state in the Codec instance. Use
StreamCodec for codecs which have to keep state in order to
make encoding/decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.
"""
raise NotImplementedError
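# Illustrative sketch (not part of the original module): the predefined error handling
# names documented above, exercised through the registry's lookup().  The sample text
# and the choice of the ascii codec are assumptions made only for this demonstration.
def _demo_error_handling():
    """ Show how 'strict', 'ignore', 'replace' and 'xmlcharrefreplace' differ. """
    encode = lookup('ascii').encode
    text = 'caf\xe9'                        # U+00E9 cannot be encoded as ASCII
    results = {}
    for handler in ('ignore', 'replace', 'xmlcharrefreplace'):
        data, consumed = encode(text, handler)
        results[handler] = data             # b'caf', b'caf?', b'caf&#233;' respectively
    try:
        encode(text, 'strict')              # 'strict' raises UnicodeEncodeError
    except UnicodeEncodeError:
        results['strict'] = None
    return results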
class IncrementalEncoder(object):
"""
An IncrementalEncoder encodes an input in multiple steps. The input can
be passed piece by piece to the encode() method. The IncrementalEncoder
remembers the state of the encoding process between calls to encode().
"""
def __init__(self, errors='strict'):
"""
Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
self.buffer = ""
def encode(self, input, final=False):
"""
Encodes input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Resets the encoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the encoder.
"""
return 0
def setstate(self, state):
"""
Set the current state of the encoder. state must have been
returned by getstate().
"""
class BufferedIncrementalEncoder(IncrementalEncoder):
"""
This subclass of IncrementalEncoder can be used as the baseclass for an
incremental encoder if the encoder must keep some of the output in a
buffer between calls to encode().
"""
def __init__(self, errors='strict'):
IncrementalEncoder.__init__(self, errors)
# unencoded input that is kept between calls to encode()
self.buffer = ""
def _buffer_encode(self, input, errors, final):
# Overwrite this method in subclasses: It must encode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def encode(self, input, final=False):
# encode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_encode(data, self.errors, final)
# keep unencoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalEncoder.reset(self)
self.buffer = ""
def getstate(self):
return self.buffer or 0
def setstate(self, state):
self.buffer = state or ""
class IncrementalDecoder(object):
"""
An IncrementalDecoder decodes an input in multiple steps. The input can
be passed piece by piece to the decode() method. The IncrementalDecoder
remembers the state of the decoding process between calls to decode().
"""
def __init__(self, errors='strict'):
"""
        Create an IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.
"""
self.errors = errors
def decode(self, input, final=False):
"""
Decode input and returns the resulting object.
"""
raise NotImplementedError
def reset(self):
"""
Reset the decoder to the initial state.
"""
def getstate(self):
"""
Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).
"""
return (b"", 0)
def setstate(self, state):
"""
Set the current state of the decoder.
state must have been returned by getstate(). The effect of
setstate((b"", 0)) must be equivalent to reset().
"""
class BufferedIncrementalDecoder(IncrementalDecoder):
"""
This subclass of IncrementalDecoder can be used as the baseclass for an
incremental decoder if the decoder must be able to handle incomplete
byte sequences.
"""
def __init__(self, errors='strict'):
IncrementalDecoder.__init__(self, errors)
# undecoded input that is kept between calls to decode()
self.buffer = b""
def _buffer_decode(self, input, errors, final):
# Overwrite this method in subclasses: It must decode input
# and return an (output, length consumed) tuple
raise NotImplementedError
def decode(self, input, final=False):
# decode input (taking the buffer into account)
data = self.buffer + input
(result, consumed) = self._buffer_decode(data, self.errors, final)
# keep undecoded input until the next call
self.buffer = data[consumed:]
return result
def reset(self):
IncrementalDecoder.reset(self)
self.buffer = b""
def getstate(self):
# additional state info is always 0
return (self.buffer, 0)
def setstate(self, state):
# ignore additional state info
self.buffer = state[0]
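# Illustrative sketch (not part of the original module): feeding a byte stream to an
# incremental decoder in arbitrary chunks, as described by the classes above.  The
# UTF-8 sample bytes (a split euro sign) are an assumption for the demonstration.
def _demo_incremental_decode(chunks=(b'\xe2\x82', b'\xac and more')):
    """ Decode chunks that may split a multi-byte sequence and return the text. """
    decoder = getincrementaldecoder('utf-8')()
    parts = [decoder.decode(chunk) for chunk in chunks]
    parts.append(decoder.decode(b'', final=True))   # flush any buffered input
    return ''.join(parts)                           # '\u20ac and more'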
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):
def __init__(self, stream, errors='strict'):
""" Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character
'xmlcharrefreplace' - Replace with the appropriate XML
character reference.
'backslashreplace' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
def write(self, object):
""" Writes the object's contents encoded to self.stream.
"""
data, consumed = self.encode(object, self.errors)
self.stream.write(data)
def writelines(self, list):
""" Writes the concatenated list of strings to the stream
using .write().
"""
self.write(''.join(list))
def reset(self):
""" Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.
"""
pass
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
if whence == 0 and offset == 0:
self.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReader(Codec):
charbuffertype = str
def __init__(self, stream, errors='strict'):
""" Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
'strict' - raise a ValueError (or a subclass)
'ignore' - ignore the character and continue with the next
'replace'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.
"""
self.stream = stream
self.errors = errors
self.bytebuffer = b""
self._empty_charbuffer = self.charbuffertype()
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def decode(self, input, errors='strict'):
raise NotImplementedError
def read(self, size=-1, chars=-1, firstline=False):
""" Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.
"""
# If we have lines cached, first merge them back into characters
if self.linebuffer:
self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
self.linebuffer = None
# read until we get the required number of characters (if available)
while True:
# can the request be satisfied from the character buffer?
if chars >= 0:
if len(self.charbuffer) >= chars:
break
elif size >= 0:
if len(self.charbuffer) >= size:
break
# we need more data
if size < 0:
newdata = self.stream.read()
else:
newdata = self.stream.read(size)
# decode bytes (those remaining from the last call included)
data = self.bytebuffer + newdata
if not data:
break
try:
newchars, decodedbytes = self.decode(data, self.errors)
except UnicodeDecodeError as exc:
if firstline:
newchars, decodedbytes = \
self.decode(data[:exc.start], self.errors)
lines = newchars.splitlines(keepends=True)
if len(lines)<=1:
raise
else:
raise
# keep undecoded bytes until the next call
self.bytebuffer = data[decodedbytes:]
# put new characters in the character buffer
self.charbuffer += newchars
# there was no data available
if not newdata:
break
if chars < 0:
# Return everything we've got
result = self.charbuffer
self.charbuffer = self._empty_charbuffer
else:
# Return the first chars characters
result = self.charbuffer[:chars]
self.charbuffer = self.charbuffer[chars:]
return result
def readline(self, size=None, keepends=True):
""" Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.
"""
# If we have lines cached from an earlier read, return
# them unconditionally
if self.linebuffer:
line = self.linebuffer[0]
del self.linebuffer[0]
if len(self.linebuffer) == 1:
# revert to charbuffer mode; we might need more data
# next time
self.charbuffer = self.linebuffer[0]
self.linebuffer = None
if not keepends:
line = line.splitlines(keepends=False)[0]
return line
readsize = size or 72
line = self._empty_charbuffer
# If size is given, we call read() only once
while True:
data = self.read(readsize, firstline=True)
if data:
# If we're at a "\r" read one extra character (which might
# be a "\n") to get a proper line ending. If the stream is
# temporarily exhausted we return the wrong line ending.
if (isinstance(data, str) and data.endswith("\r")) or \
(isinstance(data, bytes) and data.endswith(b"\r")):
data += self.read(size=1, chars=1)
line += data
lines = line.splitlines(keepends=True)
if lines:
if len(lines) > 1:
# More than one line result; the first line is a full line
# to return
line = lines[0]
del lines[0]
if len(lines) > 1:
# cache the remaining lines
lines[-1] += self.charbuffer
self.linebuffer = lines
self.charbuffer = None
else:
# only one remaining line, put it back into charbuffer
self.charbuffer = lines[0] + self.charbuffer
if not keepends:
line = line.splitlines(keepends=False)[0]
break
line0withend = lines[0]
line0withoutend = lines[0].splitlines(keepends=False)[0]
if line0withend != line0withoutend: # We really have a line end
# Put the rest back together and keep it until the next call
self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
self.charbuffer
if keepends:
line = line0withend
else:
line = line0withoutend
break
# we didn't get anything or this was our only try
if not data or size is not None:
if line and not keepends:
line = line.splitlines(keepends=False)[0]
break
if readsize < 8000:
readsize *= 2
return line
def readlines(self, sizehint=None, keepends=True):
""" Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec's decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to finding the true end-of-line.
"""
data = self.read()
return data.splitlines(keepends)
def reset(self):
""" Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.
"""
self.bytebuffer = b""
self.charbuffer = self._empty_charbuffer
self.linebuffer = None
def seek(self, offset, whence=0):
""" Set the input stream's current position.
Resets the codec buffers used for keeping state.
"""
self.stream.seek(offset, whence)
self.reset()
def __next__(self):
""" Return the next decoded line from the input stream."""
line = self.readline()
if line:
return line
raise StopIteration
def __iter__(self):
return self
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
###
class StreamReaderWriter:
""" StreamReaderWriter instances allow wrapping streams which
work in both read and write modes.
The design is such that one can use the factory functions
returned by the codec.lookup() function to construct the
instance.
"""
# Optional attributes set by the file wrappers below
encoding = 'unknown'
def __init__(self, stream, Reader, Writer, errors='strict'):
""" Creates a StreamReaderWriter instance.
stream must be a Stream-like object.
Reader, Writer must be factory functions or classes
providing the StreamReader, StreamWriter interface resp.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
return self.reader.read(size)
def readline(self, size=None):
return self.reader.readline(size)
def readlines(self, sizehint=None):
return self.reader.readlines(sizehint)
def __next__(self):
""" Return the next decoded line from the input stream."""
return next(self.reader)
def __iter__(self):
return self
def write(self, data):
return self.writer.write(data)
def writelines(self, list):
return self.writer.writelines(list)
def reset(self):
self.reader.reset()
self.writer.reset()
def seek(self, offset, whence=0):
self.stream.seek(offset, whence)
self.reader.reset()
if whence == 0 and offset == 0:
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
# these are needed to make "with codecs.open(...)" work properly
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
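# Illustrative sketch (not part of the original module): building a StreamReaderWriter
# by hand from the factories returned by lookup(), as the class docstring suggests.
# The in-memory BytesIO backend and the utf-8 codec are assumptions for the demo.
def _demo_stream_reader_writer():
    """ Round-trip text through a wrapped in-memory binary stream. """
    import io
    info = lookup('utf-8')
    wrapped = StreamReaderWriter(io.BytesIO(), info.streamreader, info.streamwriter)
    wrapped.write('gr\xfc\xdfe')        # encoded to UTF-8 bytes on the way out
    wrapped.seek(0)
    return wrapped.read()               # decoded back to the original text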
###
class StreamRecoder:
""" StreamRecoder instances provide a frontend - backend
view of encoding data.
They use the complete set of APIs returned by the
codecs.lookup() function to implement their task.
Data written to the stream is first decoded into an
intermediate format (which is dependent on the given codec
combination) and then written to the stream using an instance
of the provided Writer class.
In the other direction, data is read from the stream using a
Reader instance and then return encoded data to the caller.
"""
# Optional attributes set by the file wrappers below
data_encoding = 'unknown'
file_encoding = 'unknown'
def __init__(self, stream, encode, decode, Reader, Writer,
errors='strict'):
""" Creates a StreamRecoder instance which implements a two-way
conversion: encode and decode work on the frontend (the
input to .read() and output of .write()) while
Reader and Writer work on the backend (reading and
writing to the stream).
You can use these objects to do transparent direct
recodings from e.g. latin-1 to utf-8 and back.
stream must be a file-like object.
encode, decode must adhere to the Codec interface, Reader,
Writer must be factory functions or classes providing the
StreamReader, StreamWriter interface resp.
encode and decode are needed for the frontend translation,
Reader and Writer for the backend translation. Unicode is
used as intermediate encoding.
Error handling is done in the same way as defined for the
StreamWriter/Readers.
"""
self.stream = stream
self.encode = encode
self.decode = decode
self.reader = Reader(stream, errors)
self.writer = Writer(stream, errors)
self.errors = errors
def read(self, size=-1):
data = self.reader.read(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readline(self, size=None):
if size is None:
data = self.reader.readline()
else:
data = self.reader.readline(size)
data, bytesencoded = self.encode(data, self.errors)
return data
def readlines(self, sizehint=None):
data = self.reader.read()
data, bytesencoded = self.encode(data, self.errors)
return data.splitlines(keepends=True)
def __next__(self):
""" Return the next decoded line from the input stream."""
data = next(self.reader)
data, bytesencoded = self.encode(data, self.errors)
return data
def __iter__(self):
return self
def write(self, data):
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def writelines(self, list):
data = ''.join(list)
data, bytesdecoded = self.decode(data, self.errors)
return self.writer.write(data)
def reset(self):
self.reader.reset()
self.writer.reset()
def __getattr__(self, name,
getattr=getattr):
""" Inherit all other methods from the underlying stream.
"""
return getattr(self.stream, name)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.stream.close()
### Shortcuts
def open(filename, mode='rb', encoding=None, errors='strict', buffering=1):
""" Open an encoded file using the given mode and return
a wrapped version providing transparent encoding/decoding.
Note: The wrapped version will only accept the object format
defined by the codecs, i.e. Unicode objects for most builtin
codecs. Output is also codec dependent and will usually be
Unicode as well.
Files are always opened in binary mode, even if no binary mode
was specified. This is done to avoid data loss due to encodings
using 8-bit values. The default file mode is 'rb' meaning to
open the file in binary read mode.
encoding specifies the encoding which is to be used for the
file.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
buffering has the same meaning as for the builtin open() API.
It defaults to line buffered.
The returned wrapped file object provides an extra attribute
.encoding which allows querying the used encoding. This
attribute is only available if an encoding was specified as
parameter.
"""
if encoding is not None and \
'b' not in mode:
# Force opening of the file in binary mode
mode = mode + 'b'
file = builtins.open(filename, mode, buffering)
if encoding is None:
return file
info = lookup(encoding)
srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors)
# Add attributes to simplify introspection
srw.encoding = encoding
return srw
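# Illustrative sketch (not part of the original module): a typical round trip through
# the open() wrapper above.  The file name and the latin-1 encoding are assumptions
# for the demonstration; the helper is never called at import time.
def _demo_codecs_open(path='example.txt'):
    """ Write and re-read an encoded file through the wrapped file object. """
    with open(path, 'w', encoding='latin-1') as f:   # forced to binary mode internally
        f.write('na\xefve')
    with open(path, 'r', encoding='latin-1') as f:
        return f.read()                              # decoded back transparently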
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
""" Return a wrapped version of file which provides transparent
encoding translation.
Strings written to the wrapped file are interpreted according
to the given data_encoding and then written to the original
file as string using file_encoding. The intermediate encoding
will usually be Unicode but depends on the specified codecs.
Strings are read from the file using file_encoding and then
passed back to the caller as string using data_encoding.
If file_encoding is not given, it defaults to data_encoding.
errors may be given to define the error handling. It defaults
to 'strict' which causes ValueErrors to be raised in case an
encoding error occurs.
The returned wrapped file object provides two extra attributes
.data_encoding and .file_encoding which reflect the given
parameters of the same name. The attributes can be used for
introspection by Python programs.
"""
if file_encoding is None:
file_encoding = data_encoding
data_info = lookup(data_encoding)
file_info = lookup(file_encoding)
sr = StreamRecoder(file, data_info.encode, data_info.decode,
file_info.streamreader, file_info.streamwriter, errors)
# Add attributes to simplify introspection
sr.data_encoding = data_encoding
sr.file_encoding = file_encoding
return sr
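# Illustrative sketch (not part of the original module): recoding between two byte
# encodings with EncodedFile().  The sample bytes, the io.BytesIO backend and the
# utf-8/latin-1 pairing are assumptions made only for this demonstration.
def _demo_encoded_file():
    """ Write UTF-8 bytes through a wrapper whose backend stores Latin-1. """
    import io
    backend = io.BytesIO()
    wrapper = EncodedFile(backend, data_encoding='utf-8', file_encoding='latin-1')
    wrapper.write(b'caf\xc3\xa9')       # UTF-8 input ...
    return backend.getvalue()           # ... stored as Latin-1: b'caf\xe9'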
### Helpers for codec lookup
def getencoder(encoding):
    """ Look up the codec for the given encoding and return
its encoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).encode
def getdecoder(encoding):
    """ Look up the codec for the given encoding and return
its decoder function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).decode
def getincrementalencoder(encoding):
    """ Look up the codec for the given encoding and return
        its IncrementalEncoder class or factory function.
    Raises a LookupError in case the encoding cannot be found
    or the codec doesn't provide an incremental encoder.
"""
encoder = lookup(encoding).incrementalencoder
if encoder is None:
raise LookupError(encoding)
return encoder
def getincrementaldecoder(encoding):
    """ Look up the codec for the given encoding and return
        its IncrementalDecoder class or factory function.
    Raises a LookupError in case the encoding cannot be found
    or the codec doesn't provide an incremental decoder.
"""
decoder = lookup(encoding).incrementaldecoder
if decoder is None:
raise LookupError(encoding)
return decoder
def getreader(encoding):
    """ Look up the codec for the given encoding and return
its StreamReader class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamreader
def getwriter(encoding):
    """ Look up the codec for the given encoding and return
its StreamWriter class or factory function.
Raises a LookupError in case the encoding cannot be found.
"""
return lookup(encoding).streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
"""
Encoding iterator.
    Encodes the input strings from the iterator using an IncrementalEncoder.
errors and kwargs are passed through to the IncrementalEncoder
constructor.
"""
encoder = getincrementalencoder(encoding)(errors, **kwargs)
for input in iterator:
output = encoder.encode(input)
if output:
yield output
output = encoder.encode("", True)
if output:
yield output
def iterdecode(iterator, encoding, errors='strict', **kwargs):
"""
Decoding iterator.
    Decodes the input strings from the iterator using an IncrementalDecoder.
errors and kwargs are passed through to the IncrementalDecoder
constructor.
"""
decoder = getincrementaldecoder(encoding)(errors, **kwargs)
for input in iterator:
output = decoder.decode(input)
if output:
yield output
output = decoder.decode(b"", True)
if output:
yield output
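# Illustrative sketch (not part of the original module): streaming encode/decode with
# the two generators above.  The chunked input strings are an assumption for the demo.
def _demo_iter_round_trip(texts=('alpha ', 'beta ', '\u20ac')):
    """ Encode an iterable of strings to UTF-8 pieces, then decode them back. """
    encoded = list(iterencode(texts, 'utf-8'))       # a list of bytes objects
    decoded = ''.join(iterdecode(encoded, 'utf-8'))  # 'alpha beta \u20ac'
    return encoded, decoded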
### Helpers for charmap-based codecs
def make_identity_dict(rng):
""" make_identity_dict(rng) -> dict
Return a dictionary where elements of the rng sequence are
mapped to themselves.
"""
return {i:i for i in rng}
def make_encoding_map(decoding_map):
""" Creates an encoding map from a decoding map.
If a target mapping in the decoding map occurs multiple
times, then that target is mapped to None (undefined mapping),
causing an exception when encountered by the charmap codec
during translation.
One example where this happens is cp875.py which decodes
        multiple characters to \u001a.
"""
m = {}
for k,v in decoding_map.items():
if not v in m:
m[v] = k
else:
m[v] = None
return m
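# Illustrative sketch (not part of the original module): how the two helpers above are
# typically combined when writing a charmap codec.  The toy mapping is an assumption.
def _demo_charmap_maps():
    """ Build a decoding map for bytes 0x00-0x7F plus one override, then invert it. """
    decoding_map = make_identity_dict(range(128))
    decoding_map[0x80] = 0x20ac                      # pretend byte 0x80 means U+20AC
    encoding_map = make_encoding_map(decoding_map)   # maps 0x20ac back to 0x80
    return decoding_map, encoding_map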
### error handlers
try:
strict_errors = lookup_error("strict")
ignore_errors = lookup_error("ignore")
replace_errors = lookup_error("replace")
xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
backslashreplace_errors = lookup_error("backslashreplace")
except LookupError:
# In --disable-unicode builds, these error handler are missing
strict_errors = None
ignore_errors = None
replace_errors = None
xmlcharrefreplace_errors = None
backslashreplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
import encodings
### Tests
if __name__ == '__main__':
# Make stdout translate Latin-1 output into UTF-8 output
sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
# Have stdin translate Latin-1 input into UTF-8 input
sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| lgpl-3.0 | -960,728,129,426,200,400 | 31.392793 | 80 | 0.595991 | false |
Nexenta/cinder | cinder/db/sqlalchemy/migrate_repo/versions/046_cinder_init.py | 2 | 22266 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_config import cfg
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Index
from sqlalchemy import Integer, MetaData, String, Table, Text, UniqueConstraint
# Get default values via config. The defaults will either
# come from the default values set in the quota option
# configuration or via cinder.conf if the user has configured
# default values for quotas there.
CONF = cfg.CONF
CONF.import_opt('quota_volumes', 'cinder.quota')
CONF.import_opt('quota_snapshots', 'cinder.quota')
CONF.import_opt('quota_gigabytes', 'cinder.quota')
CONF.import_opt('quota_consistencygroups', 'cinder.quota')
CLASS_NAME = 'default'
CREATED_AT = datetime.datetime.now() # noqa
def define_tables(meta):
services = Table(
'services', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(255)),
Column('binary', String(255)),
Column('topic', String(255)),
Column('report_count', Integer, nullable=False),
Column('disabled', Boolean),
Column('availability_zone', String(255)),
Column('disabled_reason', String(255)),
Column('modified_at', DateTime(timezone=False)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
consistencygroups = Table(
'consistencygroups', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True, nullable=False),
Column('user_id', String(255)),
Column('project_id', String(255)),
Column('host', String(255)),
Column('availability_zone', String(255)),
Column('name', String(255)),
Column('description', String(255)),
Column('volume_type_id', String(255)),
Column('status', String(255)),
Column('cgsnapshot_id', String(36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cgsnapshots = Table(
'cgsnapshots', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True, nullable=False),
Column('consistencygroup_id', String(36),
ForeignKey('consistencygroups.id'),
nullable=False),
Column('user_id', String(255)),
Column('project_id', String(255)),
Column('name', String(255)),
Column('description', String(255)),
Column('status', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volumes = Table(
'volumes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', String(36), primary_key=True, nullable=False),
Column('ec2_id', String(255)),
Column('user_id', String(255)),
Column('project_id', String(255)),
Column('host', String(255)),
Column('size', Integer),
Column('availability_zone', String(255)),
Column('status', String(255)),
Column('attach_status', String(255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(255)),
Column('display_description', String(255)),
Column('provider_location', String(256)),
Column('provider_auth', String(256)),
Column('snapshot_id', String(36)),
Column('volume_type_id', String(36)),
Column('source_volid', String(36)),
Column('bootable', Boolean),
Column('provider_geometry', String(255)),
Column('_name_id', String(36)),
Column('encryption_key_id', String(36)),
Column('migration_status', String(255)),
Column('replication_status', String(255)),
Column('replication_extended_status', String(255)),
Column('replication_driver_data', String(255)),
Column('consistencygroup_id', String(36),
ForeignKey('consistencygroups.id')),
Column('provider_id', String(255)),
Column('multiattach', Boolean),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_attachment = Table(
'volume_attachment', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', String(36), primary_key=True, nullable=False),
Column('volume_id', String(36), ForeignKey('volumes.id'),
nullable=False),
Column('attached_host', String(255)),
Column('instance_uuid', String(36)),
Column('mountpoint', String(255)),
Column('attach_time', DateTime),
Column('detach_time', DateTime),
Column('attach_mode', String(36)),
Column('attach_status', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshots = Table(
'snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', String(36), primary_key=True, nullable=False),
Column('volume_id', String(36),
ForeignKey('volumes.id', name='snapshots_volume_id_fkey'),
nullable=False),
Column('user_id', String(255)),
Column('project_id', String(255)),
Column('status', String(255)),
Column('progress', String(255)),
Column('volume_size', Integer),
Column('scheduled_at', DateTime),
Column('display_name', String(255)),
Column('display_description', String(255)),
Column('provider_location', String(255)),
Column('encryption_key_id', String(36)),
Column('volume_type_id', String(36)),
Column('cgsnapshot_id', String(36),
ForeignKey('cgsnapshots.id')),
Column('provider_id', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshot_metadata = Table(
'snapshot_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('snapshot_id', String(36), ForeignKey('snapshots.id'),
nullable=False),
Column('key', String(255)),
Column('value', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quality_of_service_specs = Table(
'quality_of_service_specs', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True, nullable=False),
Column('specs_id', String(36),
ForeignKey('quality_of_service_specs.id')),
Column('key', String(255)),
Column('value', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_types = Table(
'volume_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', String(36), primary_key=True, nullable=False),
Column('name', String(255)),
Column('qos_specs_id', String(36),
ForeignKey('quality_of_service_specs.id')),
Column('is_public', Boolean),
Column('description', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_type_projects = Table(
'volume_type_projects', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('volume_type_id', String(36),
ForeignKey('volume_types.id')),
Column('project_id', String(255)),
Column('deleted', Boolean(create_constraint=True, name=None)),
UniqueConstraint('volume_type_id', 'project_id', 'deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_metadata = Table(
'volume_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('volume_id', String(36), ForeignKey('volumes.id'),
nullable=False),
Column('key', String(255)),
Column('value', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_type_extra_specs = Table(
'volume_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('volume_type_id', String(36),
ForeignKey('volume_types.id',
name='volume_type_extra_specs_ibfk_1'),
nullable=False),
Column('key', String(255)),
Column('value', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quotas = Table(
'quotas', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('project_id', String(255)),
Column('resource', String(255), nullable=False),
Column('hard_limit', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
iscsi_targets = Table(
'iscsi_targets', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(255)),
Column('volume_id', String(36), ForeignKey('volumes.id'),
nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_classes = Table(
'quota_classes', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True,
name=None)),
Column('id', Integer(), primary_key=True),
Column('class_name', String(255), index=True),
Column('resource', String(255)),
Column('hard_limit', Integer(), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
quota_usages = Table(
'quota_usages', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True,
name=None)),
Column('id', Integer(), primary_key=True),
Column('project_id', String(255), index=True),
Column('resource', String(255)),
Column('in_use', Integer(), nullable=False),
Column('reserved', Integer(), nullable=False),
Column('until_refresh', Integer(), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
reservations = Table(
'reservations', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True,
name=None)),
Column('id', Integer(), primary_key=True),
Column('uuid', String(36), nullable=False),
Column('usage_id',
Integer(),
ForeignKey('quota_usages.id'),
nullable=False),
Column('project_id', String(255), index=True),
Column('resource', String(255)),
Column('delta', Integer(), nullable=False),
Column('expire', DateTime(timezone=False)),
Index('reservations_deleted_expire_idx',
'deleted', 'expire'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
volume_glance_metadata = Table(
'volume_glance_metadata',
meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(36), ForeignKey('volumes.id')),
Column('snapshot_id', String(36),
ForeignKey('snapshots.id')),
Column('key', String(255)),
Column('value', Text),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
backups = Table(
'backups', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', String(36), primary_key=True, nullable=False),
Column('volume_id', String(36), nullable=False),
Column('user_id', String(255)),
Column('project_id', String(255)),
Column('host', String(255)),
Column('availability_zone', String(255)),
Column('display_name', String(255)),
Column('display_description', String(255)),
Column('container', String(255)),
Column('status', String(255)),
Column('fail_reason', String(255)),
Column('service_metadata', String(255)),
Column('service', String(255)),
Column('size', Integer()),
Column('object_count', Integer()),
Column('parent_id', String(36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
transfers = Table(
'transfers', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean),
Column('id', String(36), primary_key=True, nullable=False),
Column('volume_id', String(36), ForeignKey('volumes.id'),
nullable=False),
Column('display_name', String(255)),
Column('salt', String(255)),
Column('crypt_hash', String(255)),
Column('expires_at', DateTime(timezone=False)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
# Sqlite needs to handle nullable differently
is_nullable = (meta.bind.name == 'sqlite')
encryption = Table(
'encryption', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('cipher', String(255)),
Column('control_location', String(255), nullable=is_nullable),
Column('key_size', Integer),
Column('provider', String(255), nullable=is_nullable),
# NOTE(joel-coffman): The volume_type_id must be unique or else the
# referenced volume type becomes ambiguous. That is, specifying the
# volume type is not sufficient to identify a particular encryption
# scheme unless each volume type is associated with at most one
# encryption scheme.
Column('volume_type_id', String(36), nullable=is_nullable),
# NOTE (smcginnis): nullable=True triggers this to not set a default
# value, but since it's a primary key the resulting schema will end up
# still being NOT NULL. This is avoiding a case in MySQL where it will
# otherwise set this to NOT NULL DEFAULT ''. May be harmless, but
# inconsistent with previous schema.
Column('encryption_id', String(36), primary_key=True, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_admin_metadata = Table(
'volume_admin_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('volume_id', String(36), ForeignKey('volumes.id'),
nullable=False),
Column('key', String(255)),
Column('value', String(255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
initiator_data = Table(
'driver_initiator_data', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('id', Integer, primary_key=True, nullable=False),
Column('initiator', String(255), index=True, nullable=False),
Column('namespace', String(255), nullable=False),
Column('key', String(255), nullable=False),
Column('value', String(255)),
UniqueConstraint('initiator', 'namespace', 'key'),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
return [consistencygroups,
cgsnapshots,
volumes,
volume_attachment,
snapshots,
snapshot_metadata,
quality_of_service_specs,
volume_types,
volume_type_projects,
iscsi_targets,
quotas,
services,
volume_metadata,
volume_type_extra_specs,
quota_classes,
quota_usages,
reservations,
volume_glance_metadata,
backups,
transfers,
encryption,
volume_admin_metadata,
initiator_data]
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# create all tables
# Take care on create order for those with FK dependencies
tables = define_tables(meta)
for table in tables:
table.create()
if migrate_engine.name == "mysql":
tables = ["consistencygroups",
"cgsnapshots",
"snapshots",
"snapshot_metadata",
"quality_of_service_specs",
"volume_types",
"volume_type_projects",
"volumes",
"volume_attachment",
"iscsi_targets",
"migrate_version",
"quotas",
"services",
"volume_metadata",
"volume_type_extra_specs",
"quota_classes",
"quota_usages",
"reservations",
"volume_glance_metadata",
"backups",
"transfers",
"encryption",
"volume_admin_metadata",
"driver_initiator_data"]
migrate_engine.execute("SET foreign_key_checks = 0")
for table in tables:
migrate_engine.execute(
"ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table)
migrate_engine.execute("SET foreign_key_checks = 1")
migrate_engine.execute(
"ALTER DATABASE %s DEFAULT CHARACTER SET utf8" %
migrate_engine.url.database)
migrate_engine.execute("ALTER TABLE %s Engine=InnoDB" % table)
# Set default quota class values
quota_classes = Table('quota_classes', meta, autoload=True)
qci = quota_classes.insert()
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'volumes',
'hard_limit': CONF.quota_volumes,
'deleted': False, })
# Set default snapshots
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'snapshots',
'hard_limit': CONF.quota_snapshots,
'deleted': False, })
# Set default gigabytes
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'gigabytes',
'hard_limit': CONF.quota_gigabytes,
'deleted': False, })
qci.execute({'created_at': CREATED_AT,
'class_name': CLASS_NAME,
'resource': 'consistencygroups',
'hard_limit': CONF.quota_consistencygroups,
'deleted': False, })
| apache-2.0 | 8,879,879,885,424,088,000 | 37.790941 | 79 | 0.579673 | false |
shadowmint/nwidget | lib/cocos2d-0.5.5/cocos/audio/pygame/__init__.py | 1 | 3182 | ## pygame - Python Game Library
## Copyright (C) 2000-2001 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## [email protected]
'''Top-level Pygame module.
Pygame is a set of Python modules designed for writing games.
It is written on top of the excellent SDL library. This allows you
to create fully featured games and multimedia programs in the Python
language. The package is highly portable, with games running on
Windows, MacOS, OS X, BeOS, FreeBSD, IRIX, and Linux.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 899 2006-08-04 16:52:18Z aholkner $'
import os
import sys
class MissingModule:
def __init__(self, name, info='', urgent=0):
self.name = name
self.info = str(info)
self.urgent = urgent
if urgent:
self.warn()
def __getattr__(self, var):
if not self.urgent:
self.warn()
self.urgent = 1
MissingPygameModule = "%s module not available" % self.name
raise NotImplementedError, MissingPygameModule
def __nonzero__(self):
return 0
def warn(self):
if self.urgent: type = 'import'
else: type = 'use'
message = '%s %s: %s' % (type, self.name, self.info)
try:
import warnings
if self.urgent: level = 4
else: level = 3
warnings.warn(message, RuntimeWarning, level)
except ImportError:
print message
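# Illustrative behaviour of the placeholder above (hypothetical names):
#   mixer = MissingModule('mixer', 'SDL_mixer not found', urgent=0)
#   bool(mixer)        # -> 0, so "if mixer:" feature checks still work
#   mixer.Sound('x')   # warns once, then raises
#                      # NotImplementedError('mixer module not available')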
#we need to import like this, each at a time. the cleanest way to import
#our modules is with the import command (not the __import__ function)
#first, the "required" modules
#from pygame.array import *
from cocos.audio.pygame.base import *
from cocos.audio.pygame.version import *
__version__ = ver
#next, the "standard" modules
#we still allow them to be missing for stripped down pygame distributions
try: import cocos.audio.pygame.mixer
except (ImportError,IOError), msg:mixer=MissingModule("mixer", msg, 0)
#there's also a couple "internal" modules not needed
#by users, but putting them here helps "dependency finder"
#programs get everything they need (like py2exe)
try: import cocos.audio.pygame.mixer_music; del cocos.audio.pygame.mixer_music
except (ImportError,IOError):pass
#cleanup namespace
del os, sys, #TODO rwobject, surflock, MissingModule, copy_reg
| apache-2.0 | 1,151,745,897,005,244,400 | 32.967033 | 79 | 0.662476 | false |
sunqm/pyscf | pyscf/fci/direct_spin1_symm.py | 1 | 19417 | #!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Different FCI solvers are implemented to support different types of symmetry.
Symmetry
File                 Point group   Spin singlet   Real hermitian*    Alpha/beta degeneracy
direct_spin0_symm    Yes           Yes            Yes                Yes
direct_spin1_symm    Yes           No             Yes                Yes
direct_spin0         No            Yes            Yes                Yes
direct_spin1         No            No             Yes                Yes
direct_uhf           No            No             Yes                No
direct_nosym         No            No             No**               Yes
* Real hermitian Hamiltonian implies (ij|kl) = (ji|kl) = (ij|lk) = (ji|lk)
** Hamiltonian is real but not hermitian, (ij|kl) != (ji|kl) ...
'''
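# Illustrative usage sketch (hypothetical molecule; kept as a comment): a
# caller normally picks the most restrictive solver the problem allows, e.g.
# for a closed-shell singlet with point-group symmetry:
#
#     from pyscf import gto, scf, fci
#     mol = gto.M(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g', symmetry=True)
#     mf = scf.RHF(mol).run()
#     cisolver = fci.direct_spin0_symm.FCISolver(mol)
#
# This module (direct_spin1_symm) provides the same interface without the
# singlet restriction.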
import sys
import ctypes
import numpy
from pyscf import ao2mo
from pyscf import lib
from pyscf.lib import logger
from pyscf import symm
from pyscf.fci import cistring
from pyscf.fci import direct_spin1
from pyscf.fci import addons
from pyscf.fci.spin_op import contract_ss
from pyscf.fci.addons import _unpack_nelec
from pyscf import __config__
libfci = lib.load_library('libfci')
TOTIRREPS = 8
def contract_1e(f1e, fcivec, norb, nelec, link_index=None, orbsym=None):
return direct_spin1.contract_1e(f1e, fcivec, norb, nelec, link_index)
# Note eri is NOT the 2e hamiltonian matrix, the 2e hamiltonian is
# h2e = eri_{pq,rs} p^+ q r^+ s
# = (pq|rs) p^+ r^+ s q - (pq|rs) \delta_{qr} p^+ s
# so eri is defined as
# eri_{pq,rs} = (pq|rs) - (1/Nelec) \sum_q (pq|qs)
# to restore the symmetry between pq and rs,
# eri_{pq,rs} = (pq|rs) - (.5/Nelec) [\sum_q (pq|qs) + \sum_p (pq|rp)]
# Please refer to the treatment in direct_spin1.absorb_h1e
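# Typical call pattern (sketch, mirroring the energy() helper further below):
#     h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec) * .5
#     hc = contract_2e(h2e, fcivec, norb, nelec, orbsym=orbsym, wfnsym=wfnsym)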
def contract_2e(eri, fcivec, norb, nelec, link_index=None, orbsym=None, wfnsym=0):
if orbsym is None:
return direct_spin1.contract_2e(eri, fcivec, norb, nelec, link_index)
eri = ao2mo.restore(4, eri, norb)
neleca, nelecb = _unpack_nelec(nelec)
link_indexa, link_indexb = direct_spin1._unpack(norb, nelec, link_index)
na, nlinka = link_indexa.shape[:2]
nb, nlinkb = link_indexb.shape[:2]
eri_irs, rank_eri, irrep_eri = reorder_eri(eri, norb, orbsym)
strsa = cistring.gen_strings4orblist(range(norb), neleca)
aidx, link_indexa = gen_str_irrep(strsa, orbsym, link_indexa, rank_eri, irrep_eri)
if neleca == nelecb:
bidx, link_indexb = aidx, link_indexa
else:
strsb = cistring.gen_strings4orblist(range(norb), nelecb)
bidx, link_indexb = gen_str_irrep(strsb, orbsym, link_indexb, rank_eri, irrep_eri)
Tirrep = ctypes.c_void_p*TOTIRREPS
linka_ptr = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in link_indexa])
linkb_ptr = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in link_indexb])
eri_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in eri_irs])
dimirrep = (ctypes.c_int*TOTIRREPS)(*[x.shape[0] for x in eri_irs])
fcivec_shape = fcivec.shape
fcivec = fcivec.reshape((na,nb), order='C')
ci1new = numpy.zeros_like(fcivec)
nas = (ctypes.c_int*TOTIRREPS)(*[x.size for x in aidx])
nbs = (ctypes.c_int*TOTIRREPS)(*[x.size for x in bidx])
# aa, ab
ci0 = []
ci1 = []
for ir in range(TOTIRREPS):
ma, mb = aidx[ir].size, bidx[wfnsym ^ ir].size
ci0.append(numpy.zeros((ma,mb)))
ci1.append(numpy.zeros((ma,mb)))
if ma > 0 and mb > 0:
lib.take_2d(fcivec, aidx[ir], bidx[wfnsym ^ ir], out=ci0[ir])
ci0_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in ci0])
ci1_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in ci1])
libfci.FCIcontract_2e_symm1(eri_ptrs, ci0_ptrs, ci1_ptrs,
ctypes.c_int(norb), nas, nbs,
ctypes.c_int(nlinka), ctypes.c_int(nlinkb),
linka_ptr, linkb_ptr, dimirrep,
ctypes.c_int(wfnsym))
for ir in range(TOTIRREPS):
if ci0[ir].size > 0:
lib.takebak_2d(ci1new, ci1[ir], aidx[ir], bidx[wfnsym ^ ir])
# bb, ba
ci0T = []
for ir in range(TOTIRREPS):
mb, ma = bidx[ir].size, aidx[wfnsym ^ ir].size
ci0T.append(numpy.zeros((mb,ma)))
if ma > 0 and mb > 0:
lib.transpose(ci0[wfnsym ^ ir], out=ci0T[ir])
ci0, ci0T = ci0T, None
ci1 = [numpy.zeros_like(x) for x in ci0]
ci0_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in ci0])
ci1_ptrs = Tirrep(*[x.ctypes.data_as(ctypes.c_void_p) for x in ci1])
libfci.FCIcontract_2e_symm1(eri_ptrs, ci0_ptrs, ci1_ptrs,
ctypes.c_int(norb), nbs, nas,
ctypes.c_int(nlinkb), ctypes.c_int(nlinka),
linkb_ptr, linka_ptr, dimirrep,
ctypes.c_int(wfnsym))
for ir in range(TOTIRREPS):
if ci0[ir].size > 0:
lib.takebak_2d(ci1new, lib.transpose(ci1[ir]), aidx[wfnsym ^ ir], bidx[ir])
return ci1new.reshape(fcivec_shape)
def kernel(h1e, eri, norb, nelec, ci0=None, level_shift=1e-3, tol=1e-10,
lindep=1e-14, max_cycle=50, max_space=12, nroots=1,
davidson_only=False, pspace_size=400, orbsym=None, wfnsym=None,
ecore=0, **kwargs):
assert(len(orbsym) == norb)
cis = FCISolver(None)
cis.level_shift = level_shift
cis.conv_tol = tol
cis.lindep = lindep
cis.max_cycle = max_cycle
cis.max_space = max_space
cis.nroots = nroots
cis.davidson_only = davidson_only
cis.pspace_size = pspace_size
cis.orbsym = orbsym
cis.wfnsym = wfnsym
unknown = {}
for k, v in kwargs.items():
if not hasattr(cis, k):
unknown[k] = v
setattr(cis, k, v)
if unknown:
sys.stderr.write('Unknown keys %s for FCI kernel %s\n' %
(str(unknown.keys()), __name__))
e, c = cis.kernel(h1e, eri, norb, nelec, ci0, ecore=ecore, **unknown)
return e, c
make_rdm1 = direct_spin1.make_rdm1
make_rdm1s = direct_spin1.make_rdm1s
make_rdm12 = direct_spin1.make_rdm12
trans_rdm1s = direct_spin1.trans_rdm1s
trans_rdm1 = direct_spin1.trans_rdm1
trans_rdm12 = direct_spin1.trans_rdm12
def energy(h1e, eri, fcivec, norb, nelec, link_index=None, orbsym=None, wfnsym=0):
h2e = direct_spin1.absorb_h1e(h1e, eri, norb, nelec) * .5
ci1 = contract_2e(h2e, fcivec, norb, nelec, link_index, orbsym, wfnsym)
return numpy.dot(fcivec.ravel(), ci1.ravel())
def _id_wfnsym(cisolver, norb, nelec, orbsym, wfnsym):
'''Guess wfnsym or convert wfnsym to symmetry ID if it's a symmetry label'''
if wfnsym is None:
neleca, nelecb = _unpack_nelec(nelec)
wfnsym = 0 # Ag, A1 or A
for i in orbsym[nelecb:neleca]:
wfnsym ^= i % 10
elif isinstance(wfnsym, str):
wfnsym = symm.irrep_name2id(cisolver.mol.groupname, wfnsym)
# % 10 to convert irrep_ids to irrep of D2h
return wfnsym % 10
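# Worked toy example for the guess above (hypothetical orbsym): with
# orbsym = [0, 3, 2, 5] and (neleca, nelecb) = (3, 2), only orbital 2 is singly
# occupied, so the guessed wfnsym is orbsym[2] % 10 = 2; with no unpaired
# electrons the guess falls back to the totally symmetric irrep 0.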
def _gen_strs_irrep(strs, orbsym):
# % 10 to convert irrep_ids to irrep of D2h
orbsym_in_d2h = numpy.asarray(orbsym) % 10
irreps = numpy.zeros(len(strs), dtype=numpy.int32)
if isinstance(strs, cistring.OIndexList):
nocc = strs.shape[1]
for i in range(nocc):
irreps ^= orbsym_in_d2h[strs[:,i]]
else:
for i, ir in enumerate(orbsym_in_d2h):
irreps[numpy.bitwise_and(strs, 1 << i) > 0] ^= ir
return irreps
def _get_init_guess(airreps, birreps, nroots, hdiag, orbsym, wfnsym=0):
na = len(airreps)
nb = len(birreps)
ci0 = []
iroot = 0
for addr in numpy.argsort(hdiag):
x = numpy.zeros((na*nb))
addra = addr // nb
addrb = addr % nb
if airreps[addra] ^ birreps[addrb] == wfnsym:
x[addr] = 1
ci0.append(x)
iroot += 1
if iroot >= nroots:
break
try:
# Add noise
ci0[0][0 ] += 1e-5
ci0[0][-1] -= 1e-5
except IndexError:
raise IndexError('Configuration of required symmetry (wfnsym=%d) not found' % wfnsym)
return ci0
def get_init_guess(norb, nelec, nroots, hdiag, orbsym, wfnsym=0):
neleca, nelecb = _unpack_nelec(nelec)
strsa = cistring.gen_strings4orblist(range(norb), neleca)
airreps = birreps = _gen_strs_irrep(strsa, orbsym)
if neleca != nelecb:
strsb = cistring.gen_strings4orblist(range(norb), nelecb)
birreps = _gen_strs_irrep(strsb, orbsym)
return _get_init_guess(airreps, birreps, nroots, hdiag, orbsym, wfnsym)
def reorder_eri(eri, norb, orbsym):
if orbsym is None:
return [eri], numpy.arange(norb), numpy.zeros(norb,dtype=numpy.int32)
# % 10 to map irrep IDs of Dooh or Coov, etc. to irreps of D2h, C2v
orbsym = numpy.asarray(orbsym) % 10
# irrep of (ij| pair
trilirrep = (orbsym[:,None] ^ orbsym)[numpy.tril_indices(norb)]
# and the number of occurence for each irrep
dimirrep = numpy.asarray(numpy.bincount(trilirrep), dtype=numpy.int32)
# we sort the irreps of (ij| pair, to group the pairs which have same irreps
# "order" is irrep-id-sorted index. The (ij| paired is ordered that the
# pair-id given by order[0] comes first in the sorted pair
# "rank" is a sorted "order". Given nth (ij| pair, it returns the place(rank)
# of the sorted pair
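    # Toy illustration (hypothetical irreps): for trilirrep == [0, 1, 0, 1],
    #   numpy.where(trilirrep == 0)[0] -> [0, 2]   (ranks 0, 1 within irrep 0)
    #   numpy.where(trilirrep == 1)[0] -> [1, 3]   (ranks 0, 1 within irrep 1)
    # so lib.take_2d below pulls out the 2x2 block of eri belonging to each
    # irrep.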
old_eri_irrep = numpy.asarray(trilirrep, dtype=numpy.int32)
rank_in_irrep = numpy.empty_like(old_eri_irrep)
p0 = 0
eri_irs = [numpy.zeros((0,0))] * TOTIRREPS
for ir, nnorb in enumerate(dimirrep):
idx = numpy.asarray(numpy.where(trilirrep == ir)[0], dtype=numpy.int32)
rank_in_irrep[idx] = numpy.arange(nnorb, dtype=numpy.int32)
eri_irs[ir] = lib.take_2d(eri, idx, idx)
p0 += nnorb
return eri_irs, rank_in_irrep, old_eri_irrep
def gen_str_irrep(strs, orbsym, link_index, rank_eri, irrep_eri):
airreps = _gen_strs_irrep(strs, orbsym)
na = len(airreps)
rank = numpy.zeros(na, dtype=numpy.int32)
aidx = [numpy.zeros(0,dtype=numpy.int32)] * TOTIRREPS
for ir in range(TOTIRREPS):
aidx[ir] = numpy.where(airreps == ir)[0]
ma = len(aidx[ir])
if ma > 0:
rank[aidx[ir]] = numpy.arange(ma, dtype=numpy.int32)
link_index = link_index.copy()
link_index[:,:,1] = irrep_eri[link_index[:,:,0]]
link_index[:,:,0] = rank_eri[link_index[:,:,0]]
link_index[:,:,2] = rank[link_index[:,:,2]]
link_index = [link_index.take(aidx[ir], axis=0) for ir in range(TOTIRREPS)]
return aidx, link_index
class FCISolver(direct_spin1.FCISolver):
davidson_only = getattr(__config__, 'fci_direct_spin1_symm_FCI_davidson_only', True)
# pspace may break point group symmetry
pspace_size = getattr(__config__, 'fci_direct_spin1_symm_FCI_pspace_size', 0)
def __init__(self, mol=None, **kwargs):
direct_spin1.FCISolver.__init__(self, mol, **kwargs)
# wfnsym will be guessed based on initial guess if it is None
self.wfnsym = None
def dump_flags(self, verbose=None):
direct_spin1.FCISolver.dump_flags(self, verbose)
log = logger.new_logger(self, verbose)
if isinstance(self.wfnsym, str):
log.info('Input CI wfn symmetry = %s', self.wfnsym)
elif isinstance(self.wfnsym, (int, numpy.number)):
try:
log.info('Input CI wfn symmetry = %s',
symm.irrep_id2name(self.mol.groupname, self.wfnsym))
except KeyError:
                raise RuntimeError('FCISolver cannot find wfnsym Id %s in group %s. '
'This might be caused by the projection from '
'high-symmetry group to D2h symmetry.' %
(self.wfnsym, self.mol.groupname))
else:
log.info('CI wfn symmetry = %s', self.wfnsym)
return self
def absorb_h1e(self, h1e, eri, norb, nelec, fac=1):
nelec = _unpack_nelec(nelec, self.spin)
return direct_spin1.absorb_h1e(h1e, eri, norb, nelec, fac)
def make_hdiag(self, h1e, eri, norb, nelec):
nelec = _unpack_nelec(nelec, self.spin)
return direct_spin1.make_hdiag(h1e, eri, norb, nelec)
def pspace(self, h1e, eri, norb, nelec, hdiag, np=400):
nelec = _unpack_nelec(nelec, self.spin)
return direct_spin1.pspace(h1e, eri, norb, nelec, hdiag, np)
def contract_1e(self, f1e, fcivec, norb, nelec, link_index=None, **kwargs):
nelec = _unpack_nelec(nelec, self.spin)
return contract_1e(f1e, fcivec, norb, nelec, link_index, **kwargs)
def contract_2e(self, eri, fcivec, norb, nelec, link_index=None,
orbsym=None, wfnsym=None, **kwargs):
if orbsym is None: orbsym = self.orbsym
if wfnsym is None: wfnsym = self.wfnsym
wfnsym = _id_wfnsym(self, norb, nelec, orbsym, wfnsym)
nelec = _unpack_nelec(nelec, self.spin)
return contract_2e(eri, fcivec, norb, nelec, link_index, orbsym, wfnsym, **kwargs)
def get_init_guess(self, norb, nelec, nroots, hdiag):
wfnsym = _id_wfnsym(self, norb, nelec, self.orbsym, self.wfnsym)
nelec = _unpack_nelec(nelec, self.spin)
return get_init_guess(norb, nelec, nroots, hdiag, self.orbsym, wfnsym)
def guess_wfnsym(self, norb, nelec, fcivec=None, orbsym=None, wfnsym=None,
**kwargs):
'''
Guess point group symmetry of the FCI wavefunction. If fcivec is
given, the symmetry of fcivec is used. Otherwise the symmetry is
based on the HF determinant.
'''
if orbsym is None:
orbsym = self.orbsym
verbose = kwargs.get('verbose', None)
log = logger.new_logger(self, verbose)
nelec = _unpack_nelec(nelec, self.spin)
if fcivec is None:
# guess wfnsym if initial guess is not given
wfnsym = _id_wfnsym(self, norb, nelec, orbsym, wfnsym)
log.debug('Guessing CI wfn symmetry = %s', wfnsym)
elif wfnsym is None:
wfnsym = addons.guess_wfnsym(fcivec, norb, nelec, orbsym)
log.debug('Guessing CI wfn symmetry = %s', wfnsym)
else:
# verify if the input wfnsym is consistent with the symmetry of fcivec
neleca, nelecb = nelec
strsa = numpy.asarray(cistring.make_strings(range(norb), neleca))
strsb = numpy.asarray(cistring.make_strings(range(norb), nelecb))
na, nb = strsa.size, strsb.size
orbsym_in_d2h = numpy.asarray(orbsym) % 10
airreps = numpy.zeros(na, dtype=numpy.int32)
birreps = numpy.zeros(nb, dtype=numpy.int32)
for i, ir in enumerate(orbsym_in_d2h):
airreps[numpy.bitwise_and(strsa, 1 << i) > 0] ^= ir
birreps[numpy.bitwise_and(strsb, 1 << i) > 0] ^= ir
wfnsym = _id_wfnsym(self, norb, nelec, orbsym, wfnsym)
mask = (airreps.reshape(-1,1) ^ birreps) == wfnsym
if isinstance(fcivec, numpy.ndarray) and fcivec.ndim <= 2:
fcivec = [fcivec]
if all(abs(c.reshape(na, nb)[mask]).max() < 1e-5 for c in fcivec):
raise RuntimeError('Input wfnsym is not consistent with fcivec coefficients')
return wfnsym
def kernel(self, h1e, eri, norb, nelec, ci0=None,
tol=None, lindep=None, max_cycle=None, max_space=None,
nroots=None, davidson_only=None, pspace_size=None,
orbsym=None, wfnsym=None, ecore=0, **kwargs):
if nroots is None: nroots = self.nroots
if orbsym is None: orbsym = self.orbsym
if wfnsym is None: wfnsym = self.wfnsym
if self.verbose >= logger.WARN:
self.check_sanity()
self.norb = norb
self.nelec = nelec
wfnsym = self.guess_wfnsym(norb, nelec, ci0, orbsym, wfnsym, **kwargs)
with lib.temporary_env(self, orbsym=orbsym, wfnsym=wfnsym):
e, c = direct_spin1.kernel_ms1(self, h1e, eri, norb, nelec, ci0, None,
tol, lindep, max_cycle, max_space,
nroots, davidson_only, pspace_size,
ecore=ecore, **kwargs)
self.eci, self.ci = e, c
return e, c
FCI = FCISolver
if __name__ == '__main__':
from functools import reduce
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.basis = {'H': 'sto-3g',
'O': 'sto-3g',}
mol.symmetry = 1
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron-1
h1e = reduce(numpy.dot, (m.mo_coeff.T, scf.hf.get_hcore(mol), m.mo_coeff))
eri = ao2mo.incore.full(m._eri, m.mo_coeff)
numpy.random.seed(1)
na = cistring.num_strings(norb, nelec//2+1)
nb = cistring.num_strings(norb, nelec//2)
fcivec = numpy.random.random((na,nb))
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)
cis = FCISolver(mol)
cis.orbsym = orbsym
fcivec = addons.symmetrize_wfn(fcivec, norb, nelec, cis.orbsym, wfnsym=0)
ci1 = cis.contract_2e(eri, fcivec, norb, nelec, orbsym=cis.orbsym, wfnsym=0)
ci1ref = direct_spin1.contract_2e(eri, fcivec, norb, nelec)
print(numpy.allclose(ci1ref, ci1))
ci1 = contract_2e(eri, fcivec, norb, nelec, orbsym=orbsym)
ci1ref = direct_spin1.contract_2e(eri, fcivec, norb, nelec)
print(numpy.allclose(ci1ref, ci1))
cis.wfnsym = 3
e = cis.kernel(h1e, eri, norb, nelec, ecore=m.energy_nuc(), davidson_only=True)[0]
print(e, e - -74.695029029452357)
mol.atom = [['H', (0, 0, i)] for i in range(8)]
mol.basis = {'H': 'sto-3g'}
mol.symmetry = True
mol.build()
m = scf.RHF(mol)
ehf = m.scf()
norb = m.mo_coeff.shape[1]
nelec = mol.nelectron + 1
eri = ao2mo.incore.full(m._eri, m.mo_coeff)
na = cistring.num_strings(norb, nelec//2+1)
nb = cistring.num_strings(norb, nelec//2)
fcivec = numpy.random.random((na,nb))
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb, m.mo_coeff)
orbsym = orbsym % 10
fcivec = addons.symmetrize_wfn(fcivec, norb, nelec, orbsym, wfnsym=5)
cis = FCISolver(mol)
cis.orbsym = orbsym
cis.wfnsym = 5
ci1 = cis.contract_2e(eri, fcivec, norb, nelec)
ci1ref = direct_spin1.contract_2e(eri, fcivec, norb, nelec)
print(numpy.allclose(ci1ref, ci1))
| apache-2.0 | -4,856,479,504,770,219,000 | 39.964135 | 93 | 0.600762 | false |
mercycorps/TolaActivity | scripts/workflow_migration.py | 1 | 2604 | from django.apps import apps
from django import db
from django.db import connection
app_models = apps.get_app_config('workflow').get_models()
#rename the app tables from the old activitydb to workflow
def run():
print "Migration"
for app in app_models:
name = str(app._meta.db_table)
new_appname = "tola_activity." + name
temp = name.split("_")
old_appname = "tola_activity.activitydb_" + temp[1]
sql_query = "RENAME TABLE %s TO %s" % (old_appname,new_appname)
print sql_query
#catch any existing tables
try:
# Renaming model from 'Foo' to 'Bar'
with connection.cursor() as cursor:
cursor.execute(sql_query)
except:
"Table Already Exists"
name_list = [
'program_country',
'program_fund_code',
'program_sector',
'program_user_access',
'projectagreement_evaluate',
'projectagreement_capacity',
'projectagreement_stakeholder',
'projectagreement_site',
'projectcomplete_site',
'projectcomplete_stakeholder',
'quantitativeoutputs',
'stakeholder_contact',
'tolauser_countries'
]
for name in name_list:
old_appname = "tola_activity.activitydb_" + name
new_appname = "tola_activity.workflow_" + name
sql_query = "RENAME TABLE %s TO %s" % (old_appname, new_appname)
try:
# Renaming model from 'Foo' to 'Bar'
with connection.cursor() as cursor:
cursor.execute(sql_query)
except:
"Table Already Exists"
# rename formlibrary tables
try:
# Renaming model from 'Foo' to 'Bar'
with connection.cursor() as cursor:
cursor.execute("RENAME TABLE activitydb_beneficiary TO formlibrary_beneficiary")
cursor.execute("RENAME TABLE activitydb_beneficiary_distribution TO formlibrary_beneficiary_distribution")
cursor.execute("RENAME TABLE activitydb_beneficiary_program TO formlibrary_beneficiary_program")
cursor.execute("RENAME TABLE activitydb_beneficiary_training TO formlibrary_beneficiary_training")
cursor.execute("RENAME TABLE activitydb_trainingattendance TO formlibrary_trainingattendance")
cursor.execute("RENAME TABLE activitydb_distribution TO formlibrary_distribution")
except:
"Table Already Exists"
| apache-2.0 | 3,096,823,763,160,694,300 | 35.676056 | 122 | 0.596006 | false |
podemos-info/odoo | addons/warning/warning.py | 4 | 11746 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from osv import fields,osv
from tools.translate import _
WARNING_MESSAGE = [
('no-message','No Message'),
('warning','Warning'),
('block','Blocking Message')
]
WARNING_HELP = _('Selecting the "Warning" option will notify the user with the message. Selecting "Blocking Message" will throw an exception with the message and block the flow. The message has to be written in the next field.')
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'sale_warn' : fields.selection(WARNING_MESSAGE, 'Sale Order', help=WARNING_HELP, required=True),
'sale_warn_msg' : fields.text('Message for Sale Order'),
'purchase_warn' : fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True),
'purchase_warn_msg' : fields.text('Message for Purchase Order'),
'picking_warn' : fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True),
'picking_warn_msg' : fields.text('Message for Stock Picking'),
'invoice_warn' : fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True),
'invoice_warn_msg' : fields.text('Message for Invoice'),
}
_defaults = {
'sale_warn' : 'no-message',
'purchase_warn' : 'no-message',
'picking_warn' : 'no-message',
'invoice_warn' : 'no-message',
}
res_partner()
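# Shape of the result returned by the overridden onchange_* methods below
# (values are illustrative):
#   {'value': {...whatever the base onchange computed...},
#    'warning': {'title': 'Warning for Acme Corp', 'message': 'Pay cash only'}}
# With the 'block' option the methods raise osv.except_osv instead of
# returning a warning dict.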
class sale_order(osv.osv):
_inherit = 'sale.order'
def onchange_partner_id(self, cr, uid, ids, part):
if not part:
return {'value':{'partner_invoice_id': False, 'partner_shipping_id':False, 'partner_order_id':False, 'payment_term' : False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part)
if partner.sale_warn != 'no-message':
if partner.sale_warn == 'block':
raise osv.except_osv(_('Alert for %s !') % (partner.name), partner.sale_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.sale_warn_msg
warning = {
'title': title,
'message': message,
}
result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
sale_order()
class purchase_order(osv.osv):
_inherit = 'purchase.order'
def onchange_partner_id(self, cr, uid, ids, part):
if not part:
return {'value':{'partner_address_id': False}}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, part)
if partner.purchase_warn != 'no-message':
if partner.purchase_warn == 'block':
raise osv.except_osv(_('Alert for %s !') % (partner.name), partner.purchase_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.purchase_warn_msg
warning = {
'title': title,
'message': message
}
result = super(purchase_order, self).onchange_partner_id(cr, uid, ids, part)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
purchase_order()
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
if not partner_id:
return {'value': {
'address_contact_id': False ,
'address_invoice_id': False,
'account_id': False,
'payment_term': False,
}
}
warning = {}
title = False
message = False
partner = self.pool.get('res.partner').browse(cr, uid, partner_id)
if partner.invoice_warn != 'no-message':
if partner.invoice_warn == 'block':
raise osv.except_osv(_('Alert for %s !') % (partner.name), partner.invoice_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.invoice_warn_msg
warning = {
'title': title,
'message': message
}
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False, partner_bank_id=False)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
account_invoice()
class stock_picking(osv.osv):
_inherit = 'stock.picking'
def onchange_partner_in(self, cr, uid, context, partner_id=None):
if not partner_id:
return {}
partner = self.pool.get('res.partner.address').browse(cr, uid, [partner_id])[0].partner_id
warning = {}
title = False
message = False
if partner.picking_warn != 'no-message':
if partner.picking_warn == 'block':
raise osv.except_osv(_('Alert for %s !') % (partner.name), partner.picking_warn_msg)
title = _("Warning for %s") % partner.name
message = partner.picking_warn_msg
warning = {
'title': title,
'message': message
}
result = super(stock_picking, self).onchange_partner_in(cr, uid, context, partner_id)
if result.get('warning',False):
warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title']
warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
stock_picking()
class product_product(osv.osv):
_inherit = 'product.product'
_columns = {
'sale_line_warn' : fields.selection(WARNING_MESSAGE,'Sale Order Line', help=WARNING_HELP, required=True),
'sale_line_warn_msg' : fields.text('Message for Sale Order Line'),
'purchase_line_warn' : fields.selection(WARNING_MESSAGE,'Purchase Order Line', help=WARNING_HELP, required=True),
'purchase_line_warn_msg' : fields.text('Message for Purchase Order Line'),
}
_defaults = {
'sale_line_warn' : 'no-message',
'purchase_line_warn' : 'no-message',
}
product_product()
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False,
fiscal_position=False, flag=False, context=None):
warning = {}
if not product:
return {'value': {'th_weight' : 0, 'product_packaging': False,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.sale_line_warn != 'no-message':
if product_info.sale_line_warn == 'block':
raise osv.except_osv(_('Alert for %s !') % (product_info.name), product_info.sale_line_warn_msg)
title = _("Warning for %s") % product_info.name
message = product_info.sale_line_warn_msg
warning['title'] = title
warning['message'] = message
result = super(sale_order_line, self).product_id_change( cr, uid, ids, pricelist, product, qty,
uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging, fiscal_position, flag, context=context)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
sale_order_line()
class purchase_order_line(osv.osv):
_inherit = 'purchase.order.line'
def onchange_product_id(self,cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order=False, fiscal_position=False, date_planned=False,
name=False, price_unit=False, notes=False, context=None):
warning = {}
if not product:
return {'value': {'price_unit': 0.0, 'name':'','notes':'', 'product_uom' : False}, 'domain':{'product_uom':[]}}
product_obj = self.pool.get('product.product')
product_info = product_obj.browse(cr, uid, product)
title = False
message = False
if product_info.purchase_line_warn != 'no-message':
if product_info.purchase_line_warn == 'block':
raise osv.except_osv(_('Alert for %s !') % (product_info.name), product_info.purchase_line_warn_msg)
title = _("Warning for %s") % product_info.name
message = product_info.purchase_line_warn_msg
warning['title'] = title
warning['message'] = message
result = super(purchase_order_line, self).product_id_change(cr, uid, ids, pricelist, product, qty, uom,
partner_id, date_order, fiscal_position)
if result.get('warning',False):
warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title']
warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message']
return {'value': result.get('value',{}), 'warning':warning}
product_id_change = onchange_product_id
purchase_order_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,550,563,518,757,324,000 | 43.157895 | 224 | 0.583773 | false |
jaruba/chromium.src | chrome/common/extensions/docs/server2/platform_bundle_test.py | 34 | 4427 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from extensions_paths import CHROME_API, CHROME_EXTENSIONS
from mock_file_system import MockFileSystem
from server_instance import ServerInstance
from test_file_system import TestFileSystem
from test_util import ReadFile
_TEST_DATA = {
'api': {
'devtools': {
'inspected_window.json': ReadFile(
CHROME_API, 'devtools', 'inspected_window.json'),
},
'_api_features.json': json.dumps({
'alarms': {},
'app': {'extension_types': ['platform_app']},
'app.runtime': {'noparent': True},
'app.runtime.foo': {'extension_types': ['extension']},
'declarativeWebRequest': {'extension_types': ['extension']},
'devtools.inspectedWindow': {'extension_types': ['extension']},
'input': {'extension_types': 'all'},
'input.ime': {'extension_types': ['extension', 'platform_app']},
'storage': {'extension_types': ['extension']},
}),
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
'alarms.idl': ReadFile(CHROME_API, 'alarms.idl'),
'input_ime.json': ReadFile(CHROME_API, 'input_ime.json'),
'page_action.json': ReadFile(CHROME_API, 'page_action.json'),
},
'docs': {
'templates': {
'json': {
'manifest.json': '{}',
'permissions.json': '{}',
}
}
},
}
class PlatformBundleTest(unittest.TestCase):
def setUp(self):
mock_file_system = MockFileSystem(
TestFileSystem(_TEST_DATA, relative_to=CHROME_EXTENSIONS))
server_instance = ServerInstance.ForTest(file_system=mock_file_system)
self._platform_bundle = server_instance.platform_bundle
def testGetters(self):
self.assertEqual([
'alarms',
'app.runtime',
'declarativeWebRequest',
'devtools.inspectedWindow',
'input',
'storage'
], sorted(self._platform_bundle.GetAPIModels('extensions').GetNames()))
self.assertEqual([
'alarms',
'app',
'app.runtime',
'input'
], sorted(self._platform_bundle.GetAPIModels('apps').GetNames()))
self.assertEqual({
'app.runtime': {
'name': 'app.runtime',
'noparent': True,
'channel': 'stable'
},
'declarativeWebRequest': {
'name': 'declarativeWebRequest',
'channel': 'stable',
'extension_types': ['extension'],
},
'app.runtime.foo': {
'name': 'app.runtime.foo',
'channel': 'stable',
'extension_types': ['extension'],
},
'storage': {
'name': 'storage',
'channel': 'stable',
'extension_types': ['extension'],
},
'input.ime': {
'name': 'input.ime',
'channel': 'stable',
'extension_types': ['extension', 'platform_app'],
},
'alarms': {
'name': 'alarms',
'channel': 'stable'
},
'input': {
'name': 'input',
'channel': 'stable',
'extension_types': 'all'
},
'devtools.inspectedWindow': {
'name': 'devtools.inspectedWindow',
'channel': 'stable',
'extension_types': ['extension'],
}
}, self._platform_bundle.GetFeaturesBundle(
'extensions').GetAPIFeatures().Get())
self.assertEqual({
'app.runtime': {
'name': 'app.runtime',
'noparent': True,
'channel': 'stable'
},
'input': {
'name': 'input',
'channel': 'stable',
'extension_types': 'all'
},
'input.ime': {
'name': 'input.ime',
'channel': 'stable',
'extension_types': ['extension', 'platform_app'],
},
'app': {
'name': 'app',
'channel': 'stable',
'extension_types': ['platform_app'],
},
'alarms': {
'name': 'alarms',
'channel': 'stable'
}
}, self._platform_bundle.GetFeaturesBundle('apps').GetAPIFeatures().Get())
# Check that 'app' is resolved successfully in apps, but is None otherwise.
self.assertNotEqual(
None,
self._platform_bundle.GetReferenceResolver('apps').GetLink('app'))
self.assertEqual(
None,
self._platform_bundle.GetReferenceResolver('extensions').GetLink('app'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -7,554,844,200,217,460,000 | 27.934641 | 80 | 0.567879 | false |
broferek/ansible | test/units/modules/remote_management/lxca/test_lxca_nodes.py | 21 | 4734 | import json
import pytest
from units.compat import mock
from ansible.modules.remote_management.lxca import lxca_nodes
from ansible.module_utils.remote_management.lxca.common import setup_conn
from ansible.module_utils.remote_management.lxca.common import close_conn
@pytest.fixture(scope='module')
@mock.patch("ansible.module_utils.remote_management.lxca.common.close_conn", autospec=True)
def setup_module(close_conn):
close_conn.return_value = True
class TestMyModule():
@pytest.mark.parametrize('patch_ansible_module',
[
{},
{
"auth_url": "https://10.240.14.195",
"login_user": "USERID",
},
{
"auth_url": "https://10.240.14.195",
"login_password": "Password",
},
{
"login_user": "USERID",
"login_password": "Password",
},
],
indirect=['patch_ansible_module'])
@pytest.mark.usefixtures('patch_ansible_module')
@mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
@mock.patch("ansible.modules.remote_management.lxca.lxca_nodes.execute_module", autospec=True)
def test_without_required_parameters(self, _setup_conn, _execute_module,
mocker, capfd, setup_module):
"""Failure must occurs when all parameters are missing"""
with pytest.raises(SystemExit):
_setup_conn.return_value = "Fake connection"
_execute_module.return_value = "Fake execution"
lxca_nodes.main()
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert 'missing required arguments' in results['msg']
@mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
@mock.patch("ansible.modules.remote_management.lxca.lxca_nodes.execute_module", autospec=True)
@mock.patch("ansible.modules.remote_management.lxca.lxca_nodes.AnsibleModule", autospec=True)
def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
expected_arguments_spec = dict(
login_user=dict(required=True),
login_password=dict(required=True, no_log=True),
command_options=dict(default='nodes', choices=['nodes', 'nodes_by_uuid',
'nodes_by_chassis_uuid',
'nodes_status_managed',
'nodes_status_unmanaged']),
auth_url=dict(required=True),
uuid=dict(default=None),
chassis=dict(default=None),
)
_setup_conn.return_value = "Fake connection"
_execute_module.return_value = []
mod_obj = ansible_mod_cls.return_value
args = {
"auth_url": "https://10.243.30.195",
"login_user": "USERID",
"login_password": "password",
"command_options": "nodes",
}
mod_obj.params = args
lxca_nodes.main()
assert(mock.call(argument_spec=expected_arguments_spec,
supports_check_mode=False) == ansible_mod_cls.call_args)
@mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
@mock.patch("ansible.modules.remote_management.lxca.lxca_nodes._nodes_by_uuid",
autospec=True)
@mock.patch("ansible.modules.remote_management.lxca.lxca_nodes.AnsibleModule",
autospec=True)
def test__nodes_empty_list(self, ansible_mod_cls, _get_nodes, _setup_conn, setup_module):
mod_obj = ansible_mod_cls.return_value
args = {
"auth_url": "https://10.243.30.195",
"login_user": "USERID",
"login_password": "password",
"uuid": "3C737AA5E31640CE949B10C129A8B01F",
"command_options": "nodes_by_uuid",
}
mod_obj.params = args
_setup_conn.return_value = "Fake connection"
empty_nodes_list = []
_get_nodes.return_value = empty_nodes_list
ret_nodes = _get_nodes(mod_obj, args)
assert mock.call(mod_obj, mod_obj.params) == _get_nodes.call_args
assert _get_nodes.return_value == ret_nodes
| gpl-3.0 | 5,852,063,080,410,095,000 | 47.306122 | 98 | 0.546895 | false |
mgraupe/acq4 | acq4/util/generator/SeqParamSet.py | 4 | 6918 | import acq4.util.units as units
from acq4.pyqtgraph.parametertree.parameterTypes import SimpleParameter, GroupParameter
import acq4.pyqtgraph as pg
import numpy as np
import acq4.util.functions as fn
import sys, collections
class SequenceParamSet(GroupParameter):
## top-level parameter in the simple stim generator tree
def __init__(self):
GroupParameter.__init__(self, name='SequenceParams', type='group',
addText='Add Sequence Parameter')
self.meta = {}
def addNew(self):
with self.treeChangeBlocker(): ## about to make lots of tree changes;
## suppress change signal until we're done.
ch = self.addChild(SeqParameter())
#if type == 'Pulse':
#ch = self.addChild(PulseParameter())
#elif type == 'Pulse Train':
#ch = self.addChild(PulseTrainParameter())
#else:
#raise Exception('Unknown type %s' % type)
#for ax in self.meta:
#self.setMeta(ax, self.meta[ax], ch)
def compile(self):
params = collections.OrderedDict()
for ch in self:
try:
params[ch.name()] = ch.compile()
except SeqEvalError as ex:
#print sys.exc_info()
raise Exception("'%s.%s': %s" % (ch.name(), ex.name, ex.exc))
except:
raise Exception("'%s': %s" % (ch.name(), str(sys.exc_info()[1])))
return params
def setState(self, state):
with self.treeChangeBlocker():
self.clearChildren()
for k in state:
ch = self.addChild(SeqParameter())
ch.setName(k)
ch.setState(state[k])
def getState(self):
state = collections.OrderedDict()
for ch in self:
state[ch.name()] = ch.getState()
return state
class SeqEvalError(Exception): ## raised when a sequence parameter field fails to evaluate
def __init__(self, name, exc):
Exception.__init__(self)
self.name = name
self.exc = str(exc)
class SeqParameter(GroupParameter):
def __init__(self, **args):
self.evalLocals = units.allUnits.copy()
exec('from numpy import *', self.evalLocals) ## import all of numpy into the eval namespace
args['renamable'] = True
args['removable'] = True
args['name'] = args.get('name', 'Param')
args['autoIncrementName'] = True
args['strictNaming'] = True
args['children'] = [
{'name': 'default', 'type': 'str', 'value': '0'},
{'name': 'sequence', 'type': 'list', 'value': 'off', 'values': ['off', 'range', 'list', 'eval']},
{'name': 'start', 'type': 'str', 'value': '0', 'visible': False},
{'name': 'stop', 'type': 'str', 'value': '0', 'visible': False},
{'name': 'steps', 'type': 'int', 'value': 10, 'visible': False},
{'name': 'log spacing', 'type': 'bool', 'value': False, 'visible': False},
{'name': 'list', 'type': 'str', 'value': '', 'visible': False},
{'name': 'randomize', 'type': 'bool', 'value': False, 'visible': False},
{'name': 'expression', 'type': 'str', 'visible': False},
]
GroupParameter.__init__(self, **args)
#self.sequence.sigValueChanged.connect(self.seqChanged)
self.visibleParams = { ## list of params to display in each mode
'off': ['default', 'sequence'],
'range': ['default', 'sequence', 'start', 'stop', 'steps', 'log spacing', 'randomize'],
'list': ['default', 'sequence', 'list', 'randomize'],
'eval': ['default', 'sequence', 'expression']
}
def treeStateChanged(self, param, changes):
## catch changes to 'sequence' so we can hide/show other params.
## Note: it would be easier to just catch self.sequence.sigValueChanged,
## but this approach allows us to block tree change events so they are all
## released as a single update.
with self.treeChangeBlocker():
## queue up change
GroupParameter.treeStateChanged(self, param, changes)
## if needed, add some more changes before releasing the signal
for param, change, data in changes:
## if the sequence value changes, hide/show other parameters
if param is self.param('sequence') and change == 'value':
vis = self.visibleParams[self['sequence']]
for ch in self:
if ch.name() in vis:
ch.show()
else:
ch.hide()
#def seqChanged(self):
#with self.treeChangeBlocker():
#vis = self.visibleParams[self['sequence']]
#for ch in self:
#if ch.name() in vis:
#ch.show()
#else:
#ch.hide()
def compile(self):
name = self.name()
default = self.evalStr('default')
mode = self['sequence']
if mode == 'off':
seq = []
elif mode == 'range':
start = self.evalStr('start')
stop = self.evalStr('stop')
nPts = self['steps']
if self['log spacing']:
seq = fn.logSpace(start, stop, nPts)
else:
seq = np.linspace(start, stop, nPts)
elif mode == 'list':
seq = list(self.evalStr('list'))
elif mode == 'eval':
seq = self.evalStr('expression')
else:
raise Exception('Unknown sequence mode %s' % mode)
if self['randomize']:
np.random.shuffle(seq)
## sanity check
try:
len(seq)
except:
raise Exception("Parameter %s generated invalid sequence: %s" % (name, str(seq)))
return default, seq
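    # Example state for a 'range' sequence (hypothetical values; keys mirror
    # the child parameters defined in __init__):
    #   {'default': '0', 'sequence': 'range', 'start': '-80e-3',
    #    'stop': '40e-3', 'steps': 7, 'log spacing': False, 'randomize': False}
    # compile() would then return (0, numpy.linspace(-80e-3, 40e-3, 7)).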
def evalStr(self, name):
try:
s = eval(self[name], self.evalLocals)
except:
raise SeqEvalError(name, sys.exc_info()[1])
return s
def setState(self, state):
for k in state:
self[k] = state[k]
self.param(k).setDefault(state[k])
def getState(self):
state = collections.OrderedDict()
for ch in self:
if not ch.opts['visible']:
continue
name = ch.name()
val = ch.value()
if val is False:
continue
state[name] = val
return state
| mit | -6,556,013,341,453,065,000 | 35.797872 | 109 | 0.500867 | false |
kerimlcr/ab2017-dpyo | ornek/lollypop/lollypop-0.9.229/src/loader.py | 1 | 1995 | # Copyright (c) 2014-2016 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GLib
from threading import Thread, Lock
class Loader(Thread):
"""
Helper to load data on a separate thread and
dispatch it to the UI thread
"""
active = {}
active_lock = Lock()
def __init__(self, target, view=None, on_finished=None):
Thread.__init__(self)
self.daemon = True
self._target = target
self._view = view
self._on_finished = on_finished
self._invalidated = False
self._invalidated_lock = Lock()
def is_invalidated(self):
with self._invalidated_lock:
return self._invalidated
def invalidate(self):
with self._invalidated_lock:
self._invalidated = True
def run(self):
with Loader.active_lock:
active = Loader.active.get(self._view, None)
if active:
active.invalidate()
Loader.active[self._view] = self
result = self._target()
if not self.is_invalidated():
if self._on_finished:
GLib.idle_add(self._on_finished, (result))
elif self._view:
GLib.idle_add(self._view.populate, (result))
with Loader.active_lock:
Loader.active.pop(self._view, None)
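    # Usage sketch (hypothetical callables): run a slow lookup off the main
    # loop and hand the result back to a view on the GLib thread.
    #   loader = Loader(target=lambda: fetch_album_ids(), view=albums_view)
    #   loader.start()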
| gpl-3.0 | -5,487,275,079,339,246,000 | 34.625 | 76 | 0.637093 | false |
onyb/dune | runapiserver.py | 1 | 2003 | from core.api import API
from core.exceptions import *
from core.utils.Colorize import Colorize
from core.utils.check_sanity import *
def main():
# Order of checks is important
checks = [
(
check_environment,
'Check current shell environment',
OPAMConfigurationError
),
(
check_mirage,
'Check MirageOS',
UnikernelLibraryNotFound
),
(
is_not_root,
'Check if current user is NOT "root"',
ExcessivePrivilegeError
),
(
check_privilege,
'Check if user "%s" has the required privilege to run APT' % getuser(), # User cannot be "root" here
InsufficientPrivilegeError
),
(
check_redis_server,
'Check running instance of Redis server',
RedisServerNotFound
),
(
check_mongod_server,
'Check running instance of MongoDB server',
MongoDBServerNotFound
),
(
check_redis_queue,
'Check Python Redis Queue worker',
RedisQueueException
)
]
print(
' ' + '-' * 79 + '\n',
Colorize.light_purple('Performing startup sanity check') + '\n',
'-' * 79
)
for check in checks:
if not check[0]():
print(
Colorize.light_purple(' *'), check[1], '.' * (80 - len(check[1]) - 12), Colorize.red('FAILURE')
)
raise check[2]
else:
print(
Colorize.light_purple(' *'), check[1], '.' * (80 - len(check[1]) - 12), Colorize.green('SUCCESS')
)
port = int(
os.environ.get(
'PORT',
5000
)
)
API().create_app()
API().create_mongo()
API.app.run(
host='0.0.0.0',
port=port,
use_reloader=False
)
if __name__ == "__main__":
main()
| apache-2.0 | 1,031,202,060,903,405,700 | 23.426829 | 113 | 0.480779 | false |
jcoady9/youtube-dl | youtube_dl/extractor/chirbit.py | 4 | 2804 | # coding: utf-8
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..utils import parse_duration
class ChirbitIE(InfoExtractor):
IE_NAME = 'chirbit'
_VALID_URL = r'https?://(?:www\.)?chirb\.it/(?:(?:wp|pl)/|fb_chirbit_player\.swf\?key=)?(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'http://chirb.it/be2abG',
'info_dict': {
'id': 'be2abG',
'ext': 'mp3',
'title': 'md5:f542ea253f5255240be4da375c6a5d7e',
'description': 'md5:f24a4e22a71763e32da5fed59e47c770',
'duration': 306,
},
'params': {
'skip_download': True,
}
}, {
'url': 'https://chirb.it/fb_chirbit_player.swf?key=PrIPv5',
'only_matching': True,
}, {
'url': 'https://chirb.it/wp/MN58c2',
'only_matching': True,
}]
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(
'http://chirb.it/%s' % audio_id, audio_id)
data_fd = self._search_regex(
r'data-fd=(["\'])(?P<url>(?:(?!\1).)+)\1',
webpage, 'data fd', group='url')
# Reverse engineered from https://chirb.it/js/chirbit.player.js (look
# for soundURL)
audio_url = base64.b64decode(
data_fd[::-1].encode('ascii')).decode('utf-8')
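        # Worked toy example: a (made-up) data-fd value of '=g3LvoDc0RHa'
        # reverses to 'aHR0cDovL3g=', which base64-decodes to 'http://x'.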
title = self._search_regex(
r'class=["\']chirbit-title["\'][^>]*>([^<]+)', webpage, 'title')
description = self._search_regex(
r'<h3>Description</h3>\s*<pre[^>]*>([^<]+)</pre>',
webpage, 'description', default=None)
duration = parse_duration(self._search_regex(
r'class=["\']c-length["\'][^>]*>([^<]+)',
webpage, 'duration', fatal=False))
return {
'id': audio_id,
'url': audio_url,
'title': title,
'description': description,
'duration': duration,
}
class ChirbitProfileIE(InfoExtractor):
IE_NAME = 'chirbit:profile'
_VALID_URL = r'https?://(?:www\.)?chirbit.com/(?:rss/)?(?P<id>[^/]+)'
_TEST = {
'url': 'http://chirbit.com/ScarletBeauty',
'info_dict': {
'id': 'ScarletBeauty',
'title': 'Chirbits by ScarletBeauty',
},
'playlist_mincount': 3,
}
def _real_extract(self, url):
profile_id = self._match_id(url)
rss = self._download_xml(
'http://chirbit.com/rss/%s' % profile_id, profile_id)
entries = [
self.url_result(audio_url.text, 'Chirbit')
for audio_url in rss.findall('./channel/item/link')]
title = rss.find('./channel/title').text
return self.playlist_result(entries, profile_id, title)
| unlicense | -5,642,764,742,923,788,000 | 30.155556 | 112 | 0.516405 | false |
sachitanandpandey/sos_spandey | sos/plugins/i18n.py | 5 | 1102 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class I18n(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""Internationalization
"""
plugin_name = 'i18n'
profiles = ('system',)
def setup(self):
self.add_copy_spec([
"/etc/X11/xinit/xinput.d/*",
"/etc/locale.conf"
])
self.add_cmd_output("locale")
# vim: et ts=4 sw=4
| gpl-2.0 | -949,272,584,213,583,700 | 33.4375 | 72 | 0.705989 | false |
ict-felix/stack | ofam/src/src/ext/geni/util/cred_util.py | 3 | 15129 | #----------------------------------------------------------------------
# Copyright (c) 2010 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''
Credential creation and verification utilities.
'''
import os
import logging
import xmlrpclib
import sys
import datetime
import dateutil
import sfa.trust.credential as cred
import sfa.trust.gid as gid
import sfa.trust.rights as rights
from sfa.util.xrn import hrn_authfor_hrn
def naiveUTC(dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
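# Example (hypothetical timestamp): an aware datetime at UTC-4 is shifted to
# UTC and stripped of its tzinfo, so it compares safely with naive datetimes.
#   aware = datetime.datetime(2010, 6, 1, 12, 0,
#                             tzinfo=dateutil.tz.tzoffset(None, -4 * 3600))
#   naiveUTC(aware)   # -> datetime.datetime(2010, 6, 1, 16, 0)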
class CredentialVerifier(object):
"""Utilities to verify signed credentials from a given set of
root certificates. Will compare target and source URNs, and privileges.
See verify and verify_from_strings methods in particular."""
CATEDCERTSFNAME = 'CATedCACerts.pem'
# root_cert_file is a trusted root file file or directory of
# trusted roots for verifying credentials
def __init__(self, root_cert_fileordir):
self.logger = logging.getLogger('cred-verifier')
if root_cert_fileordir is None:
raise Exception("Missing Root certs argument")
elif os.path.isdir(root_cert_fileordir):
files = os.listdir(root_cert_fileordir)
self.root_cert_files = []
for file in files:
# FIXME: exclude files that aren't cert files? The combo cert file?
if file == CredentialVerifier.CATEDCERTSFNAME:
continue
self.root_cert_files.append(os.path.expanduser(os.path.join(root_cert_fileordir, file)))
self.logger.info('Will accept credentials signed by any of %d root certs found in %s: %r' % (len(self.root_cert_files), root_cert_fileordir, self.root_cert_files))
elif os.path.isfile(root_cert_fileordir):
self.logger.info('Will accept credentials signed by the single root cert %s' % root_cert_fileordir)
self.root_cert_files = [root_cert_fileordir]
else:
raise Exception("Couldn't find Root certs in %s" % root_cert_fileordir)
@classmethod
def getCAsFileFromDir(cls, caCerts):
'''Take a directory of CA certificates and concatenate them into a single
file suitable for use by the Python SSL library to validate client
credentials. Existing file is replaced.'''
if caCerts is None:
raise Exception ('Missing caCerts argument')
if os.path.isfile(os.path.expanduser(caCerts)):
return caCerts
if not os.path.isdir(os.path.expanduser(caCerts)):
raise Exception ('caCerts arg Not a file or a dir: %s' % caCerts)
logger = logging.getLogger('cred-verifier')
# Now we have a dir of caCerts files
# For each file in the dir (isfile), concatenate them into a new file
comboFullPath = os.path.join(caCerts, CredentialVerifier.CATEDCERTSFNAME)
caFiles = os.listdir(caCerts)
#logger.debug('Got %d potential caCert files in the dir', len(caFiles))
outfile = open(comboFullPath, "w")
okFileCount = 0
for filename in caFiles:
filepath = os.path.join(caCerts, filename)
# Confirm it's a CA file?
# if not file.endswith('.pem'):
# continue
if not os.path.isfile(os.path.expanduser(filepath)):
logger.debug('Skipping non file %s', filepath)
continue
if filename == CredentialVerifier.CATEDCERTSFNAME:
# logger.debug('Skipping previous cated certs file')
continue
okFileCount += 1
logger.info("Adding trusted cert file %s", filename)
certfile = open(filepath)
for line in certfile:
outfile.write(line)
certfile.close()
outfile.close()
if okFileCount == 0:
sys.exit('Found NO trusted certs in %s!' % caCerts)
else:
logger.info('Combined dir of %d trusted certs %s into file %s for Python SSL support', okFileCount, caCerts, comboFullPath)
return comboFullPath
def verify_from_strings(self, gid_string, cred_strings, target_urn,
privileges):
'''Create Credential and GID objects from the given strings,
and then verify the GID has the right privileges according
to the given credentials on the given target.'''
if gid_string is None:
return
def make_cred(cred_string):
return cred.Credential(string=cred_string)
return self.verify(gid.GID(string=gid_string),
map(make_cred, cred_strings),
target_urn,
privileges)
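    # Usage sketch (illustrative path and privilege name; the privilege string
    # depends on the calling aggregate's policy table):
    #   verifier = CredentialVerifier('/path/to/trusted_roots')
    #   creds_ok = verifier.verify_from_strings(client_cert_pem,
    #                                           [user_cred_xml],
    #                                           None, ('createsliver',))
    # On success this returns the credentials that granted the privileges;
    # otherwise an Exception describing the failure is raised.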
def verify_source(self, source_gid, credential):
'''Ensure the credential is giving privileges to the caller/client.
Return True iff the given source (client) GID's URN
is == the given credential's Caller (Owner) URN'''
source_urn = source_gid.get_urn()
cred_source_urn = credential.get_gid_caller().get_urn()
#self.logger.debug('Verifying source %r against credential source %r (cred target %s)',
# source_urn, cred_source_urn, credential.get_gid_object().get_urn())
result = (cred_source_urn == source_urn)
if result:
# self.logger.debug('Source URNs match')
pass
else:
self.logger.debug('Source URNs do not match. Source URN %r != credential source URN %r', source_urn, cred_source_urn)
return result
def verify_target(self, target_urn, credential):
'''Ensure the credential is giving privileges on the right subject/target.
Return True if no target is specified, or the target URN
matches the credential's Object's (target's) URN, else return False.
No target is required, for example, to ListResources.'''
if not target_urn:
# self.logger.debug('No target specified, considering it a match.')
return True
else:
cred_target_urn = credential.get_gid_object().get_urn()
# self.logger.debug('Verifying target %r against credential target %r',
# target_urn, cred_target_urn)
result = target_urn == cred_target_urn
if result:
# self.logger.debug('Target URNs match.')
pass
else:
self.logger.debug('Target URNs do NOT match. Target URN %r != Credential URN %r', target_urn, cred_target_urn)
return result
def verify_privileges(self, privileges, credential):
''' Return True iff the given credential gives the privilege
to perform ALL of the privileges (actions) in the given list.
In particular, the given list of 'privileges' is really a list
of names of operations. The privileges in credentials are
each turned in to Rights objects (see sfa/trust/rights.py).
And the SFA rights table is used to map from names of privileges
as specified in credentials, to names of operations.'''
result = True
privs = credential.get_privileges()
for priv in privileges:
if not privs.can_perform(priv):
self.logger.debug('Privilege %s not found on credential %s of %s', priv, credential.get_gid_object().get_urn(), credential.get_gid_caller().get_urn())
result = False
return result
def verify(self, gid, credentials, target_urn, privileges):
'''Verify that the given Source GID supplied at least one credential
in the given list of credentials that has all the privileges required
in the privileges list on the given target.
IE if any of the supplied credentials has a caller that matches gid
and a target that matches target_urn, and has all the privileges in
the given list, then return the list of credentials that were ok.
Throw an Exception if we fail to verify any credential.'''
# Note that here we treat a list of credentials as being options
# Alternatively could accumulate privileges for example
# The semantics of the list of credentials is under specified.
self.logger.debug('Verifying privileges')
result = list()
failure = ""
tried_creds = ""
for cred in credentials:
if tried_creds != "":
tried_creds = "%s, %s" % (tried_creds, cred.get_gid_caller().get_urn())
else:
tried_creds = cred.get_gid_caller().get_urn()
if not self.verify_source(gid, cred):
failure = "Cred %s fails: Source URNs dont match" % cred.get_gid_caller().get_urn()
continue
if not self.verify_target(target_urn, cred):
failure = "Cred %s on %s fails: Target URNs dont match" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
continue
if not self.verify_privileges(privileges, cred):
failure = "Cert %s doesn't have sufficient privileges" % cred.get_gid_caller().get_urn()
continue
print
try:
if not cred.verify(self.root_cert_files):
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files))
continue
except Exception, exc:
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs: %s: %s" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files), exc.__class__.__name__, exc)
self.logger.info(failure)
continue
# If got here it verified
result.append(cred)
if result and result != list():
# At least one credential verified ok and was added to the list
# return that list
return result
else:
# We did not find any credential with sufficient privileges
# Raise an exception.
fault_code = 'Insufficient privileges'
fault_string = 'No credential was found with appropriate privileges. Tried %s. Last failure: %s' % (tried_creds, failure)
self.logger.error(fault_string)
raise xmlrpclib.Fault(fault_code, fault_string)
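# Illustrative usage sketch (added for clarity; not part of the original module).
# The root-cert directory, GID/credential strings and the target URN below are
# placeholders/assumptions, not values defined elsewhere in this file.
#
#     verifier = CredentialVerifier('/etc/geni/trusted_roots')
#     valid_creds = verifier.verify_from_strings(
#         client_gid_string,                     # PEM string of the caller's GID
#         [cred_xml_string],                     # signed credential XML strings
#         'urn:publicid:IDN+example+slice+foo',  # hypothetical target slice URN
#         ['listresources'])                     # privileges needed for the call
#     # verify() raises xmlrpclib.Fault if no supplied credential suffices.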
def create_credential(caller_gid, object_gid, expiration, typename, issuer_keyfile, issuer_certfile, trusted_roots, delegatable=False):
    '''Create and return a Credential object issued by the given key/cert for
    the given caller and object GID objects, with the given expiration time
    and credential type.
    Privileges are determined by type per sfa/trust/rights.py
    Privileges are delegatable if requested.'''
# FIXME: Validate args: my gids, >0 life,
# type of cred one I can issue
# and readable key and cert files
if caller_gid is None:
raise ValueError("Missing Caller GID")
if object_gid is None:
raise ValueError("Missing Object GID")
if expiration is None:
raise ValueError("Missing expiration")
naive_expiration = naiveUTC(expiration)
duration = naive_expiration - datetime.datetime.utcnow()
life_secs = duration.seconds + duration.days * 24 * 3600
if life_secs < 1:
raise ValueError("Credential expiration is in the past")
if trusted_roots is None:
raise ValueError("Missing list of trusted roots")
if typename is None or typename.strip() == '':
raise ValueError("Missing credential type")
typename = typename.strip().lower()
if typename not in ("user", "sa", "ma", "authority", "slice", "component"):
raise ValueError("Unknown credential type %s" % typename)
if not os.path.isfile(issuer_keyfile):
raise ValueError("Cant read issuer key file %s" % issuer_keyfile)
if not os.path.isfile(issuer_certfile):
raise ValueError("Cant read issuer cert file %s" % issuer_certfile)
issuer_gid = gid.GID(filename=issuer_certfile)
if not (object_gid.get_urn() == issuer_gid.get_urn() or
(issuer_gid.get_type().find('authority') == 0 and
hrn_authfor_hrn(issuer_gid.get_hrn(), object_gid.get_hrn()))):
raise ValueError("Issuer not authorized to issue credential: Issuer=%s Target=%s" % (issuer_gid.get_urn(), object_gid.get_urn()))
ucred = cred.Credential()
# FIXME: Validate the caller_gid and object_gid
# are my user and slice
# Do get_issuer and compare to the issuer cert?
# Or do gid.is_signed_by_cert(issuer_certfile)?
ucred.set_gid_caller(caller_gid)
ucred.set_gid_object(object_gid)
ucred.set_expiration(expiration)
# Use sfa/trust/rights.py to figure out what privileges
# the credential should have.
# user means refresh, resolve, info
# per the privilege_table that lets users do
# remove, update, resolve, list, getcredential,
# listslices, listnodes, getpolicy
# Note that it does not allow manipulating slivers
# And every right is delegatable if any are delegatable (default False)
privileges = rights.determine_rights(typename, None)
privileges.delegate_all_privileges(delegatable)
ucred.set_privileges(privileges)
ucred.encode()
ucred.set_issuer_keys(issuer_keyfile, issuer_certfile)
ucred.sign()
try:
ucred.verify(trusted_roots)
except Exception, exc:
raise Exception("Create Credential failed to verify new credential from trusted roots: %s" % exc)
return ucred
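# Illustrative sketch (added for clarity; not part of the original module):
# issuing a slice credential with create_credential(). The GIDs, file paths and
# expiration below are assumptions for the example only.
#
#     import datetime
#     expiration = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
#     slice_cred = create_credential(
#         caller_gid=user_gid,                        # gid.GID of the requesting user
#         object_gid=slice_gid,                       # gid.GID of the slice
#         expiration=expiration,
#         typename='slice',
#         issuer_keyfile='/etc/geni/sa-key.pem',      # hypothetical issuer key
#         issuer_certfile='/etc/geni/sa-cert.pem',    # hypothetical issuer cert
#         trusted_roots=['/etc/geni/trusted_roots/ca.pem'],
#         delegatable=False)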
| apache-2.0 | -9,199,270,352,480,334,000 | 44.984802 | 250 | 0.63375 | false |
schoonc/AutobahnPython | examples/twisted/wamp/rpc/slowsquare/frontend.py | 8 | 2561 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import time
from os import environ
from twisted.internet import reactor
from twisted.internet.defer import DeferredList
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component using the time service.
"""
def onJoin(self, details):
print("session attached")
def got(res, started, msg):
duration = 1000. * (time.clock() - started)
print("{}: {} in {}".format(msg, res, duration))
t1 = time.clock()
d1 = self.call(u'com.math.slowsquare', 3)
d1.addCallback(got, t1, "Slow Square")
t2 = time.clock()
d2 = self.call(u'com.math.square', 3)
d2.addCallback(got, t2, "Quick Square")
def done(_):
print("All finished.")
self.leave()
DeferredList([d1, d2]).addBoth(done)
def onDisconnect(self):
print("disconnected")
reactor.stop()
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
| mit | -8,066,680,149,916,182,000 | 33.608108 | 79 | 0.639984 | false |
dargor/python-guess-random-color | Color.py | 1 | 2444 | #
# Copyright (c) 2016, Gabriel Linder <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
from random import randint
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
class Color:
@classmethod
def from_rgb(cls, rgb):
c = sRGBColor.new_from_rgb_hex(rgb).get_upscaled_value_tuple()
return cls(c[0], c[1], c[2])
def __init__(self, red, green, blue):
assert 0 <= red <= 255
self.red = red
assert 0 <= green <= 255
self.green = green
assert 0 <= blue <= 255
self.blue = blue
self.Lab = convert_color(sRGBColor(self.red,
self.green,
self.blue,
is_upscaled=True),
LabColor,
target_illuminant='d50')
def delta_e(self, color):
return delta_e_cie2000(self.Lab, color.Lab)
@property
def rgb(self):
return '#{:02X}{:02X}{:02X}'.format(self.red,
self.green,
self.blue)
def __str__(self):
return '\033[38;2;{};{};{}m{}\033[0m'.format(self.red,
self.green,
self.blue,
'\u2588' * 4)
class RandomColor(Color):
def __init__(self):
red = randint(0, 255)
green = randint(0, 255)
blue = randint(0, 255)
super().__init__(red, green, blue)
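# Illustrative usage sketch (added; not part of the original file): comparing a
# guessed colour against a randomly drawn one with the CIEDE2000 distance.
#
#     target = RandomColor()
#     guess = Color.from_rgb('#336699')
#     print(target, guess, 'delta E = {:.1f}'.format(target.delta_e(guess)))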
| isc | -5,656,948,300,875,733,000 | 32.479452 | 79 | 0.550327 | false |
jgors/duecredit | duecredit/io.py | 1 | 11094 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the duecredit package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
from citeproc.source.bibtex import BibTeX as cpBibTeX
import citeproc as cp
import time
from collections import defaultdict, Iterator
import copy
import os
from os.path import dirname, exists
import pickle
import requests
import tempfile
from six import PY2, itervalues, iteritems
import warnings
from .config import CACHE_DIR, DUECREDIT_FILE
from .entries import BibTeX, Doi
from .log import lgr
def get_doi_cache_file(doi):
return os.path.join(CACHE_DIR, doi)
def import_doi(doi):
cached = get_doi_cache_file(doi)
if exists(cached):
with open(cached) as f:
doi = f.read()
if PY2:
return doi.decode('utf-8')
return doi
# else -- fetch it
headers = {'Accept': 'text/bibliography; style=bibtex'}
url = 'http://dx.doi.org/' + doi
retries = 10
while retries > 0:
r = requests.get(url, headers=headers)
r.encoding = 'UTF-8'
bibtex = r.text.strip()
if bibtex.startswith('@'):
# no more retries necessary
break
lgr.warning("Failed to obtain bibtex from doi.org, retrying...")
time.sleep(0.5) # give some time to the server
retries -= 1
status_code = r.status_code
if not bibtex.startswith('@'):
raise ValueError('Query %(url)s for BibTeX for a DOI %(doi)s (wrong doi?) has failed. '
'Response code %(status_code)d. '
#'BibTeX response was: %(bibtex)s'
% locals())
if not exists(cached):
cache_dir = dirname(cached)
if not exists(cache_dir):
os.makedirs(cache_dir)
with open(cached, 'w') as f:
if PY2:
f.write(bibtex.encode('utf-8'))
else:
f.write(bibtex)
return bibtex
class EnumeratedEntries(Iterator):
"""A container of entries enumerated referenced by their entry_key"""
def __init__(self):
self._keys2refnr = {}
self._refnr2keys = {}
self._refnr = 1
def add(self, entry_key):
"""Add entry_key and update refnr"""
if entry_key not in self._keys2refnr:
self._keys2refnr[entry_key] = self._refnr
self._refnr2keys[self._refnr] = entry_key
self._refnr += 1
def __getitem__(self, item):
if item not in self._keys2refnr:
raise KeyError('{0} not present'.format(item))
return self._keys2refnr[item]
def fromrefnr(self, refnr):
if refnr not in self._refnr2keys:
raise KeyError('{0} not present'.format(refnr))
return self._refnr2keys[refnr]
def __iter__(self):
return iteritems(self._keys2refnr)
# Python 3
def __next__(self):
return self.next()
def next(self):
yield next(self.__iter__())
def __len__(self):
return len(self._keys2refnr)
class TextOutput(object): # TODO some parent class to do what...?
def __init__(self, fd, collector, style=None):
self.fd = fd
self.collector = collector
# TODO: check that CLS style actually exists
self.style = style
if 'DUECREDIT_STYLE' in os.environ.keys():
self.style = os.environ['DUECREDIT_STYLE']
else:
self.style = 'harvard1'
# TODO: refactor name to sth more intuitive
def _model_citations(self, tags=None):
if not tags:
tags = os.environ.get('DUECREDIT_REPORT_TAGS', 'reference-implementation,implementation').split(',')
tags = set(tags)
citations = self.collector.citations
if tags != {'*'}:
# Filter out citations
citations = dict((k, c)
for k, c in iteritems(citations)
if tags.intersection(c.tags))
packages = {}
modules = {}
objects = {}
for key in ('citations', 'entry_keys'):
packages[key] = defaultdict(list)
modules[key] = defaultdict(list)
objects[key] = defaultdict(list)
# for each path store both a list of entry keys and of citations
for (path, entry_key), citation in iteritems(citations):
if ':' in path:
target_dict = objects
elif '.' in path:
target_dict = modules
else:
target_dict = packages
target_dict['citations'][path].append(citation)
target_dict['entry_keys'][path].append(entry_key)
return packages, modules, objects
def dump(self, tags=None):
# get 'model' of citations
packages, modules, objects = self._model_citations(tags)
# mapping key -> refnr
enum_entries = EnumeratedEntries()
citations_ordered = []
# set up view
# package level
sublevels = [modules, objects]
for package in sorted(packages['entry_keys']):
for entry_key in packages['entry_keys'][package]:
enum_entries.add(entry_key)
citations_ordered.append(package)
# sublevels
for sublevel in sublevels:
for obj in sorted(filter(lambda x: package in x, sublevel['entry_keys'])):
for entry_key_obj in sublevel['entry_keys'][obj]:
enum_entries.add(entry_key_obj)
citations_ordered.append(obj)
# Now we can "render" different views of our "model"
# Here for now just text BUT that is where we can "split" the logic and provide
# different renderings given the model -- text, rest, md, tex+latex, whatever
self.fd.write('\nDueCredit Report:\n')
for path in citations_ordered:
if ':' in path:
self.fd.write(' ')
target_dict = objects
elif '.' in path:
self.fd.write(' ')
target_dict = modules
else:
target_dict = packages
# TODO: absorb common logic into a common function
citations = target_dict['citations'][path]
entry_keys = target_dict['entry_keys'][path]
descriptions = sorted(map(str, set(str(r.description) for r in citations)))
versions = sorted(map(str, set(str(r.version) for r in citations)))
refnrs = sorted([str(enum_entries[entry_key]) for entry_key in entry_keys])
self.fd.write('- {0} / {1} (v {2}) [{3}]\n'.format(
", ".join(descriptions), path, ', '.join(versions), ', '.join(refnrs)))
# Print out some stats
obj_names = ('packages', 'modules', 'functions')
n_citations = map(len, (packages['citations'], modules['citations'], objects['citations']))
for citation_type, n in zip(obj_names, n_citations):
self.fd.write('\n{0} {1} cited'.format(n, citation_type))
if enum_entries:
citations_fromentrykey = self.collector._citations_fromentrykey()
self.fd.write('\n\nReferences\n' + '-' * 10 + '\n')
# collect all the entries used
refnr_key = [(nr, enum_entries.fromrefnr(nr)) for nr in range(1, len(enum_entries)+1)]
for nr, key in refnr_key:
self.fd.write('\n[{0}] '.format(nr))
self.fd.write(get_text_rendering(citations_fromentrykey[key], style=self.style))
self.fd.write('\n')
def get_text_rendering(citation, style='harvard1'):
from .collector import Citation
entry = citation.entry
if isinstance(entry, Doi):
bibtex_rendering = get_bibtex_rendering(entry)
bibtex_citation = copy.copy(citation)
bibtex_citation.set_entry(bibtex_rendering)
        return get_text_rendering(bibtex_citation, style=style)
elif isinstance(entry, BibTeX):
return format_bibtex(entry, style=style)
else:
return str(entry)
def get_bibtex_rendering(entry):
if isinstance(entry, Doi):
return BibTeX(import_doi(entry.doi))
elif isinstance(entry, BibTeX):
return entry
else:
raise ValueError("Have no clue how to get bibtex out of %s" % entry)
def format_bibtex(bibtex_entry, style='harvard1'):
key = bibtex_entry.get_key()
# need to save it temporarily to use citeproc-py
fname = tempfile.mktemp(suffix='.bib')
try:
with open(fname, 'wt') as f:
bibtex = bibtex_entry.rawentry
bibtex = bibtex.replace(u'\u2013', '--') + "\n"
# TODO: manage to save/use UTF-8
if PY2:
bibtex = bibtex.encode('ascii', 'ignore')
f.write(bibtex)
# We need to avoid cpBibTex spitting out warnings
old_filters = warnings.filters[:] # store a copy of filters
warnings.simplefilter('ignore', UserWarning)
try:
bib_source = cpBibTeX(fname)
except Exception as e:
lgr.error("Failed to process BibTeX file %s" % fname)
return "ERRORED: %s" % str(e)
finally:
# return warnings back
warnings.filters = old_filters
bib_style = cp.CitationStylesStyle(style, validate=False)
# TODO: specify which tags of formatter we want
bibliography = cp.CitationStylesBibliography(bib_style, bib_source,
cp.formatter.plain)
citation = cp.Citation([cp.CitationItem(key)])
bibliography.register(citation)
finally:
os.unlink(fname)
biblio_out = bibliography.bibliography()
assert(len(biblio_out) == 1)
biblio_out = ''.join(biblio_out[0])
return biblio_out # if biblio_out else str(bibtex_entry)
# TODO: harmonize order of arguments
class PickleOutput(object):
def __init__(self, collector, fn=DUECREDIT_FILE):
self.collector = collector
self.fn = fn
def dump(self):
with open(self.fn, 'wb') as f:
pickle.dump(self.collector, f)
@classmethod
def load(cls, filename=DUECREDIT_FILE):
with open(filename, 'rb') as f:
return pickle.load(f)
class BibTeXOutput(object): # TODO some parent class to do what...?
def __init__(self, fd, collector):
self.fd = fd
self.collector = collector
def dump(self):
for citation in self.collector.citations.values():
try:
bibtex = get_bibtex_rendering(citation.entry)
except:
lgr.warning("Failed to generate bibtex for %s" % citation.entry)
continue
self.fd.write(bibtex.rawentry + "\n")
def load_due(filename):
return PickleOutput.load(filename)
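# Illustrative sketch (added; not part of the original module): rendering a
# previously collected duecredit cache. Writing to sys.stdout and the '*' tag
# filter are assumptions for the example.
#
#     import sys
#     collector = load_due(DUECREDIT_FILE)
#     TextOutput(sys.stdout, collector, style='harvard1').dump(tags=['*'])
#     BibTeXOutput(sys.stdout, collector).dump()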
| bsd-2-clause | 7,645,720,196,949,251,000 | 35.019481 | 112 | 0.570669 | false |
mlassnig/pilot | HPC/Logger.py | 3 | 1403 | import logging
import inspect
import sys
import time
loggerMap = {}
class Logger:
def __init__(self, filename=None):
# get logger name
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
        if mod is None or mod.__name__ == '__main__':
modName = 'main'
else:
modName = '.'.join(mod.__name__.split('.')[-2:])
global loggerMap
if modName in loggerMap:
# use existing logger
self.log = loggerMap[modName]
else:
# make handler
fmt = logging.Formatter('%(asctime)s|%(process)s|%(name)s|%(levelname)s| %(message)s',"%Y-%m-%d %H:%M:%S")
fmt.converter = time.gmtime
if filename:
sh = logging.FileHandler(filename, mode='a')
else:
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.DEBUG)
fmt.converter = time.gmtime
sh.setFormatter(fmt)
# make logger
self.log = logging.getLogger(modName)
self.log.propagate = False
self.log.addHandler(sh)
loggerMap[modName] = self.log
def info(self,msg):
self.log.info(msg)
def debug(self,msg):
self.log.debug(msg)
def warning(self,msg):
self.log.warning(msg)
def error(self,msg):
self.log.error(msg)
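# Illustrative usage (added for clarity; not part of the original file). The
# log file path is an assumption; omit it to log to stdout instead.
#
#     log = Logger('/tmp/pilot_hpc.log')
#     log.info('job started')
#     log.warning('walltime almost exceeded')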
| apache-2.0 | 2,124,790,224,044,881,700 | 26.509804 | 118 | 0.528154 | false |
PyCQA/pylint | pylint/utils/pragma_parser.py | 1 | 4771 | # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
import re
from collections import namedtuple
from typing import Generator, List
# Allow stopping after the first semicolon/hash encountered,
# so that an option can be continued with the reasons
# why it is active or disabled.
OPTION_RGX = r"""
    \s* # Any number of whitespace
\#? # One or zero hash
.* # Anything (as much as possible)
(\s* # Beginning of first matched group and any number of whitespaces
\# # Beginning of comment
.*? # Anything (as little as possible)
    \bpylint: # pylint word and colon
\s* # Any number of whitespaces
([^;#\n]+)) # Anything except semicolon or hash or newline (it is the second matched group)
# and end of the first matched group
[;#]{0,1}""" # From 0 to 1 repetition of semicolon or hash
OPTION_PO = re.compile(OPTION_RGX, re.VERBOSE)
PragmaRepresenter = namedtuple("PragmaRepresenter", "action messages")
ATOMIC_KEYWORDS = frozenset(("disable-all", "skip-file"))
MESSAGE_KEYWORDS = frozenset(("disable-msg", "enable-msg", "disable", "enable"))
# sorted is necessary because sets are unordered collections and ALL_KEYWORDS
# string should not vary between executions
# reverse is necessary in order to have the longest keywords first, so that, for example,
# 'disable' string should not be matched instead of 'disable-all'
ALL_KEYWORDS = "|".join(
sorted(ATOMIC_KEYWORDS | MESSAGE_KEYWORDS, key=len, reverse=True)
)
TOKEN_SPECIFICATION = [
("KEYWORD", fr"\b({ALL_KEYWORDS:s})\b"),
("MESSAGE_STRING", r"[0-9A-Za-z\-\_]{2,}"), # Identifiers
("ASSIGN", r"="), # Assignment operator
("MESSAGE_NUMBER", r"[CREIWF]{1}\d*"),
]
TOK_REGEX = "|".join(
f"(?P<{token_name:s}>{token_rgx:s})"
for token_name, token_rgx in TOKEN_SPECIFICATION
)
def emit_pragma_representer(action, messages):
if not messages and action in MESSAGE_KEYWORDS:
raise InvalidPragmaError(
"The keyword is not followed by message identifier", action
)
return PragmaRepresenter(action, messages)
class PragmaParserError(Exception):
"""
A class for exceptions thrown by pragma_parser module
"""
def __init__(self, message, token):
"""
:args message: explain the reason why the exception has been thrown
:args token: token concerned by the exception
"""
self.message = message
self.token = token
super().__init__(self.message)
class UnRecognizedOptionError(PragmaParserError):
"""
Thrown in case the of a valid but unrecognized option
"""
class InvalidPragmaError(PragmaParserError):
"""
Thrown in case the pragma is invalid
"""
def parse_pragma(pylint_pragma: str) -> Generator[PragmaRepresenter, None, None]:
action = None
messages: List[str] = []
assignment_required = False
previous_token = ""
for mo in re.finditer(TOK_REGEX, pylint_pragma):
kind = mo.lastgroup
value = mo.group()
if kind == "ASSIGN":
if not assignment_required:
if action:
                    # A keyword has been found previously but doesn't support assignment
raise UnRecognizedOptionError(
"The keyword doesn't support assignment", action
)
if previous_token:
# Something found previously but not a known keyword
raise UnRecognizedOptionError(
"The keyword is unknown", previous_token
)
# Nothing at all detected before this assignment
raise InvalidPragmaError("Missing keyword before assignment", "")
assignment_required = False
elif assignment_required:
raise InvalidPragmaError("The = sign is missing after the keyword", action)
elif kind == "KEYWORD":
if action:
yield emit_pragma_representer(action, messages)
action = value
messages = list()
assignment_required = action in MESSAGE_KEYWORDS
elif kind in ("MESSAGE_STRING", "MESSAGE_NUMBER"):
messages.append(value)
assignment_required = False
else:
raise RuntimeError("Token not recognized")
previous_token = value
if action:
yield emit_pragma_representer(action, messages)
else:
raise UnRecognizedOptionError("The keyword is unknown", previous_token)
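# Illustrative sketch (added; not shipped with pylint): feeding the text of an
# inline pragma comment through the parser above.
#
#     comment = "# pylint: disable=missing-docstring, C0103"
#     match = OPTION_PO.search(comment)
#     if match:
#         for pragma in parse_pragma(match.group(2)):
#             print(pragma.action, pragma.messages)
#     # -> disable ['missing-docstring', 'C0103']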
| gpl-2.0 | -1,107,622,985,180,464,500 | 35.060606 | 102 | 0.618277 | false |
pyrocko/pyrocko | src/gf/seismosizer.py | 1 | 122659 | # http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
from __future__ import absolute_import, division, print_function
from collections import defaultdict
from functools import cmp_to_key
import time
import math
import os
import re
import logging
try:
import resource
except ImportError:
resource = None
import numpy as num
from pyrocko.guts import (Object, Float, String, StringChoice, List,
Timestamp, Int, SObject, ArgumentError, Dict,
ValidationError)
from pyrocko.guts_array import Array
from pyrocko import moment_tensor as pmt
from pyrocko import trace, util, config, model
from pyrocko.orthodrome import ne_to_latlon
from pyrocko.model import Location
from . import meta, store, ws
from .targets import Target, StaticTarget, SatelliteTarget
pjoin = os.path.join
guts_prefix = 'pf'
d2r = math.pi / 180.
logger = logging.getLogger('pyrocko.gf.seismosizer')
def cmp_none_aware(a, b):
if isinstance(a, tuple) and isinstance(b, tuple):
for xa, xb in zip(a, b):
rv = cmp_none_aware(xa, xb)
if rv != 0:
return rv
return 0
anone = a is None
bnone = b is None
if anone and bnone:
return 0
if anone:
return -1
if bnone:
return 1
return bool(a > b) - bool(a < b)
def xtime():
return time.time()
class SeismosizerError(Exception):
pass
class BadRequest(SeismosizerError):
pass
class DuplicateStoreId(Exception):
pass
class NoDefaultStoreSet(Exception):
pass
class ConversionError(Exception):
pass
class NoSuchStore(BadRequest):
def __init__(self, store_id=None, dirs=None):
BadRequest.__init__(self)
self.store_id = store_id
self.dirs = dirs
def __str__(self):
if self.store_id is not None:
rstr = 'no GF store with id "%s" found.' % self.store_id
else:
rstr = 'GF store not found.'
if self.dirs is not None:
rstr += ' Searched folders:\n %s' % '\n '.join(sorted(self.dirs))
return rstr
def ufloat(s):
units = {
'k': 1e3,
'M': 1e6,
}
factor = 1.0
if s and s[-1] in units:
factor = units[s[-1]]
s = s[:-1]
if not s:
raise ValueError('unit without a number: \'%s\'' % s)
return float(s) * factor
def ufloat_or_none(s):
if s:
return ufloat(s)
else:
return None
def int_or_none(s):
if s:
return int(s)
else:
return None
def nonzero(x, eps=1e-15):
return abs(x) > eps
def permudef(ln, j=0):
if j < len(ln):
k, v = ln[j]
for y in v:
ln[j] = k, y
for s in permudef(ln, j + 1):
yield s
ln[j] = k, v
return
else:
yield ln
def arr(x):
return num.atleast_1d(num.asarray(x))
def discretize_rect_source(deltas, deltat, time, north, east, depth,
strike, dip, length, width,
anchor, velocity, stf=None,
nucleation_x=None, nucleation_y=None,
decimation_factor=1):
if stf is None:
stf = STF()
mindeltagf = num.min(deltas)
mindeltagf = min(mindeltagf, deltat * velocity)
ln = length
wd = width
nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
n = int(nl * nw)
dl = ln / nl
dw = wd / nw
xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)
points = num.empty((n, 3), dtype=num.float)
points[:, 0] = num.tile(xl, nw)
points[:, 1] = num.repeat(xw, nl)
points[:, 2] = 0.0
if nucleation_x is not None:
dist_x = num.abs(nucleation_x - points[:, 0])
else:
dist_x = num.zeros(n)
if nucleation_y is not None:
dist_y = num.abs(nucleation_y - points[:, 1])
else:
dist_y = num.zeros(n)
dist = num.sqrt(dist_x**2 + dist_y**2)
times = dist / velocity
anch_x, anch_y = map_anchor[anchor]
points[:, 0] -= anch_x * 0.5 * length
points[:, 1] -= anch_y * 0.5 * width
rotmat = num.asarray(
pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
points = num.dot(rotmat.T, points.T).T
xtau, amplitudes = stf.discretize_t(deltat, time)
nt = xtau.size
points2 = num.repeat(points, nt, axis=0)
times2 = num.repeat(times, nt) + num.tile(xtau, n)
amplitudes2 = num.tile(amplitudes, n)
points2[:, 0] += north
points2[:, 1] += east
points2[:, 2] += depth
return points2, times2, amplitudes2, dl, dw, nl, nw
def check_rect_source_discretisation(points2, nl, nw, store):
# We assume a non-rotated fault plane
N_CRITICAL = 8
points = points2.T.reshape((3, nl, nw))
if points.size <= N_CRITICAL:
logger.warning('RectangularSource is defined by only %d sub-sources!'
% points.size)
return True
distances = num.sqrt(
(points[0, 0, :] - points[0, 1, :])**2 +
(points[1, 0, :] - points[1, 1, :])**2 +
(points[2, 0, :] - points[2, 1, :])**2)
depths = points[2, 0, :]
vs_profile = store.config.get_vs(
lat=0., lon=0.,
points=num.repeat(depths[:, num.newaxis], 3, axis=1),
interpolation='multilinear')
min_wavelength = vs_profile * (store.config.deltat * 2)
if not num.all(min_wavelength > distances/2):
return False
return True
def outline_rect_source(strike, dip, length, width, anchor):
ln = length
wd = width
points = num.array(
[[-0.5 * ln, -0.5 * wd, 0.],
[0.5 * ln, -0.5 * wd, 0.],
[0.5 * ln, 0.5 * wd, 0.],
[-0.5 * ln, 0.5 * wd, 0.],
[-0.5 * ln, -0.5 * wd, 0.]])
anch_x, anch_y = map_anchor[anchor]
points[:, 0] -= anch_x * 0.5 * length
points[:, 1] -= anch_y * 0.5 * width
rotmat = num.asarray(
pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
return num.dot(rotmat.T, points.T).T
def from_plane_coords(
strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
lat=0., lon=0.,
north_shift=0, east_shift=0,
anchor='top', cs='xy'):
ln = length
wd = width
x_abs = []
y_abs = []
if not isinstance(x_plane_coords, list):
x_plane_coords = [x_plane_coords]
y_plane_coords = [y_plane_coords]
for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
points = num.array(
[[-0.5 * ln*x_plane, -0.5 * wd*y_plane, 0.],
[0.5 * ln*x_plane, -0.5 * wd*y_plane, 0.],
[0.5 * ln*x_plane, 0.5 * wd*y_plane, 0.],
[-0.5 * ln*x_plane, 0.5 * wd*y_plane, 0.],
[-0.5 * ln*x_plane, -0.5 * wd*y_plane, 0.]])
anch_x, anch_y = map_anchor[anchor]
points[:, 0] -= anch_x * 0.5 * length
points[:, 1] -= anch_y * 0.5 * width
rotmat = num.asarray(
pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
points = num.dot(rotmat.T, points.T).T
points[:, 0] += north_shift
points[:, 1] += east_shift
points[:, 2] += depth
if cs in ('latlon', 'lonlat'):
latlon = ne_to_latlon(lat, lon,
points[:, 0], points[:, 1])
latlon = num.array(latlon).T
x_abs.append(latlon[1:2, 1])
y_abs.append(latlon[2:3, 0])
if cs == 'xy':
x_abs.append(points[1:2, 1])
y_abs.append(points[2:3, 0])
if cs == 'lonlat':
return y_abs, x_abs
else:
return x_abs, y_abs
class InvalidGridDef(Exception):
pass
class Range(SObject):
'''
Convenient range specification.
    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::
Range('0 .. 10k : 1k')
Range(start=0., stop=10e3, step=1e3)
Range(0, 10e3, 1e3)
Range('0 .. 10k @ 11')
Range(start=0., stop=10*km, n=11)
Range(0, 10e3, n=11)
Range(values=[x*1e3 for x in range(11)])
Depending on the use context, it can be possible to omit any part of the
specification. E.g. in the context of extracting a subset of an already
existing range, the existing range's specification values would be filled
in where missing.
The values are distributed with equal spacing, unless the ``spacing``
argument is modified. The values can be created offset or relative to an
external base value with the ``relative`` argument if the use context
supports this.
The range specification can be expressed with a short string
representation::
'start .. stop @ num | spacing, relative'
'start .. stop : step | spacing, relative'
most parts of the expression can be omitted if not needed. Whitespace is
allowed for readability but can also be omitted.
'''
start = Float.T(optional=True)
stop = Float.T(optional=True)
step = Float.T(optional=True)
n = Int.T(optional=True)
values = Array.T(optional=True, dtype=num.float, shape=(None,))
spacing = StringChoice.T(
choices=['lin', 'log', 'symlog'],
default='lin',
optional=True)
relative = StringChoice.T(
choices=['', 'add', 'mult'],
default='',
optional=True)
pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
r'(\|(?P<stuff>.+))?$')
def __init__(self, *args, **kwargs):
d = {}
if len(args) == 1:
d = self.parse(args[0])
elif len(args) in (2, 3):
d['start'], d['stop'] = [float(x) for x in args[:2]]
if len(args) == 3:
d['step'] = float(args[2])
for k, v in kwargs.items():
if k in d:
raise ArgumentError('%s specified more than once' % k)
d[k] = v
SObject.__init__(self, **d)
def __str__(self):
def sfloat(x):
if x is not None:
return '%g' % x
else:
return ''
if self.values:
return ','.join('%g' % x for x in self.values)
if self.start is None and self.stop is None:
s0 = ''
else:
s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))
s1 = ''
if self.step is not None:
s1 = [' : %g', ':%g'][s0 == ''] % self.step
elif self.n is not None:
s1 = [' @ %i', '@%i'][s0 == ''] % self.n
if self.spacing == 'lin' and self.relative == '':
s2 = ''
else:
x = []
if self.spacing != 'lin':
x.append(self.spacing)
if self.relative != '':
x.append(self.relative)
s2 = ' | %s' % ','.join(x)
return s0 + s1 + s2
@classmethod
def parse(cls, s):
s = re.sub(r'\s+', '', s)
m = cls.pattern.match(s)
if not m:
try:
vals = [ufloat(x) for x in s.split(',')]
except Exception:
raise InvalidGridDef(
'"%s" is not a valid range specification' % s)
return dict(values=num.array(vals, dtype=num.float))
d = m.groupdict()
try:
start = ufloat_or_none(d['start'])
stop = ufloat_or_none(d['stop'])
step = ufloat_or_none(d['step'])
n = int_or_none(d['n'])
except Exception:
raise InvalidGridDef(
'"%s" is not a valid range specification' % s)
spacing = 'lin'
relative = ''
if d['stuff'] is not None:
t = d['stuff'].split(',')
for x in t:
if x in cls.spacing.choices:
spacing = x
elif x and x in cls.relative.choices:
relative = x
else:
raise InvalidGridDef(
'"%s" is not a valid range specification' % s)
return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
relative=relative)
def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
if self.values:
return self.values
start = self.start
stop = self.stop
step = self.step
n = self.n
swap = step is not None and step < 0.
if start is None:
start = [mi, ma][swap]
if stop is None:
stop = [ma, mi][swap]
if step is None and inc is not None:
step = [inc, -inc][ma < mi]
if start is None or stop is None:
raise InvalidGridDef(
'Cannot use range specification "%s" without start '
'and stop in this context' % self)
if step is None and n is None:
step = stop - start
if n is None:
if (step < 0) != (stop - start < 0):
raise InvalidGridDef(
'Range specification "%s" has inconsistent ordering '
'(step < 0 => stop > start)' % self)
n = int(round((stop - start) / step)) + 1
stop2 = start + (n - 1) * step
if abs(stop - stop2) > eps:
n = int(math.floor((stop - start) / step)) + 1
stop = start + (n - 1) * step
else:
stop = stop2
if start == stop:
n = 1
if self.spacing == 'lin':
vals = num.linspace(start, stop, n)
elif self.spacing in ('log', 'symlog'):
if start > 0. and stop > 0.:
vals = num.exp(num.linspace(num.log(start),
num.log(stop), n))
elif start < 0. and stop < 0.:
vals = -num.exp(num.linspace(num.log(-start),
num.log(-stop), n))
else:
raise InvalidGridDef(
'Log ranges should not include or cross zero '
'(in range specification "%s").' % self)
if self.spacing == 'symlog':
nvals = - vals
vals = num.concatenate((nvals[::-1], vals))
if self.relative in ('add', 'mult') and base is None:
raise InvalidGridDef(
'Cannot use relative range specification in this context.')
vals = self.make_relative(base, vals)
return list(map(float, vals))
def make_relative(self, base, vals):
if self.relative == 'add':
vals += base
if self.relative == 'mult':
vals *= base
return vals
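# Illustrative sketch (added; not part of the original file): the short string
# form and the keyword form of Range yield the same values.
#
#     >>> Range('0 .. 10k : 2k').make()
#     [0.0, 2000.0, 4000.0, 6000.0, 8000.0, 10000.0]
#     >>> Range(start=0., stop=10e3, n=6).make()
#     [0.0, 2000.0, 4000.0, 6000.0, 8000.0, 10000.0]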
class GridDefElement(Object):
param = meta.StringID.T()
rs = Range.T()
def __init__(self, shorthand=None, **kwargs):
if shorthand is not None:
t = shorthand.split('=')
if len(t) != 2:
raise InvalidGridDef(
'Invalid grid specification element: %s' % shorthand)
sp, sr = t[0].strip(), t[1].strip()
kwargs['param'] = sp
kwargs['rs'] = Range(sr)
Object.__init__(self, **kwargs)
def shorthand(self):
return self.param + ' = ' + str(self.rs)
class GridDef(Object):
elements = List.T(GridDefElement.T())
def __init__(self, shorthand=None, **kwargs):
if shorthand is not None:
t = shorthand.splitlines()
tt = []
for x in t:
x = x.strip()
if x:
tt.extend(x.split(';'))
elements = []
for se in tt:
elements.append(GridDef(se))
kwargs['elements'] = elements
Object.__init__(self, **kwargs)
def shorthand(self):
return '; '.join(str(x) for x in self.elements)
class Cloneable(object):
def __iter__(self):
return iter(self.T.propnames)
def __getitem__(self, k):
if k not in self.keys():
raise KeyError(k)
return getattr(self, k)
def __setitem__(self, k, v):
if k not in self.keys():
raise KeyError(k)
return setattr(self, k, v)
def clone(self, **kwargs):
'''
Make a copy of the object.
A new object of the same class is created and initialized with the
parameters of the object on which this method is called on. If
``kwargs`` are given, these are used to override any of the
initialization parameters.
'''
d = dict(self)
for k in d:
v = d[k]
if isinstance(v, Cloneable):
d[k] = v.clone()
d.update(kwargs)
return self.__class__(**d)
@classmethod
def keys(cls):
'''
Get list of the source model's parameter names.
'''
return cls.T.propnames
class STF(Object, Cloneable):
'''
Base class for source time functions.
'''
def __init__(self, effective_duration=None, **kwargs):
if effective_duration is not None:
kwargs['duration'] = effective_duration / \
self.factor_duration_to_effective()
Object.__init__(self, **kwargs)
@classmethod
def factor_duration_to_effective(cls):
return 1.0
def centroid_time(self, tref):
return tref
@property
def effective_duration(self):
return self.duration * self.factor_duration_to_effective()
def discretize_t(self, deltat, tref):
tl = math.floor(tref / deltat) * deltat
th = math.ceil(tref / deltat) * deltat
if tl == th:
return num.array([tl], dtype=num.float), num.ones(1)
else:
return (
num.array([tl, th], dtype=num.float),
num.array([th - tref, tref - tl], dtype=num.float) / deltat)
def base_key(self):
return (type(self).__name__,)
g_unit_pulse = STF()
def sshift(times, amplitudes, tshift, deltat):
t0 = math.floor(tshift / deltat) * deltat
t1 = math.ceil(tshift / deltat) * deltat
if t0 == t1:
return times, amplitudes
amplitudes2 = num.zeros(amplitudes.size + 1, dtype=num.float)
amplitudes2[:-1] += (t1 - tshift) / deltat * amplitudes
amplitudes2[1:] += (tshift - t0) / deltat * amplitudes
times2 = num.arange(times.size + 1, dtype=num.float) * \
deltat + times[0] + t0
return times2, amplitudes2
class BoxcarSTF(STF):
'''
Boxcar type source time function.
.. figure :: /static/stf-BoxcarSTF.svg
:width: 40%
:align: center
:alt: boxcar source time function
'''
duration = Float.T(
default=0.0,
help='duration of the boxcar')
anchor = Float.T(
default=0.0,
help='anchor point with respect to source.time: ('
'-1.0: left -> source duration [0, T] ~ hypocenter time, '
' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
'+1.0: right -> source duration [-T, 0] ~ rupture end time)')
@classmethod
def factor_duration_to_effective(cls):
return 1.0
def centroid_time(self, tref):
return tref - 0.5 * self.duration * self.anchor
def discretize_t(self, deltat, tref):
tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
tmin = round(tmin_stf / deltat) * deltat
tmax = round(tmax_stf / deltat) * deltat
nt = int(round((tmax - tmin) / deltat)) + 1
times = num.linspace(tmin, tmax, nt)
amplitudes = num.ones_like(times)
if times.size > 1:
t_edges = num.linspace(
tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
t = tmin_stf + self.duration * num.array(
[0.0, 0.0, 1.0, 1.0], dtype=num.float)
f = num.array([0., 1., 1., 0.], dtype=num.float)
amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
amplitudes /= num.sum(amplitudes)
tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))
return sshift(times, amplitudes, -tshift, deltat)
def base_key(self):
return (type(self).__name__, self.duration, self.anchor)
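# Illustrative sketch (added; not part of the original file): what discretize_t
# of a boxcar STF returns on a 0.5 s sampling grid, assuming tref=10.0.
#
#     stf = BoxcarSTF(duration=2.0, anchor=0.0)
#     times, amplitudes = stf.discretize_t(deltat=0.5, tref=10.0)
#     # 'times' spans roughly [9.0, 11.0]; 'amplitudes' sums to 1.0 and is
#     # later convolved with the Green's functions during stacking.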
class TriangularSTF(STF):
'''
Triangular type source time function.
.. figure :: /static/stf-TriangularSTF.svg
:width: 40%
:align: center
:alt: triangular source time function
'''
duration = Float.T(
default=0.0,
help='baseline of the triangle')
peak_ratio = Float.T(
default=0.5,
help='fraction of time compared to duration, '
'when the maximum amplitude is reached')
anchor = Float.T(
default=0.0,
help='anchor point with respect to source.time: ('
'-1.0: left -> source duration [0, T] ~ hypocenter time, '
' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
'+1.0: right -> source duration [-T, 0] ~ rupture end time)')
@classmethod
def factor_duration_to_effective(cls, peak_ratio=None):
if peak_ratio is None:
peak_ratio = cls.peak_ratio.default()
return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)
def __init__(self, effective_duration=None, **kwargs):
if effective_duration is not None:
kwargs['duration'] = effective_duration / \
self.factor_duration_to_effective(
kwargs.get('peak_ratio', None))
STF.__init__(self, **kwargs)
@property
def centroid_ratio(self):
ra = self.peak_ratio
rb = 1.0 - ra
return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)
def centroid_time(self, tref):
ca = self.centroid_ratio
cb = 1.0 - ca
if self.anchor <= 0.:
return tref - ca * self.duration * self.anchor
else:
return tref - cb * self.duration * self.anchor
@property
def effective_duration(self):
return self.duration * self.factor_duration_to_effective(
self.peak_ratio)
def tminmax_stf(self, tref):
ca = self.centroid_ratio
cb = 1.0 - ca
if self.anchor <= 0.:
tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
tmax_stf = tmin_stf + self.duration
else:
tmax_stf = tref + cb * self.duration * (1. - self.anchor)
tmin_stf = tmax_stf - self.duration
return tmin_stf, tmax_stf
def discretize_t(self, deltat, tref):
tmin_stf, tmax_stf = self.tminmax_stf(tref)
tmin = round(tmin_stf / deltat) * deltat
tmax = round(tmax_stf / deltat) * deltat
nt = int(round((tmax - tmin) / deltat)) + 1
if nt > 1:
t_edges = num.linspace(
tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
t = tmin_stf + self.duration * num.array(
[0.0, self.peak_ratio, 1.0], dtype=num.float)
f = num.array([0., 1., 0.], dtype=num.float)
amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
amplitudes /= num.sum(amplitudes)
else:
amplitudes = num.ones(1)
times = num.linspace(tmin, tmax, nt)
return times, amplitudes
def base_key(self):
return (
type(self).__name__, self.duration, self.peak_ratio, self.anchor)
class HalfSinusoidSTF(STF):
'''
Half sinusoid type source time function.
.. figure :: /static/stf-HalfSinusoidSTF.svg
:width: 40%
:align: center
        :alt: half-sinusoid source time function
'''
duration = Float.T(
default=0.0,
help='duration of the half-sinusoid (baseline)')
anchor = Float.T(
default=0.0,
help='anchor point with respect to source.time: ('
'-1.0: left -> source duration [0, T] ~ hypocenter time, '
' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
'+1.0: right -> source duration [-T, 0] ~ rupture end time)')
exponent = Int.T(
default=1,
help='set to 2 to use square of the half-period sinusoidal function.')
def __init__(self, effective_duration=None, **kwargs):
if effective_duration is not None:
kwargs['duration'] = effective_duration / \
self.factor_duration_to_effective(
kwargs.get('exponent', 1))
STF.__init__(self, **kwargs)
@classmethod
def factor_duration_to_effective(cls, exponent):
if exponent == 1:
return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
elif exponent == 2:
return math.sqrt(math.pi**2 - 6) / math.pi
else:
raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')
@property
def effective_duration(self):
return self.duration * self.factor_duration_to_effective(self.exponent)
def centroid_time(self, tref):
return tref - 0.5 * self.duration * self.anchor
def discretize_t(self, deltat, tref):
tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
tmin = round(tmin_stf / deltat) * deltat
tmax = round(tmax_stf / deltat) * deltat
nt = int(round((tmax - tmin) / deltat)) + 1
if nt > 1:
t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))
if self.exponent == 1:
fint = -num.cos(
(t_edges - tmin_stf) * (math.pi / self.duration))
elif self.exponent == 2:
fint = (t_edges - tmin_stf) / self.duration \
- 1.0 / (2.0 * math.pi) * num.sin(
(t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
else:
raise ValueError(
'Exponent for HalfSinusoidSTF must be 1 or 2.')
amplitudes = fint[1:] - fint[:-1]
amplitudes /= num.sum(amplitudes)
else:
amplitudes = num.ones(1)
times = num.linspace(tmin, tmax, nt)
return times, amplitudes
def base_key(self):
return (type(self).__name__, self.duration, self.anchor)
class SmoothRampSTF(STF):
'''Smooth-ramp type source time function for near-field displacement.
Based on moment function of double-couple point source proposed by Bruestle
and Mueller (PEPI, 1983).
.. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
earthquakes from Love-wave modelling for regional distances, PEPI 32,
312-324.
.. figure :: /static/stf-SmoothRampSTF.svg
:width: 40%
:alt: smooth ramp source time function
'''
duration = Float.T(
default=0.0,
help='duration of the ramp (baseline)')
rise_ratio = Float.T(
default=0.5,
help='fraction of time compared to duration, '
'when the maximum amplitude is reached')
anchor = Float.T(
default=0.0,
help='anchor point with respect to source.time: ('
'-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
'0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
'+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')
def discretize_t(self, deltat, tref):
tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
tmin = round(tmin_stf / deltat) * deltat
tmax = round(tmax_stf / deltat) * deltat
D = round((tmax - tmin) / deltat) * deltat
nt = int(round(D / deltat)) + 1
times = num.linspace(tmin, tmax, nt)
if nt > 1:
rise_time = self.rise_ratio * self.duration
amplitudes = num.ones_like(times)
tp = tmin + rise_time
ii = num.where(times <= tp)
t_inc = times[ii]
a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)
amplitudes /= num.sum(amplitudes)
else:
amplitudes = num.ones(1)
return times, amplitudes
def base_key(self):
return (type(self).__name__,
self.duration, self.rise_ratio, self.anchor)
class ResonatorSTF(STF):
'''
Simple resonator like source time function.
.. math ::
        f(t) = 0 \quad \mathrm{for} \quad t < 0

        f(t) = e^{-t/\tau} \, \sin(2 \pi f t)
.. figure :: /static/stf-SmoothRampSTF.svg
:width: 40%
:alt: smooth ramp source time function
'''
duration = Float.T(
default=0.0,
help='decay time')
frequency = Float.T(
default=1.0,
help='resonance frequency')
def discretize_t(self, deltat, tref):
tmin_stf = tref
tmax_stf = tref + self.duration * 3
tmin = math.floor(tmin_stf / deltat) * deltat
tmax = math.ceil(tmax_stf / deltat) * deltat
times = util.arange2(tmin, tmax, deltat)
amplitudes = num.exp(-(times-tref)/self.duration) \
* num.sin(2.0 * num.pi * self.frequency * (times-tref))
return times, amplitudes
def base_key(self):
return (type(self).__name__,
self.duration, self.frequency)
class STFMode(StringChoice):
choices = ['pre', 'post']
class Source(Location, Cloneable):
'''
Base class for all source models.
'''
name = String.T(optional=True, default='')
time = Timestamp.T(
default=Timestamp.D('1970-01-01 00:00:00'),
help='source origin time.')
stf = STF.T(
optional=True,
help='source time function.')
stf_mode = STFMode.T(
default='post',
help='whether to apply source time function in pre or '
'post-processing.')
def __init__(self, **kwargs):
Location.__init__(self, **kwargs)
def update(self, **kwargs):
'''
Change some of the source models parameters.
Example::
>>> from pyrocko import gf
>>> s = gf.DCSource()
>>> s.update(strike=66., dip=33.)
            >>> print(s)
--- !pf.DCSource
depth: 0.0
time: 1970-01-01 00:00:00
magnitude: 6.0
strike: 66.0
dip: 33.0
rake: 0.0
'''
for (k, v) in kwargs.items():
self[k] = v
def grid(self, **variables):
'''
Create grid of source model variations.
:returns: :py:class:`SourceGrid` instance.
Example::
>>> from pyrocko import gf
>>> base = DCSource()
>>> R = gf.Range
>>> for s in base.grid(R('
'''
return SourceGrid(base=self, variables=variables)
def base_key(self):
'''
Get key to decide about source discretization / GF stack sharing.
When two source models differ only in amplitude and origin time, the
discretization and the GF stacking can be done only once for a unit
amplitude and a zero origin time and the amplitude and origin times of
the seismograms can be applied during post-processing of the synthetic
seismogram.
For any derived parameterized source model, this method is called to
decide if discretization and stacking of the source should be shared.
When two source models return an equal vector of values discretization
is shared.
'''
return (self.depth, self.lat, self.north_shift,
self.lon, self.east_shift, self.time, type(self).__name__) + \
self.effective_stf_pre().base_key()
def get_factor(self):
'''
Get the scaling factor to be applied during post-processing.
Discretization of the base seismogram is usually done for a unit
amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models differing
        only in amplitude.
This method should return the scaling factor to apply in the
post-processing (often this is simply the scalar moment of the source).
'''
return 1.0
def effective_stf_pre(self):
'''
Return the STF applied before stacking of the Green's functions.
This STF is used during discretization of the parameterized source
models, i.e. to produce a temporal distribution of point sources.
        Handling of the STF before stacking of the GFs is less efficient, but
        it allows using different source time functions for different parts
        of the source.
'''
if self.stf is not None and self.stf_mode == 'pre':
return self.stf
else:
return g_unit_pulse
def effective_stf_post(self):
'''
        Return the STF applied after stacking of the Green's functions.
This STF is used in the post-processing of the synthetic seismograms.
Handling of the STF after stacking of the GFs is usually more efficient
but is only possible when a common STF is used for all subsources.
'''
if self.stf is not None and self.stf_mode == 'post':
return self.stf
else:
return g_unit_pulse
def _dparams_base(self):
return dict(times=arr(self.time),
lat=self.lat, lon=self.lon,
north_shifts=arr(self.north_shift),
east_shifts=arr(self.east_shift),
depths=arr(self.depth))
def _dparams_base_repeated(self, times):
if times is None:
return self._dparams_base()
nt = times.size
north_shifts = num.repeat(self.north_shift, nt)
east_shifts = num.repeat(self.east_shift, nt)
depths = num.repeat(self.depth, nt)
return dict(times=times,
lat=self.lat, lon=self.lon,
north_shifts=north_shifts,
east_shifts=east_shifts,
depths=depths)
def pyrocko_event(self, store=None, target=None, **kwargs):
duration = None
if self.stf:
duration = self.stf.effective_duration
return model.Event(
lat=self.lat,
lon=self.lon,
north_shift=self.north_shift,
east_shift=self.east_shift,
time=self.time,
name=self.name,
depth=self.depth,
duration=duration,
**kwargs)
def outline(self, cs='xyz'):
points = num.atleast_2d(num.zeros([1, 3]))
points[:, 0] += self.north_shift
points[:, 1] += self.east_shift
points[:, 2] += self.depth
if cs == 'xyz':
return points
elif cs == 'xy':
return points[:, :2]
elif cs in ('latlon', 'lonlat'):
latlon = ne_to_latlon(
self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T
if cs == 'latlon':
return latlon
else:
return latlon[:, ::-1]
@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
if ev.depth is None:
raise ConversionError(
'Cannot convert event object to source object: '
'no depth information available')
stf = None
if ev.duration is not None:
stf = HalfSinusoidSTF(effective_duration=ev.duration)
d = dict(
name=ev.name,
time=ev.time,
lat=ev.lat,
lon=ev.lon,
north_shift=ev.north_shift,
east_shift=ev.east_shift,
depth=ev.depth,
stf=stf)
d.update(kwargs)
return cls(**d)
def get_magnitude(self):
raise NotImplementedError(
'%s does not implement get_magnitude()'
% self.__class__.__name__)
class SourceWithMagnitude(Source):
'''
Base class for sources containing a moment magnitude.
'''
magnitude = Float.T(
default=6.0,
help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')
def __init__(self, **kwargs):
if 'moment' in kwargs:
mom = kwargs.pop('moment')
if 'magnitude' not in kwargs:
kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
Source.__init__(self, **kwargs)
@property
def moment(self):
return float(pmt.magnitude_to_moment(self.magnitude))
@moment.setter
def moment(self, value):
self.magnitude = float(pmt.moment_to_magnitude(value))
def pyrocko_event(self, store=None, target=None, **kwargs):
return Source.pyrocko_event(
self, store, target,
magnitude=self.magnitude,
**kwargs)
@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
d = {}
if ev.magnitude:
d.update(magnitude=ev.magnitude)
d.update(kwargs)
return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)
def get_magnitude(self):
return self.magnitude
class DerivedMagnitudeError(ValidationError):
pass
class SourceWithDerivedMagnitude(Source):
class __T(Source.T):
def validate_extra(self, val):
Source.T.validate_extra(self, val)
val.check_conflicts()
def check_conflicts(self):
'''
Check for parameter conflicts.
To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
on conflicts.
'''
pass
def get_magnitude(self, store=None, target=None):
raise DerivedMagnitudeError('No magnitude set.')
def get_moment(self, store=None, target=None):
return float(pmt.magnitude_to_moment(
self.get_magnitude(store, target)))
def pyrocko_moment_tensor(self, store=None, target=None):
raise NotImplementedError(
'%s does not implement pyrocko_moment_tensor()'
% self.__class__.__name__)
def pyrocko_event(self, store=None, target=None, **kwargs):
try:
mt = self.pyrocko_moment_tensor(store, target)
magnitude = self.get_magnitude()
except (DerivedMagnitudeError, NotImplementedError):
mt = None
magnitude = None
return Source.pyrocko_event(
self, store, target,
moment_tensor=mt,
magnitude=magnitude,
**kwargs)
class ExplosionSource(SourceWithDerivedMagnitude):
'''
An isotropic explosion point source.
'''
magnitude = Float.T(
optional=True,
help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')
volume_change = Float.T(
optional=True,
help='volume change of the explosion/implosion or '
'the contracting/extending magmatic source. [m^3]')
discretized_source_class = meta.DiscretizedExplosionSource
def __init__(self, **kwargs):
if 'moment' in kwargs:
mom = kwargs.pop('moment')
if 'magnitude' not in kwargs:
kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
SourceWithDerivedMagnitude.__init__(self, **kwargs)
def base_key(self):
return SourceWithDerivedMagnitude.base_key(self) + \
(self.volume_change,)
def check_conflicts(self):
if self.magnitude is not None and self.volume_change is not None:
raise DerivedMagnitudeError(
'Magnitude and volume_change are both defined.')
def get_magnitude(self, store=None, target=None):
self.check_conflicts()
if self.magnitude is not None:
return self.magnitude
elif self.volume_change is not None:
moment = self.volume_change * \
self.get_moment_to_volume_change_ratio(store, target)
return float(pmt.moment_to_magnitude(abs(moment)))
else:
return float(pmt.moment_to_magnitude(1.0))
def get_volume_change(self, store=None, target=None):
self.check_conflicts()
if self.volume_change is not None:
return self.volume_change
elif self.magnitude is not None:
moment = float(pmt.magnitude_to_moment(self.magnitude))
return moment / self.get_moment_to_volume_change_ratio(
store, target)
else:
return 1.0 / self.get_moment_to_volume_change_ratio(store)
def get_moment_to_volume_change_ratio(self, store, target=None):
if store is None:
raise DerivedMagnitudeError(
'Need earth model to convert between volume change and '
'magnitude.')
points = num.array(
[[self.north_shift, self.east_shift, self.depth]], dtype=num.float)
interpolation = target.interpolation if target else 'multilinear'
try:
shear_moduli = store.config.get_shear_moduli(
self.lat, self.lon,
points=points,
interpolation=interpolation)[0]
except meta.OutOfBounds:
raise DerivedMagnitudeError(
'Could not get shear modulus at source position.')
return float(3. * shear_moduli)
def get_factor(self):
return 1.0
def discretize_basesource(self, store, target=None):
times, amplitudes = self.effective_stf_pre().discretize_t(
store.config.deltat, self.time)
amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)
if self.volume_change is not None:
if self.volume_change < 0.:
amplitudes *= -1
return meta.DiscretizedExplosionSource(
m0s=amplitudes,
**self._dparams_base_repeated(times))
def pyrocko_moment_tensor(self, store=None, target=None):
a = self.get_moment(store, target) * math.sqrt(2. / 3.)
return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))
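# Illustrative sketch (hypothetical values): an ExplosionSource is
# parameterized either by ``magnitude`` or by ``volume_change`` (setting
# both raises DerivedMagnitudeError); deriving a magnitude from a volume
# change needs the shear modulus from a GF store, so ``store`` below is a
# placeholder for an open pyrocko.gf.store.Store:
#
#     src = ExplosionSource(depth=2e3, volume_change=1e4)  # [m^3]
#     # mw = src.get_magnitude(store)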
class RectangularExplosionSource(ExplosionSource):
'''
Rectangular or line explosion source.
'''
discretized_source_class = meta.DiscretizedExplosionSource
strike = Float.T(
default=0.0,
help='strike direction in [deg], measured clockwise from north')
dip = Float.T(
default=90.0,
help='dip angle in [deg], measured downward from horizontal')
length = Float.T(
default=0.,
help='length of rectangular source area [m]')
width = Float.T(
default=0.,
help='width of rectangular source area [m]')
anchor = StringChoice.T(
choices=['top', 'top_left', 'top_right', 'center', 'bottom',
'bottom_left', 'bottom_right'],
default='center',
optional=True,
        help='Anchor point for positioning the plane. Can be: top, center, '
             'bottom, top_left, top_right, bottom_left or bottom_right.')
nucleation_x = Float.T(
optional=True,
help='horizontal position of rupture nucleation in normalized fault '
'plane coordinates (-1 = left edge, +1 = right edge)')
nucleation_y = Float.T(
optional=True,
help='down-dip position of rupture nucleation in normalized fault '
'plane coordinates (-1 = upper edge, +1 = lower edge)')
velocity = Float.T(
default=3500.,
help='speed of explosion front [m/s]')
def base_key(self):
return Source.base_key(self) + (self.strike, self.dip, self.length,
self.width, self.nucleation_x,
self.nucleation_y, self.velocity,
self.anchor)
def discretize_basesource(self, store, target=None):
if self.nucleation_x is not None:
nucx = self.nucleation_x * 0.5 * self.length
else:
nucx = None
if self.nucleation_y is not None:
nucy = self.nucleation_y * 0.5 * self.width
else:
nucy = None
stf = self.effective_stf_pre()
points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
store.config.deltas, store.config.deltat,
self.time, self.north_shift, self.east_shift, self.depth,
self.strike, self.dip, self.length, self.width, self.anchor,
self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)
amplitudes /= num.sum(amplitudes)
amplitudes *= self.get_moment(store, target)
return meta.DiscretizedExplosionSource(
lat=self.lat,
lon=self.lon,
times=times,
north_shifts=points[:, 0],
east_shifts=points[:, 1],
depths=points[:, 2],
m0s=amplitudes)
def outline(self, cs='xyz'):
points = outline_rect_source(self.strike, self.dip, self.length,
self.width, self.anchor)
points[:, 0] += self.north_shift
points[:, 1] += self.east_shift
points[:, 2] += self.depth
if cs == 'xyz':
return points
elif cs == 'xy':
return points[:, :2]
elif cs in ('latlon', 'lonlat'):
latlon = ne_to_latlon(
self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T
if cs == 'latlon':
return latlon
else:
return latlon[:, ::-1]
def get_nucleation_abs_coord(self, cs='xy'):
if self.nucleation_x is None:
return None, None
coords = from_plane_coords(self.strike, self.dip, self.length,
self.width, self.depth, self.nucleation_x,
self.nucleation_y, lat=self.lat,
lon=self.lon, north_shift=self.north_shift,
east_shift=self.east_shift, cs=cs)
return coords
class DCSource(SourceWithMagnitude):
'''
A double-couple point source.
'''
strike = Float.T(
default=0.0,
help='strike direction in [deg], measured clockwise from north')
dip = Float.T(
default=90.0,
help='dip angle in [deg], measured downward from horizontal')
rake = Float.T(
default=0.0,
help='rake angle in [deg], '
'measured counter-clockwise from right-horizontal '
'in on-plane view')
discretized_source_class = meta.DiscretizedMTSource
def base_key(self):
return Source.base_key(self) + (self.strike, self.dip, self.rake)
def get_factor(self):
return float(pmt.magnitude_to_moment(self.magnitude))
def discretize_basesource(self, store, target=None):
mot = pmt.MomentTensor(
strike=self.strike, dip=self.dip, rake=self.rake)
times, amplitudes = self.effective_stf_pre().discretize_t(
store.config.deltat, self.time)
return meta.DiscretizedMTSource(
m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis],
**self._dparams_base_repeated(times))
def pyrocko_moment_tensor(self, store=None, target=None):
return pmt.MomentTensor(
strike=self.strike,
dip=self.dip,
rake=self.rake,
scalar_moment=self.moment)
def pyrocko_event(self, store=None, target=None, **kwargs):
return SourceWithMagnitude.pyrocko_event(
self, store, target,
moment_tensor=self.pyrocko_moment_tensor(store, target),
**kwargs)
@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
d = {}
mt = ev.moment_tensor
if mt:
(strike, dip, rake), _ = mt.both_strike_dip_rake()
d.update(
strike=float(strike),
dip=float(dip),
rake=float(rake),
magnitude=float(mt.moment_magnitude()))
d.update(kwargs)
return super(DCSource, cls).from_pyrocko_event(ev, **d)
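# Illustrative sketch (hypothetical parameter values): a double-couple
# point source is fully described by strike, dip, rake and a magnitude;
# the corresponding full moment tensor can be queried afterwards:
#
#     src = DCSource(lat=0., lon=0., depth=10e3,
#                    strike=30., dip=60., rake=90., magnitude=5.5)
#     mt = src.pyrocko_moment_tensor()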
class CLVDSource(SourceWithMagnitude):
'''
A pure CLVD point source.
'''
discretized_source_class = meta.DiscretizedMTSource
azimuth = Float.T(
default=0.0,
help='azimuth direction of largest dipole, clockwise from north [deg]')
dip = Float.T(
default=90.,
help='dip direction of largest dipole, downward from horizontal [deg]')
def base_key(self):
return Source.base_key(self) + (self.azimuth, self.dip)
def get_factor(self):
return float(pmt.magnitude_to_moment(self.magnitude))
@property
def m6(self):
a = math.sqrt(4. / 3.) * self.get_factor()
m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
rotmat1 = pmt.euler_to_matrix(
d2r * (self.dip - 90.),
d2r * (self.azimuth - 90.),
0.)
m = rotmat1.T * m * rotmat1
return pmt.to6(m)
@property
def m6_astuple(self):
return tuple(self.m6.tolist())
def discretize_basesource(self, store, target=None):
factor = self.get_factor()
times, amplitudes = self.effective_stf_pre().discretize_t(
store.config.deltat, self.time)
return meta.DiscretizedMTSource(
m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
**self._dparams_base_repeated(times))
def pyrocko_moment_tensor(self, store=None, target=None):
return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))
def pyrocko_event(self, store=None, target=None, **kwargs):
mt = self.pyrocko_moment_tensor(store, target)
return Source.pyrocko_event(
self, store, target,
moment_tensor=self.pyrocko_moment_tensor(store, target),
magnitude=float(mt.moment_magnitude()),
**kwargs)
class VLVDSource(SourceWithDerivedMagnitude):
'''
Volumetric linear vector dipole source.
    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke- or sill-like inflation or deflation
    sources. The restriction is such that the moment tensor is rotationally
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.
    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.
    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
'''
discretized_source_class = meta.DiscretizedMTSource
azimuth = Float.T(
default=0.0,
help='azimuth direction of symmetry axis, clockwise from north [deg].')
dip = Float.T(
default=90.,
help='dip direction of symmetry axis, downward from horizontal [deg].')
volume_change = Float.T(
default=0.,
help='volume change of the inflation/deflation [m^3].')
clvd_moment = Float.T(
default=0.,
help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
'controls the sign of the CLVD (the sign of its largest '
'eigenvalue).')
def get_moment_to_volume_change_ratio(self, store, target):
if store is None or target is None:
raise DerivedMagnitudeError(
'Need earth model to convert between volume change and '
'magnitude.')
points = num.array(
[[self.north_shift, self.east_shift, self.depth]], dtype=num.float)
try:
shear_moduli = store.config.get_shear_moduli(
self.lat, self.lon,
points=points,
interpolation=target.interpolation)[0]
except meta.OutOfBounds:
raise DerivedMagnitudeError(
'Could not get shear modulus at source position.')
return float(3. * shear_moduli)
def base_key(self):
return Source.base_key(self) + \
(self.azimuth, self.dip, self.volume_change, self.clvd_moment)
def get_magnitude(self, store=None, target=None):
mt = self.pyrocko_moment_tensor(store, target)
return float(pmt.moment_to_magnitude(mt.moment))
def get_m6(self, store, target):
a = math.sqrt(4. / 3.) * self.clvd_moment
m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
rotmat1 = pmt.euler_to_matrix(
d2r * (self.dip - 90.),
d2r * (self.azimuth - 90.),
0.)
m_clvd = rotmat1.T * m_clvd * rotmat1
m_iso = self.volume_change * \
self.get_moment_to_volume_change_ratio(store, target)
m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0., 0., 0.,) * math.sqrt(2./3)
m = pmt.to6(m_clvd) + pmt.to6(m_iso)
return m
def get_moment(self, store=None, target=None):
return float(pmt.magnitude_to_moment(
self.get_magnitude(store, target)))
def get_m6_astuple(self, store, target):
m6 = self.get_m6(store, target)
return tuple(m6.tolist())
def discretize_basesource(self, store, target=None):
times, amplitudes = self.effective_stf_pre().discretize_t(
store.config.deltat, self.time)
m6 = self.get_m6(store, target)
m6 *= amplitudes / self.get_factor()
return meta.DiscretizedMTSource(
m6s=m6[num.newaxis, :],
**self._dparams_base_repeated(times))
def pyrocko_moment_tensor(self, store=None, target=None):
m6_astuple = self.get_m6_astuple(store, target)
return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))
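# Illustrative sketch (hypothetical values): a VLVDSource superimposes an
# isotropic part given as ``volume_change`` and a CLVD part given as
# ``clvd_moment``; both share the symmetry axis oriented by ``azimuth``
# and ``dip``:
#
#     src = VLVDSource(depth=3e3, azimuth=45., dip=80.,
#                      volume_change=5e3, clvd_moment=1e16)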
class MTSource(Source):
'''
A moment tensor point source.
'''
discretized_source_class = meta.DiscretizedMTSource
mnn = Float.T(
default=1.,
help='north-north component of moment tensor in [Nm]')
mee = Float.T(
default=1.,
help='east-east component of moment tensor in [Nm]')
mdd = Float.T(
default=1.,
help='down-down component of moment tensor in [Nm]')
mne = Float.T(
default=0.,
help='north-east component of moment tensor in [Nm]')
mnd = Float.T(
default=0.,
help='north-down component of moment tensor in [Nm]')
med = Float.T(
default=0.,
help='east-down component of moment tensor in [Nm]')
def __init__(self, **kwargs):
if 'm6' in kwargs:
for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
kwargs.pop('m6')):
kwargs[k] = float(v)
Source.__init__(self, **kwargs)
@property
def m6(self):
return num.array(self.m6_astuple)
@property
def m6_astuple(self):
return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)
@m6.setter
def m6(self, value):
self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value
def base_key(self):
return Source.base_key(self) + self.m6_astuple
def discretize_basesource(self, store, target=None):
times, amplitudes = self.effective_stf_pre().discretize_t(
store.config.deltat, self.time)
return meta.DiscretizedMTSource(
m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
**self._dparams_base_repeated(times))
def get_magnitude(self, store=None, target=None):
m6 = self.m6
return pmt.moment_to_magnitude(
math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
math.sqrt(2.))
def pyrocko_moment_tensor(self, store=None, target=None):
return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))
def pyrocko_event(self, store=None, target=None, **kwargs):
mt = self.pyrocko_moment_tensor(store, target)
return Source.pyrocko_event(
self, store, target,
moment_tensor=self.pyrocko_moment_tensor(store, target),
magnitude=float(mt.moment_magnitude()),
**kwargs)
@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
d = {}
mt = ev.moment_tensor
if mt:
d.update(m6=tuple(map(float, mt.m6())))
else:
if ev.magnitude is not None:
mom = pmt.magnitude_to_moment(ev.magnitude)
v = math.sqrt(2./3.) * mom
d.update(m6=(v, v, v, 0., 0., 0.))
d.update(kwargs)
return super(MTSource, cls).from_pyrocko_event(ev, **d)
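# Illustrative sketch (hypothetical values): an MTSource takes the six
# independent moment tensor entries in NED convention, either one by one
# (mnn, mee, mdd, mne, mnd, med) or via the ``m6`` shortcut:
#
#     src = MTSource(depth=5e3,
#                    m6=[1e17, -0.5e17, -0.5e17, 0., 0., 0.])
#     mw = src.get_magnitude()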
map_anchor = {
'center': (0.0, 0.0),
'center_left': (-1.0, 0.0),
'center_right': (1.0, 0.0),
'top': (0.0, -1.0),
'top_left': (-1.0, -1.0),
'top_right': (1.0, -1.0),
'bottom': (0.0, 1.0),
'bottom_left': (-1.0, 1.0),
'bottom_right': (1.0, 1.0)}
class RectangularSource(SourceWithDerivedMagnitude):
'''
Classical Haskell source model modified for bilateral rupture.
'''
discretized_source_class = meta.DiscretizedMTSource
magnitude = Float.T(
optional=True,
help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')
strike = Float.T(
default=0.0,
help='strike direction in [deg], measured clockwise from north')
dip = Float.T(
default=90.0,
help='dip angle in [deg], measured downward from horizontal')
rake = Float.T(
default=0.0,
help='rake angle in [deg], '
'measured counter-clockwise from right-horizontal '
'in on-plane view')
length = Float.T(
default=0.,
help='length of rectangular source area [m]')
width = Float.T(
default=0.,
help='width of rectangular source area [m]')
anchor = StringChoice.T(
choices=['top', 'top_left', 'top_right', 'center', 'bottom',
'bottom_left', 'bottom_right'],
default='center',
optional=True,
        help='Anchor point for positioning the plane. Can be: top, center, '
             'bottom, top_left, top_right, bottom_left or bottom_right.')
nucleation_x = Float.T(
optional=True,
help='horizontal position of rupture nucleation in normalized fault '
'plane coordinates (-1 = left edge, +1 = right edge)')
nucleation_y = Float.T(
optional=True,
help='down-dip position of rupture nucleation in normalized fault '
'plane coordinates (-1 = upper edge, +1 = lower edge)')
velocity = Float.T(
default=3500.,
help='speed of rupture front [m/s]')
slip = Float.T(
optional=True,
help='Slip on the rectangular source area [m]')
opening_fraction = Float.T(
default=0.,
help='Determines fraction of slip related to opening. '
'(``-1``: pure tensile closing, '
'``0``: pure shear, '
'``1``: pure tensile opening)')
decimation_factor = Int.T(
optional=True,
default=1,
help='Sub-source decimation factor, a larger decimation will'
' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')
def __init__(self, **kwargs):
if 'moment' in kwargs:
mom = kwargs.pop('moment')
if 'magnitude' not in kwargs:
kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
SourceWithDerivedMagnitude.__init__(self, **kwargs)
def base_key(self):
return SourceWithDerivedMagnitude.base_key(self) + (
self.magnitude,
self.slip,
self.strike,
self.dip,
self.rake,
self.length,
self.width,
self.nucleation_x,
self.nucleation_y,
self.velocity,
self.decimation_factor,
self.anchor)
def check_conflicts(self):
if self.magnitude is not None and self.slip is not None:
raise DerivedMagnitudeError(
'Magnitude and slip are both defined.')
def get_magnitude(self, store=None, target=None):
self.check_conflicts()
if self.magnitude is not None:
return self.magnitude
elif self.slip is not None:
if None in (store, target):
raise DerivedMagnitudeError(
'Magnitude for a rectangular source with slip defined '
'can only be derived when earth model and target '
'interpolation method are available.')
amplitudes = self._discretize(store, target)[2]
if amplitudes.ndim == 2:
# CLVD component has no net moment, leave out
return float(pmt.moment_to_magnitude(
num.sum(num.abs(amplitudes[0:2, :]).sum())))
else:
return float(pmt.moment_to_magnitude(num.sum(amplitudes)))
else:
return float(pmt.moment_to_magnitude(1.0))
def get_factor(self):
return 1.0
def get_slip_tensile(self):
return self.slip * self.opening_fraction
def get_slip_shear(self):
        return self.slip - abs(self.get_slip_tensile())
def _discretize(self, store, target):
if self.nucleation_x is not None:
nucx = self.nucleation_x * 0.5 * self.length
else:
nucx = None
if self.nucleation_y is not None:
nucy = self.nucleation_y * 0.5 * self.width
else:
nucy = None
stf = self.effective_stf_pre()
points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
store.config.deltas, store.config.deltat,
self.time, self.north_shift, self.east_shift, self.depth,
self.strike, self.dip, self.length, self.width, self.anchor,
self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
decimation_factor=self.decimation_factor)
if self.slip is not None:
if target is not None:
interpolation = target.interpolation
else:
interpolation = 'nearest_neighbor'
                logger.warning(
'no target information available, will use '
'"nearest_neighbor" interpolation when extracting shear '
'modulus from earth model')
shear_moduli = store.config.get_shear_moduli(
self.lat, self.lon,
points=points,
interpolation=interpolation)
tensile_slip = self.get_slip_tensile()
shear_slip = self.slip - abs(tensile_slip)
amplitudes_total = [shear_moduli * shear_slip]
if tensile_slip != 0:
bulk_moduli = store.config.get_bulk_moduli(
self.lat, self.lon,
points=points,
interpolation=interpolation)
tensile_iso = bulk_moduli * tensile_slip
tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip
amplitudes_total.extend([tensile_iso, tensile_clvd])
amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
amplitudes * dl * dw
else:
# normalization to retain total moment
amplitudes_norm = amplitudes / num.sum(amplitudes)
moment = self.get_moment(store, target)
amplitudes_total = [
amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
if self.opening_fraction != 0.:
amplitudes_total.append(
amplitudes_norm * self.opening_fraction * moment)
amplitudes_total = num.vstack(amplitudes_total).squeeze()
return points, times, num.atleast_1d(amplitudes_total), dl, dw
def discretize_basesource(self, store, target=None):
points, times, amplitudes, dl, dw = self._discretize(store, target)
mot = pmt.MomentTensor(
strike=self.strike, dip=self.dip, rake=self.rake)
m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)
if amplitudes.ndim == 1:
m6s[:, :] *= amplitudes[:, num.newaxis]
elif amplitudes.ndim == 2:
# shear MT components
rotmat1 = pmt.euler_to_matrix(
d2r * self.dip, d2r * self.strike, d2r * -self.rake)
m6s[:, :] *= amplitudes[0, :][:, num.newaxis]
if amplitudes.shape[0] == 2:
# tensile MT components - moment/magnitude input
tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
rot_tensile = pmt.to6(rotmat1.T * tensile * rotmat1)
m6s_tensile = rot_tensile[
num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
m6s += m6s_tensile
elif amplitudes.shape[0] == 3:
# tensile MT components - slip input
iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)
rot_iso = pmt.to6(rotmat1.T * iso * rotmat1)
rot_clvd = pmt.to6(rotmat1.T * clvd * rotmat1)
m6s_iso = rot_iso[
num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
m6s_clvd = rot_clvd[
num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
m6s += m6s_iso + m6s_clvd
else:
                raise ValueError('Unknown amplitudes shape!')
else:
raise ValueError(
'Unexpected dimension of {}'.format(amplitudes.ndim))
ds = meta.DiscretizedMTSource(
lat=self.lat,
lon=self.lon,
times=times,
north_shifts=points[:, 0],
east_shifts=points[:, 1],
depths=points[:, 2],
m6s=m6s)
return ds
def outline(self, cs='xyz'):
points = outline_rect_source(self.strike, self.dip, self.length,
self.width, self.anchor)
points[:, 0] += self.north_shift
points[:, 1] += self.east_shift
points[:, 2] += self.depth
if cs == 'xyz':
return points
elif cs == 'xy':
return points[:, :2]
elif cs in ('latlon', 'lonlat'):
latlon = ne_to_latlon(
self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T
if cs == 'latlon':
return latlon
else:
return latlon[:, ::-1]
def get_nucleation_abs_coord(self, cs='xy'):
if self.nucleation_x is None:
return None, None
coords = from_plane_coords(self.strike, self.dip, self.length,
self.width, self.depth, self.nucleation_x,
self.nucleation_y, lat=self.lat,
lon=self.lon, north_shift=self.north_shift,
east_shift=self.east_shift, cs=cs)
return coords
def pyrocko_moment_tensor(self, store=None, target=None):
return pmt.MomentTensor(
strike=self.strike,
dip=self.dip,
rake=self.rake,
scalar_moment=self.get_moment(store, target))
def pyrocko_event(self, store=None, target=None, **kwargs):
return SourceWithDerivedMagnitude.pyrocko_event(
self, store, target,
**kwargs)
@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
d = {}
mt = ev.moment_tensor
if mt:
(strike, dip, rake), _ = mt.both_strike_dip_rake()
d.update(
strike=float(strike),
dip=float(dip),
rake=float(rake),
magnitude=float(mt.moment_magnitude()))
d.update(kwargs)
return super(RectangularSource, cls).from_pyrocko_event(ev, **d)
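# Illustrative sketch (hypothetical values): a RectangularSource is
# discretized into point sources on a strike/dip oriented plane; ``slip``
# and ``magnitude`` are mutually exclusive and rupture propagates from the
# nucleation point with constant ``velocity``:
#
#     src = RectangularSource(
#         depth=4e3, strike=120., dip=45., rake=90.,
#         length=8e3, width=4e3, anchor='top',
#         nucleation_x=-1., nucleation_y=0., slip=1.5)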
class DoubleDCSource(SourceWithMagnitude):
'''
Two double-couple point sources separated in space and time.
Moment share between the sub-sources is controlled by the
parameter mix.
The position of the subsources is dependent on the moment
distribution between the two sources. Depth, east and north
shift are given for the centroid between the two double-couples.
    The subsources will be positioned according to their moment shares
    around this centroid position.
    This is done according to their delta parameters, which are
    therefore in relation to that centroid.
    Note that the depth of the subsources can therefore be
    depth +/- delta_depth. For shallow earthquakes the depth has to
    be chosen deep enough to avoid sampling above the surface.
'''
strike1 = Float.T(
default=0.0,
help='strike direction in [deg], measured clockwise from north')
dip1 = Float.T(
default=90.0,
help='dip angle in [deg], measured downward from horizontal')
azimuth = Float.T(
default=0.0,
help='azimuth to second double-couple [deg], '
'measured at first, clockwise from north')
rake1 = Float.T(
default=0.0,
help='rake angle in [deg], '
'measured counter-clockwise from right-horizontal '
'in on-plane view')
strike2 = Float.T(
default=0.0,
help='strike direction in [deg], measured clockwise from north')
dip2 = Float.T(
default=90.0,
help='dip angle in [deg], measured downward from horizontal')
rake2 = Float.T(
default=0.0,
help='rake angle in [deg], '
'measured counter-clockwise from right-horizontal '
'in on-plane view')
delta_time = Float.T(
default=0.0,
help='separation of double-couples in time (t2-t1) [s]')
delta_depth = Float.T(
default=0.0,
help='difference in depth (z2-z1) [m]')
distance = Float.T(
default=0.0,
help='distance between the two double-couples [m]')
mix = Float.T(
default=0.5,
        help='how to distribute the moment between the two double-couples: '
'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')
stf1 = STF.T(
optional=True,
help='Source time function of subsource 1 '
'(if given, overrides STF from attribute :py:gattr:`Source.stf`)')
stf2 = STF.T(
optional=True,
help='Source time function of subsource 2 '
'(if given, overrides STF from attribute :py:gattr:`Source.stf`)')
discretized_source_class = meta.DiscretizedMTSource
def base_key(self):
return (
self.time, self.depth, self.lat, self.north_shift,
self.lon, self.east_shift, type(self).__name__) + \
self.effective_stf1_pre().base_key() + \
self.effective_stf2_pre().base_key() + (
self.strike1, self.dip1, self.rake1,
self.strike2, self.dip2, self.rake2,
self.delta_time, self.delta_depth,
self.azimuth, self.distance, self.mix)
def get_factor(self):
return self.moment
def effective_stf1_pre(self):
return self.stf1 or self.stf or g_unit_pulse
def effective_stf2_pre(self):
return self.stf2 or self.stf or g_unit_pulse
def effective_stf_post(self):
return g_unit_pulse
def split(self):
a1 = 1.0 - self.mix
a2 = self.mix
delta_north = math.cos(self.azimuth * d2r) * self.distance
delta_east = math.sin(self.azimuth * d2r) * self.distance
dc1 = DCSource(
lat=self.lat,
lon=self.lon,
time=self.time - self.delta_time * a2,
north_shift=self.north_shift - delta_north * a2,
east_shift=self.east_shift - delta_east * a2,
depth=self.depth - self.delta_depth * a2,
moment=self.moment * a1,
strike=self.strike1,
dip=self.dip1,
rake=self.rake1,
stf=self.stf1 or self.stf)
dc2 = DCSource(
lat=self.lat,
lon=self.lon,
time=self.time + self.delta_time * a1,
north_shift=self.north_shift + delta_north * a1,
east_shift=self.east_shift + delta_east * a1,
depth=self.depth + self.delta_depth * a1,
moment=self.moment * a2,
strike=self.strike2,
dip=self.dip2,
rake=self.rake2,
stf=self.stf2 or self.stf)
return [dc1, dc2]
def discretize_basesource(self, store, target=None):
a1 = 1.0 - self.mix
a2 = self.mix
mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
rake=self.rake1, scalar_moment=a1)
mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
rake=self.rake2, scalar_moment=a2)
delta_north = math.cos(self.azimuth * d2r) * self.distance
delta_east = math.sin(self.azimuth * d2r) * self.distance
times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
store.config.deltat, self.time - self.delta_time * a1)
times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
store.config.deltat, self.time + self.delta_time * a2)
nt1 = times1.size
nt2 = times2.size
ds = meta.DiscretizedMTSource(
lat=self.lat,
lon=self.lon,
times=num.concatenate((times1, times2)),
north_shifts=num.concatenate((
num.repeat(self.north_shift - delta_north * a1, nt1),
num.repeat(self.north_shift + delta_north * a2, nt2))),
east_shifts=num.concatenate((
num.repeat(self.east_shift - delta_east * a1, nt1),
num.repeat(self.east_shift + delta_east * a2, nt2))),
depths=num.concatenate((
num.repeat(self.depth - self.delta_depth * a1, nt1),
num.repeat(self.depth + self.delta_depth * a2, nt2))),
m6s=num.vstack((
mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))
return ds
def pyrocko_moment_tensor(self, store=None, target=None):
a1 = 1.0 - self.mix
a2 = self.mix
mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
rake=self.rake1,
scalar_moment=a1 * self.moment)
mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
rake=self.rake2,
scalar_moment=a2 * self.moment)
return pmt.MomentTensor(m=mot1.m() + mot2.m())
def pyrocko_event(self, store=None, target=None, **kwargs):
return SourceWithMagnitude.pyrocko_event(
self, store, target,
moment_tensor=self.pyrocko_moment_tensor(store, target),
**kwargs)
@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
d = {}
mt = ev.moment_tensor
if mt:
(strike, dip, rake), _ = mt.both_strike_dip_rake()
d.update(
strike1=float(strike),
dip1=float(dip),
rake1=float(rake),
strike2=float(strike),
dip2=float(dip),
rake2=float(rake),
mix=0.0,
magnitude=float(mt.moment_magnitude()))
d.update(kwargs)
source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
source.stf1 = source.stf
source.stf2 = HalfSinusoidSTF(effective_duration=0.)
source.stf = None
return source
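# Illustrative sketch (hypothetical values): the two sub-sources of a
# DoubleDCSource are offset from the common centroid according to ``mix``;
# split() returns them as two ordinary DCSource objects:
#
#     src = DoubleDCSource(
#         depth=10e3, magnitude=6.0, mix=0.3,
#         strike1=30., dip1=60., rake1=90.,
#         strike2=210., dip2=80., rake2=-90.,
#         delta_time=2.0, distance=5e3, azimuth=45.)
#     dc1, dc2 = src.split()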
class RingfaultSource(SourceWithMagnitude):
'''
    A ring fault with vertical double-couples.
'''
diameter = Float.T(
default=1.0,
help='diameter of the ring in [m]')
sign = Float.T(
default=1.0,
help='inside of the ring moves up (+1) or down (-1)')
strike = Float.T(
default=0.0,
help='strike direction of the ring plane, clockwise from north,'
' in [deg]')
dip = Float.T(
default=0.0,
help='dip angle of the ring plane from horizontal in [deg]')
npointsources = Int.T(
default=360,
help='number of point sources to use')
discretized_source_class = meta.DiscretizedMTSource
def base_key(self):
return Source.base_key(self) + (
self.strike, self.dip, self.diameter, self.npointsources)
def get_factor(self):
return self.sign * self.moment
def discretize_basesource(self, store=None, target=None):
n = self.npointsources
phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)
points = num.zeros((n, 3))
points[:, 0] = num.cos(phi) * 0.5 * self.diameter
points[:, 1] = num.sin(phi) * 0.5 * self.diameter
rotmat = num.array(pmt.euler_to_matrix(
self.dip * d2r, self.strike * d2r, 0.0))
points = num.dot(rotmat.T, points.T).T # !!! ?
points[:, 0] += self.north_shift
points[:, 1] += self.east_shift
points[:, 2] += self.depth
m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
scalar_moment=1.0 / n).m())
rotmats = num.transpose(
[[num.cos(phi), num.sin(phi), num.zeros(n)],
[-num.sin(phi), num.cos(phi), num.zeros(n)],
[num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))
ms = num.zeros((n, 3, 3))
for i in range(n):
mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))
m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T
times, amplitudes = self.effective_stf_pre().discretize_t(
store.config.deltat, self.time)
nt = times.size
return meta.DiscretizedMTSource(
times=num.tile(times, n),
lat=self.lat,
lon=self.lon,
north_shifts=num.repeat(points[:, 0], nt),
east_shifts=num.repeat(points[:, 1], nt),
depths=num.repeat(points[:, 2], nt),
m6s=num.repeat(m6s, nt, axis=0) * num.tile(
amplitudes, n)[:, num.newaxis])
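# Illustrative sketch (hypothetical values): a RingfaultSource distributes
# ``npointsources`` vertical double-couples on a circle of the given
# ``diameter``; ``sign`` selects whether the inside moves up or down:
#
#     src = RingfaultSource(depth=2e3, diameter=3e3, sign=1.,
#                           npointsources=180, magnitude=5.0)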
class CombiSource(Source):
'''Composite source model.'''
discretized_source_class = meta.DiscretizedMTSource
subsources = List.T(Source.T())
def __init__(self, subsources=[], **kwargs):
if not subsources:
raise BadRequest(
'Need at least one sub-source to create a CombiSource object.')
lats = num.array(
[subsource.lat for subsource in subsources], dtype=num.float)
lons = num.array(
[subsource.lon for subsource in subsources], dtype=num.float)
lat, lon = lats[0], lons[0]
        if not (num.all(lats == lat) and num.all(lons == lon)):
subsources = [s.clone() for s in subsources]
for subsource in subsources[1:]:
subsource.set_origin(lat, lon)
depth = float(num.mean([p.depth for p in subsources]))
time = float(num.mean([p.time for p in subsources]))
north_shift = float(num.mean([p.north_shift for p in subsources]))
east_shift = float(num.mean([p.east_shift for p in subsources]))
kwargs.update(
time=time,
lat=float(lat),
lon=float(lon),
north_shift=north_shift,
east_shift=east_shift,
depth=depth)
Source.__init__(self, subsources=subsources, **kwargs)
def get_factor(self):
return 1.0
def discretize_basesource(self, store, target=None):
dsources = []
for sf in self.subsources:
ds = sf.discretize_basesource(store, target)
ds.m6s *= sf.get_factor()
dsources.append(ds)
return meta.DiscretizedMTSource.combine(dsources)
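# Illustrative sketch (``dc_source`` and ``explosion_source`` are
# placeholders for previously constructed sources): a CombiSource simply
# superimposes the discretized contributions of its sub-sources:
#
#     src = CombiSource(subsources=[dc_source, explosion_source])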
class SFSource(Source):
'''
A single force point source.
'''
discretized_source_class = meta.DiscretizedSFSource
fn = Float.T(
default=0.,
help='northward component of single force [N]')
fe = Float.T(
default=0.,
help='eastward component of single force [N]')
fd = Float.T(
default=0.,
help='downward component of single force [N]')
def __init__(self, **kwargs):
Source.__init__(self, **kwargs)
def base_key(self):
return Source.base_key(self) + (self.fn, self.fe, self.fd)
def get_factor(self):
return 1.0
def discretize_basesource(self, store, target=None):
times, amplitudes = self.effective_stf_pre().discretize_t(
store.config.deltat, self.time)
forces = amplitudes[:, num.newaxis] * num.array(
[[self.fn, self.fe, self.fd]], dtype=num.float)
return meta.DiscretizedSFSource(forces=forces,
**self._dparams_base_repeated(times))
def pyrocko_event(self, store=None, target=None, **kwargs):
return Source.pyrocko_event(
self, store, target,
**kwargs)
@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
d = {}
d.update(kwargs)
return super(SFSource, cls).from_pyrocko_event(ev, **d)
class PorePressurePointSource(Source):
'''
Excess pore pressure point source.
For poro-elastic initial value problem where an excess pore pressure is
brought into a small source volume.
'''
discretized_source_class = meta.DiscretizedPorePressureSource
pp = Float.T(
default=1.0,
help='initial excess pore pressure in [Pa]')
def base_key(self):
return Source.base_key(self)
def get_factor(self):
return self.pp
def discretize_basesource(self, store, target=None):
return meta.DiscretizedPorePressureSource(pp=arr(1.0),
**self._dparams_base())
class PorePressureLineSource(Source):
'''
Excess pore pressure line source.
The line source is centered at (north_shift, east_shift, depth).
'''
discretized_source_class = meta.DiscretizedPorePressureSource
pp = Float.T(
default=1.0,
help='initial excess pore pressure in [Pa]')
length = Float.T(
default=0.0,
help='length of the line source [m]')
azimuth = Float.T(
default=0.0,
help='azimuth direction, clockwise from north [deg]')
dip = Float.T(
default=90.,
help='dip direction, downward from horizontal [deg]')
def base_key(self):
return Source.base_key(self) + (self.azimuth, self.dip, self.length)
def get_factor(self):
return self.pp
def discretize_basesource(self, store, target=None):
n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1
a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)
sa = math.sin(self.azimuth * d2r)
ca = math.cos(self.azimuth * d2r)
sd = math.sin(self.dip * d2r)
cd = math.cos(self.dip * d2r)
points = num.zeros((n, 3))
points[:, 0] = self.north_shift + a * ca * cd
points[:, 1] = self.east_shift + a * sa * cd
points[:, 2] = self.depth + a * sd
return meta.DiscretizedPorePressureSource(
times=util.num_full(n, self.time),
lat=self.lat,
lon=self.lon,
north_shifts=points[:, 0],
east_shifts=points[:, 1],
depths=points[:, 2],
pp=num.ones(n) / n)
class Request(Object):
'''
Synthetic seismogram computation request.
::
Request(**kwargs)
Request(sources, targets, **kwargs)
'''
sources = List.T(
Source.T(),
help='list of sources for which to produce synthetics.')
targets = List.T(
Target.T(),
help='list of targets for which to produce synthetics.')
@classmethod
def args2kwargs(cls, args):
if len(args) not in (0, 2, 3):
raise BadRequest('Invalid arguments.')
if len(args) == 2:
return dict(sources=args[0], targets=args[1])
else:
return {}
def __init__(self, *args, **kwargs):
kwargs.update(self.args2kwargs(args))
sources = kwargs.pop('sources', [])
targets = kwargs.pop('targets', [])
if isinstance(sources, Source):
sources = [sources]
if isinstance(targets, Target) or isinstance(targets, StaticTarget):
targets = [targets]
Object.__init__(self, sources=sources, targets=targets, **kwargs)
@property
def targets_dynamic(self):
return [t for t in self.targets if isinstance(t, Target)]
@property
def targets_static(self):
return [t for t in self.targets if isinstance(t, StaticTarget)]
@property
def has_dynamic(self):
        return len(self.targets_dynamic) > 0
@property
def has_statics(self):
        return len(self.targets_static) > 0
def subsources_map(self):
m = defaultdict(list)
for source in self.sources:
m[source.base_key()].append(source)
return m
def subtargets_map(self):
m = defaultdict(list)
for target in self.targets:
m[target.base_key()].append(target)
return m
def subrequest_map(self):
ms = self.subsources_map()
mt = self.subtargets_map()
m = {}
for (ks, ls) in ms.items():
for (kt, lt) in mt.items():
m[ks, kt] = (ls, lt)
return m
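# Illustrative sketch (``sources`` and ``targets`` are placeholders for
# lists built by the caller): both constructor forms shown in the class
# docstring are equivalent:
#
#     req = Request(sources=sources, targets=targets)
#     req = Request(sources, targets)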
class ProcessingStats(Object):
t_perc_get_store_and_receiver = Float.T(default=0.)
t_perc_discretize_source = Float.T(default=0.)
t_perc_make_base_seismogram = Float.T(default=0.)
t_perc_make_same_span = Float.T(default=0.)
t_perc_post_process = Float.T(default=0.)
t_perc_optimize = Float.T(default=0.)
t_perc_stack = Float.T(default=0.)
t_perc_static_get_store = Float.T(default=0.)
t_perc_static_discretize_basesource = Float.T(default=0.)
t_perc_static_sum_statics = Float.T(default=0.)
t_perc_static_post_process = Float.T(default=0.)
t_wallclock = Float.T(default=0.)
t_cpu = Float.T(default=0.)
n_read_blocks = Int.T(default=0)
n_results = Int.T(default=0)
n_subrequests = Int.T(default=0)
n_stores = Int.T(default=0)
n_records_stacked = Int.T(default=0)
class Response(Object):
'''
    Response object to a synthetic seismogram computation request.
'''
request = Request.T()
results_list = List.T(List.T(meta.SeismosizerResult.T()))
stats = ProcessingStats.T()
def pyrocko_traces(self):
'''
Return a list of requested
:class:`~pyrocko.trace.Trace` instances.
'''
traces = []
for results in self.results_list:
for result in results:
if not isinstance(result, meta.Result):
continue
traces.append(result.trace.pyrocko_trace())
return traces
def kite_scenes(self):
'''
Return a list of requested
:class:`~kite.scenes` instances.
'''
kite_scenes = []
for results in self.results_list:
for result in results:
if isinstance(result, meta.KiteSceneResult):
sc = result.get_scene()
kite_scenes.append(sc)
return kite_scenes
def static_results(self):
'''
Return a list of requested
:class:`~pyrocko.gf.meta.StaticResult` instances.
'''
statics = []
for results in self.results_list:
for result in results:
if not isinstance(result, meta.StaticResult):
continue
statics.append(result)
return statics
def iter_results(self, get='pyrocko_traces'):
'''
Generator function to iterate over results of request.
Yields associated :py:class:`Source`,
:class:`~pyrocko.gf.targets.Target`,
:class:`~pyrocko.trace.Trace` instances in each iteration.
'''
for isource, source in enumerate(self.request.sources):
for itarget, target in enumerate(self.request.targets):
result = self.results_list[isource][itarget]
if get == 'pyrocko_traces':
yield source, target, result.trace.pyrocko_trace()
elif get == 'results':
yield source, target, result
def snuffle(self, **kwargs):
'''
Open *snuffler* with requested traces.
'''
trace.snuffle(self.pyrocko_traces(), **kwargs)
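# Illustrative sketch (``response`` is a placeholder for the object
# returned by Engine.process()): results can be unpacked per
# (source, target) pair:
#
#     for source, target, tr in response.iter_results():
#         print(source.name, target.codes, tr.tmax - tr.tmin)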
class Engine(Object):
'''
Base class for synthetic seismogram calculators.
'''
def get_store_ids(self):
'''
Get list of available GF store IDs
'''
return []
class Rule(object):
pass
class VectorRule(Rule):
def __init__(self, quantity, differentiate=0, integrate=0):
self.components = [quantity + '.' + c for c in 'ned']
self.differentiate = differentiate
self.integrate = integrate
def required_components(self, target):
n, e, d = self.components
sa, ca, sd, cd = target.get_sin_cos_factors()
comps = []
if nonzero(ca * cd):
comps.append(n)
if nonzero(sa * cd):
comps.append(e)
if nonzero(sd):
comps.append(d)
return tuple(comps)
def apply_(self, target, base_seismogram):
n, e, d = self.components
sa, ca, sd, cd = target.get_sin_cos_factors()
if nonzero(ca * cd):
data = base_seismogram[n].data * (ca * cd)
deltat = base_seismogram[n].deltat
else:
data = 0.0
if nonzero(sa * cd):
data = data + base_seismogram[e].data * (sa * cd)
deltat = base_seismogram[e].deltat
if nonzero(sd):
data = data + base_seismogram[d].data * sd
deltat = base_seismogram[d].deltat
if self.differentiate:
data = util.diff_fd(self.differentiate, 4, deltat, data)
if self.integrate:
raise NotImplementedError('Integration is not implemented yet.')
return data
class HorizontalVectorRule(Rule):
def __init__(self, quantity, differentiate=0, integrate=0):
self.components = [quantity + '.' + c for c in 'ne']
self.differentiate = differentiate
self.integrate = integrate
def required_components(self, target):
n, e = self.components
sa, ca, _, _ = target.get_sin_cos_factors()
comps = []
if nonzero(ca):
comps.append(n)
if nonzero(sa):
comps.append(e)
return tuple(comps)
def apply_(self, target, base_seismogram):
n, e = self.components
sa, ca, _, _ = target.get_sin_cos_factors()
if nonzero(ca):
data = base_seismogram[n].data * ca
else:
data = 0.0
if nonzero(sa):
data = data + base_seismogram[e].data * sa
if self.differentiate:
deltat = base_seismogram[e].deltat
data = util.diff_fd(self.differentiate, 4, deltat, data)
if self.integrate:
raise NotImplementedError('Integration is not implemented yet.')
return data
class ScalarRule(Rule):
    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        self.differentiate = differentiate
def required_components(self, target):
return (self.c, )
def apply_(self, target, base_seismogram):
data = base_seismogram[self.c].data.copy()
deltat = base_seismogram[self.c].deltat
if self.differentiate:
data = util.diff_fd(self.differentiate, 4, deltat, data)
return data
class StaticDisplacement(Rule):
def required_components(self, target):
return tuple(['displacement.%s' % c for c in list('ned')])
def apply_(self, target, base_statics):
if isinstance(target, SatelliteTarget):
los_fac = target.get_los_factors()
base_statics['displacement.los'] =\
(los_fac[:, 0] * -base_statics['displacement.d'] +
los_fac[:, 1] * base_statics['displacement.e'] +
los_fac[:, 2] * base_statics['displacement.n'])
return base_statics
channel_rules = {
'displacement': [VectorRule('displacement')],
'rotation': [VectorRule('rotation')],
'velocity': [
VectorRule('velocity'),
VectorRule('displacement', differentiate=1)],
'acceleration': [
VectorRule('acceleration'),
VectorRule('velocity', differentiate=1),
VectorRule('displacement', differentiate=2)],
'pore_pressure': [ScalarRule('pore_pressure')],
'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
'darcy_velocity': [VectorRule('darcy_velocity')],
}
static_rules = {
'displacement': [StaticDisplacement()]
}
class OutOfBoundsContext(Object):
source = Source.T()
target = Target.T()
distance = Float.T()
components = List.T(String.T())
def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
dsource_cache = {}
tcounters = list(range(6))
store_ids = set()
sources = set()
targets = set()
for itarget, target in enumerate(ptargets):
target._id = itarget
for w in work:
_, _, isources, itargets = w
sources.update([psources[isource] for isource in isources])
targets.update([ptargets[itarget] for itarget in itargets])
store_ids = set([t.store_id for t in targets])
for isource, source in enumerate(psources):
components = set()
for itarget, target in enumerate(targets):
rule = engine.get_rule(source, target)
components.update(rule.required_components(target))
for store_id in store_ids:
store_targets = [t for t in targets if t.store_id == store_id]
sample_rates = set([t.sample_rate for t in store_targets])
interpolations = set([t.interpolation for t in store_targets])
base_seismograms = []
store_targets_out = []
for samp_rate in sample_rates:
for interp in interpolations:
engine_targets = [
t for t in store_targets if t.sample_rate == samp_rate
and t.interpolation == interp]
if not engine_targets:
continue
store_targets_out += engine_targets
base_seismograms += engine.base_seismograms(
source,
engine_targets,
components,
dsource_cache,
nthreads)
for iseis, seismogram in enumerate(base_seismograms):
for tr in seismogram.values():
if tr.err != store.SeismosizerErrorEnum.SUCCESS:
e = SeismosizerError(
'Seismosizer failed with return code %i\n%s' % (
tr.err, str(
OutOfBoundsContext(
source=source,
target=store_targets[iseis],
distance=source.distance_to(
store_targets[iseis]),
components=components))))
raise e
for seismogram, target in zip(base_seismograms, store_targets_out):
try:
result = engine._post_process_dynamic(
seismogram, source, target)
except SeismosizerError as e:
result = e
yield (isource, target._id, result), tcounters
def process_dynamic(work, psources, ptargets, engine, nthreads=0):
dsource_cache = {}
for w in work:
_, _, isources, itargets = w
sources = [psources[isource] for isource in isources]
targets = [ptargets[itarget] for itarget in itargets]
components = set()
for target in targets:
rule = engine.get_rule(sources[0], target)
components.update(rule.required_components(target))
for isource, source in zip(isources, sources):
for itarget, target in zip(itargets, targets):
try:
base_seismogram, tcounters = engine.base_seismogram(
source, target, components, dsource_cache, nthreads)
except meta.OutOfBounds as e:
e.context = OutOfBoundsContext(
source=sources[0],
target=targets[0],
distance=sources[0].distance_to(targets[0]),
components=components)
raise
n_records_stacked = 0
t_optimize = 0.0
t_stack = 0.0
for _, tr in base_seismogram.items():
n_records_stacked += tr.n_records_stacked
t_optimize += tr.t_optimize
t_stack += tr.t_stack
try:
result = engine._post_process_dynamic(
base_seismogram, source, target)
result.n_records_stacked = n_records_stacked
result.n_shared_stacking = len(sources) *\
len(targets)
result.t_optimize = t_optimize
result.t_stack = t_stack
except SeismosizerError as e:
result = e
tcounters.append(xtime())
yield (isource, itarget, result), tcounters
def process_static(work, psources, ptargets, engine, nthreads=0):
for w in work:
_, _, isources, itargets = w
sources = [psources[isource] for isource in isources]
targets = [ptargets[itarget] for itarget in itargets]
for isource, source in zip(isources, sources):
for itarget, target in zip(itargets, targets):
components = engine.get_rule(source, target)\
.required_components(target)
try:
base_statics, tcounters = engine.base_statics(
source, target, components, nthreads)
except meta.OutOfBounds as e:
e.context = OutOfBoundsContext(
source=sources[0],
target=targets[0],
distance=float('nan'),
components=components)
raise
result = engine._post_process_statics(
base_statics, source, target)
tcounters.append(xtime())
yield (isource, itarget, result), tcounters
class LocalEngine(Engine):
'''
Offline synthetic seismogram calculator.
:param use_env: if ``True``, fill :py:attr:`store_superdirs` and
:py:attr:`store_dirs` with paths set in environment variables
GF_STORE_SUPERDIRS and GF_STORE_DIRS.
:param use_config: if ``True``, fill :py:attr:`store_superdirs` and
:py:attr:`store_dirs` with paths set in the user's config file.
The config file can be found at :file:`~/.pyrocko/config.pf`
        .. code-block:: python
gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
gf_store_superdirs: ['/home/pyrocko/gf_stores/']
'''
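    # Illustrative sketch (hypothetical paths): GF stores can be supplied
    # explicitly, or picked up from environment variables / the user
    # config:
    #
    #     engine = LocalEngine(store_superdirs=['/data/gf_stores'])
    #     engine = LocalEngine(use_config=True)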
store_superdirs = List.T(
String.T(),
help='directories which are searched for Green\'s function stores')
store_dirs = List.T(
String.T(),
help='additional individual Green\'s function store directories')
default_store_id = String.T(
optional=True,
help='default store ID to be used when a request does not provide '
'one')
def __init__(self, **kwargs):
use_env = kwargs.pop('use_env', False)
use_config = kwargs.pop('use_config', False)
Engine.__init__(self, **kwargs)
if use_env:
env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
if env_store_superdirs:
self.store_superdirs.extend(env_store_superdirs.split(':'))
if env_store_dirs:
self.store_dirs.extend(env_store_dirs.split(':'))
if use_config:
c = config.config()
self.store_superdirs.extend(c.gf_store_superdirs)
self.store_dirs.extend(c.gf_store_dirs)
self._check_store_dirs_type()
self._id_to_store_dir = {}
self._open_stores = {}
self._effective_default_store_id = None
def _check_store_dirs_type(self):
for sdir in ['store_dirs', 'store_superdirs']:
if not isinstance(self.__getattribute__(sdir), list):
raise TypeError("{} of {} is not of type list".format(
sdir, self.__class__.__name__))
def _get_store_id(self, store_dir):
store_ = store.Store(store_dir)
store_id = store_.config.id
store_.close()
return store_id
def _looks_like_store_dir(self, store_dir):
return os.path.isdir(store_dir) and \
all(os.path.isfile(pjoin(store_dir, x)) for x in
('index', 'traces', 'config'))
def iter_store_dirs(self):
store_dirs = set()
for d in self.store_superdirs:
if not os.path.exists(d):
logger.warning('store_superdir not available: %s' % d)
continue
for entry in os.listdir(d):
store_dir = os.path.realpath(pjoin(d, entry))
if self._looks_like_store_dir(store_dir):
store_dirs.add(store_dir)
for store_dir in self.store_dirs:
store_dirs.add(os.path.realpath(store_dir))
return store_dirs
def _scan_stores(self):
for store_dir in self.iter_store_dirs():
store_id = self._get_store_id(store_dir)
if store_id not in self._id_to_store_dir:
self._id_to_store_dir[store_id] = store_dir
else:
if store_dir != self._id_to_store_dir[store_id]:
raise DuplicateStoreId(
'GF store ID %s is used in (at least) two '
'different stores. Locations are: %s and %s' %
(store_id, self._id_to_store_dir[store_id], store_dir))
def get_store_dir(self, store_id):
'''
Lookup directory given a GF store ID.
'''
if store_id not in self._id_to_store_dir:
self._scan_stores()
if store_id not in self._id_to_store_dir:
raise NoSuchStore(store_id, self.iter_store_dirs())
return self._id_to_store_dir[store_id]
def get_store_ids(self):
'''
Get list of available store IDs.
'''
self._scan_stores()
return sorted(self._id_to_store_dir.keys())
def effective_default_store_id(self):
if self._effective_default_store_id is None:
if self.default_store_id is None:
store_ids = self.get_store_ids()
if len(store_ids) == 1:
self._effective_default_store_id = self.get_store_ids()[0]
else:
raise NoDefaultStoreSet()
else:
self._effective_default_store_id = self.default_store_id
return self._effective_default_store_id
def get_store(self, store_id=None):
'''
Get a store from the engine.
:param store_id: identifier of the store (optional)
:returns: :py:class:`~pyrocko.gf.store.Store` object
If no ``store_id`` is provided the store
associated with the :py:gattr:`default_store_id` is returned.
Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is
undefined.
'''
if store_id is None:
store_id = self.effective_default_store_id()
if store_id not in self._open_stores:
store_dir = self.get_store_dir(store_id)
self._open_stores[store_id] = store.Store(store_dir)
return self._open_stores[store_id]
def get_store_config(self, store_id):
store = self.get_store(store_id)
return store.config
def get_store_extra(self, store_id, key):
store = self.get_store(store_id)
return store.get_extra(key)
def close_cashed_stores(self):
'''
        Close and remove IDs from cached stores.
'''
store_ids = []
for store_id, store_ in self._open_stores.items():
store_.close()
store_ids.append(store_id)
for store_id in store_ids:
self._open_stores.pop(store_id)
def get_rule(self, source, target):
cprovided = self.get_store(target.store_id).get_provided_components()
if isinstance(target, StaticTarget):
quantity = target.quantity
available_rules = static_rules
elif isinstance(target, Target):
quantity = target.effective_quantity()
available_rules = channel_rules
try:
for rule in available_rules[quantity]:
cneeded = rule.required_components(target)
if all(c in cprovided for c in cneeded):
return rule
except KeyError:
pass
raise BadRequest(
'No rule to calculate "%s" with GFs from store "%s" '
'for source model "%s".' % (
target.effective_quantity(),
target.store_id,
source.__class__.__name__))
def _cached_discretize_basesource(self, source, store, cache, target):
if (source, store) not in cache:
cache[source, store] = source.discretize_basesource(store, target)
return cache[source, store]
def base_seismograms(self, source, targets, components, dsource_cache,
nthreads=0):
target = targets[0]
interp = set([t.interpolation for t in targets])
if len(interp) > 1:
raise BadRequest('Targets have different interpolation schemes.')
rates = set([t.sample_rate for t in targets])
if len(rates) > 1:
raise BadRequest('Targets have different sample rates.')
store_ = self.get_store(target.store_id)
receivers = [t.receiver(store_) for t in targets]
if target.sample_rate is not None:
deltat = 1. / target.sample_rate
rate = target.sample_rate
else:
deltat = None
rate = store_.config.sample_rate
tmin = num.fromiter(
(t.tmin for t in targets), dtype=num.float, count=len(targets))
tmax = num.fromiter(
(t.tmax for t in targets), dtype=num.float, count=len(targets))
itmin = num.floor(tmin * rate).astype(num.int64)
itmax = num.ceil(tmax * rate).astype(num.int64)
nsamples = itmax - itmin + 1
mask = num.isnan(tmin)
itmin[mask] = 0
nsamples[mask] = -1
base_source = self._cached_discretize_basesource(
source, store_, dsource_cache, target)
base_seismograms = store_.calc_seismograms(
base_source, receivers, components,
deltat=deltat,
itmin=itmin, nsamples=nsamples,
interpolation=target.interpolation,
optimization=target.optimization,
nthreads=nthreads)
for i, base_seismogram in enumerate(base_seismograms):
base_seismograms[i] = store.make_same_span(base_seismogram)
return base_seismograms
def base_seismogram(self, source, target, components, dsource_cache,
nthreads):
tcounters = [xtime()]
store_ = self.get_store(target.store_id)
receiver = target.receiver(store_)
        if target.tmin is not None and target.tmax is not None:
rate = store_.config.sample_rate
itmin = int(num.floor(target.tmin * rate))
itmax = int(num.ceil(target.tmax * rate))
nsamples = itmax - itmin + 1
else:
itmin = None
nsamples = None
tcounters.append(xtime())
base_source = self._cached_discretize_basesource(
source, store_, dsource_cache, target)
tcounters.append(xtime())
if target.sample_rate is not None:
deltat = 1. / target.sample_rate
else:
deltat = None
base_seismogram = store_.seismogram(
base_source, receiver, components,
deltat=deltat,
itmin=itmin, nsamples=nsamples,
interpolation=target.interpolation,
optimization=target.optimization,
nthreads=nthreads)
tcounters.append(xtime())
base_seismogram = store.make_same_span(base_seismogram)
tcounters.append(xtime())
return base_seismogram, tcounters
def base_statics(self, source, target, components, nthreads):
tcounters = [xtime()]
store_ = self.get_store(target.store_id)
if target.tsnapshot is not None:
rate = store_.config.sample_rate
itsnapshot = int(num.floor(target.tsnapshot * rate))
else:
itsnapshot = None
tcounters.append(xtime())
base_source = source.discretize_basesource(store_, target=target)
tcounters.append(xtime())
base_statics = store_.statics(
base_source,
target,
itsnapshot,
components,
target.interpolation,
nthreads)
tcounters.append(xtime())
return base_statics, tcounters
def _post_process_dynamic(self, base_seismogram, source, target):
base_any = next(iter(base_seismogram.values()))
deltat = base_any.deltat
itmin = base_any.itmin
rule = self.get_rule(source, target)
data = rule.apply_(target, base_seismogram)
factor = source.get_factor() * target.get_factor()
if factor != 1.0:
data = data * factor
stf = source.effective_stf_post()
times, amplitudes = stf.discretize_t(
deltat, 0.0)
# repeat end point to prevent boundary effects
padded_data = num.empty(data.size + amplitudes.size, dtype=num.float)
padded_data[:data.size] = data
padded_data[data.size:] = data[-1]
data = num.convolve(amplitudes, padded_data)
tmin = itmin * deltat + times[0]
tr = meta.SeismosizerTrace(
codes=target.codes,
data=data[:-amplitudes.size],
deltat=deltat,
tmin=tmin)
return target.post_process(self, source, tr)
def _post_process_statics(self, base_statics, source, starget):
rule = self.get_rule(source, starget)
data = rule.apply_(starget, base_statics)
factor = source.get_factor()
if factor != 1.0:
for v in data.values():
v *= factor
return starget.post_process(self, source, base_statics)
def process(self, *args, **kwargs):
'''
Process a request.
::
process(**kwargs)
process(request, **kwargs)
process(sources, targets, **kwargs)
        The request can be given as a :py:class:`Request` object, or such an
object is created using ``Request(**kwargs)`` for convenience.
:returns: :py:class:`Response` object
'''
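        # Illustrative call patterns (``sources``, ``targets`` and
        # ``request`` are placeholders supplied by the caller):
        #
        #     response = engine.process(sources, targets)
        #     response = engine.process(request)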
if len(args) not in (0, 1, 2):
raise BadRequest('Invalid arguments.')
if len(args) == 1:
kwargs['request'] = args[0]
elif len(args) == 2:
kwargs.update(Request.args2kwargs(args))
request = kwargs.pop('request', None)
status_callback = kwargs.pop('status_callback', None)
calc_timeseries = kwargs.pop('calc_timeseries', True)
nprocs = kwargs.pop('nprocs', None)
nthreads = kwargs.pop('nthreads', 1)
if nprocs is not None:
nthreads = nprocs
if request is None:
request = Request(**kwargs)
if resource:
rs0 = resource.getrusage(resource.RUSAGE_SELF)
rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
tt0 = xtime()
# make sure stores are open before fork()
store_ids = set(target.store_id for target in request.targets)
for store_id in store_ids:
self.get_store(store_id)
source_index = dict((x, i) for (i, x) in
enumerate(request.sources))
target_index = dict((x, i) for (i, x) in
enumerate(request.targets))
m = request.subrequest_map()
skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
results_list = []
for i in range(len(request.sources)):
results_list.append([None] * len(request.targets))
tcounters_dyn_list = []
tcounters_static_list = []
nsub = len(skeys)
isub = 0
# Processing dynamic targets through
# parimap(process_subrequest_dynamic)
if calc_timeseries:
_process_dynamic = process_dynamic_timeseries
else:
_process_dynamic = process_dynamic
if request.has_dynamic:
work_dynamic = [
(i, nsub,
[source_index[source] for source in m[k][0]],
[target_index[target] for target in m[k][1]
if not isinstance(target, StaticTarget)])
for (i, k) in enumerate(skeys)]
for ii_results, tcounters_dyn in _process_dynamic(
work_dynamic, request.sources, request.targets, self,
nthreads):
tcounters_dyn_list.append(num.diff(tcounters_dyn))
isource, itarget, result = ii_results
results_list[isource][itarget] = result
if status_callback:
status_callback(isub, nsub)
isub += 1
# Processing static targets through process_static
if request.has_statics:
work_static = [
(i, nsub,
[source_index[source] for source in m[k][0]],
[target_index[target] for target in m[k][1]
if isinstance(target, StaticTarget)])
for (i, k) in enumerate(skeys)]
for ii_results, tcounters_static in process_static(
work_static, request.sources, request.targets, self,
nthreads=nthreads):
tcounters_static_list.append(num.diff(tcounters_static))
isource, itarget, result = ii_results
results_list[isource][itarget] = result
if status_callback:
status_callback(isub, nsub)
isub += 1
if status_callback:
status_callback(nsub, nsub)
        tt1 = xtime()
if resource:
rs1 = resource.getrusage(resource.RUSAGE_SELF)
rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)
s = ProcessingStats()
if request.has_dynamic:
tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
t_dyn = float(num.sum(tcumu_dyn))
perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
(s.t_perc_get_store_and_receiver,
s.t_perc_discretize_source,
s.t_perc_make_base_seismogram,
s.t_perc_make_same_span,
s.t_perc_post_process) = perc_dyn
else:
t_dyn = 0.
if request.has_statics:
tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
t_static = num.sum(tcumu_static)
perc_static = map(float, tcumu_static / t_static * 100.)
(s.t_perc_static_get_store,
s.t_perc_static_discretize_basesource,
s.t_perc_static_sum_statics,
s.t_perc_static_post_process) = perc_static
s.t_wallclock = tt1 - tt0
if resource:
s.t_cpu = (
(rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
(rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
s.n_read_blocks = (
(rs1.ru_inblock + rc1.ru_inblock) -
(rs0.ru_inblock + rc0.ru_inblock))
n_records_stacked = 0.
for results in results_list:
for result in results:
if not isinstance(result, meta.Result):
continue
shr = float(result.n_shared_stacking)
n_records_stacked += result.n_records_stacked / shr
s.t_perc_optimize += result.t_optimize / shr
s.t_perc_stack += result.t_stack / shr
s.n_records_stacked = int(n_records_stacked)
if t_dyn != 0.:
s.t_perc_optimize /= t_dyn * 100
s.t_perc_stack /= t_dyn * 100
return Response(
request=request,
results_list=results_list,
stats=s)
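# Illustrative usage sketch (not part of the original module) of the three calling
# conventions accepted by process(), as listed in its docstring. The store id,
# source and target values below are assumptions for demonstration only:
#
#   from pyrocko import gf
#   engine = gf.LocalEngine(store_superdirs=['/path/to/gf_stores'])  # assumed path
#   source = gf.DCSource(lat=0., lon=0., depth=10e3,
#                        strike=30., dip=90., rake=0., magnitude=6.0)
#   target = gf.Target(lat=0., lon=1., store_id='my_store',          # assumed store
#                      codes=('', 'STA', '', 'Z'))
#   response = engine.process(source, [target])                      # (sources, targets)
#   response = engine.process(gf.Request(sources=[source], targets=[target]))
#   response = engine.process(sources=[source], targets=[target])    # (**kwargs)
#   traces = response.pyrocko_traces()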
class RemoteEngine(Engine):
'''
Client for remote synthetic seismogram calculator.
'''
site = String.T(default=ws.g_default_site, optional=True)
url = String.T(default=ws.g_url, optional=True)
def process(self, request=None, status_callback=None, **kwargs):
if request is None:
request = Request(**kwargs)
return ws.seismosizer(url=self.url, site=self.site, request=request)
g_engine = None
def get_engine(store_superdirs=[]):
global g_engine
if g_engine is None:
g_engine = LocalEngine(use_env=True, use_config=True)
for d in store_superdirs:
if d not in g_engine.store_superdirs:
g_engine.store_superdirs.append(d)
return g_engine
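# Example of the module-level engine cache above (illustrative only; the directory
# path is an assumption):
#
#   engine = get_engine(store_superdirs=['/data/gf_stores'])
#   engine is get_engine()   # True -- the same LocalEngine instance is reused,
#                            # with '/data/gf_stores' appended to store_superdirs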
class SourceGroup(Object):
def __getattr__(self, k):
return num.fromiter((getattr(s, k) for s in self),
dtype=num.float)
def __iter__(self):
raise NotImplementedError(
'This method should be implemented in subclass.')
def __len__(self):
raise NotImplementedError(
'This method should be implemented in subclass.')
class SourceList(SourceGroup):
sources = List.T(Source.T())
def append(self, s):
self.sources.append(s)
def __iter__(self):
return iter(self.sources)
def __len__(self):
return len(self.sources)
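# Illustrative sketch (not in the original module) of the attribute gathering that
# SourceGroup.__getattr__ provides: reading an attribute on the group returns a
# numpy array collected from the member sources. The coordinate values are
# assumptions for demonstration only:
#
#   sl = SourceList()
#   sl.append(DCSource(lat=10., lon=20., depth=3e3))
#   sl.append(DCSource(lat=11., lon=21., depth=5e3))
#   sl.depth   # -> array([3000., 5000.])
#   len(sl)    # -> 2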
class SourceGrid(SourceGroup):
base = Source.T()
variables = Dict.T(String.T(), Range.T())
order = List.T(String.T())
def __len__(self):
n = 1
for (k, v) in self.make_coords(self.base):
n *= len(list(v))
return n
def __iter__(self):
for items in permudef(self.make_coords(self.base)):
s = self.base.clone(**{k: v for (k, v) in items})
s.regularize()
yield s
def ordered_params(self):
ks = list(self.variables.keys())
for k in self.order + list(self.base.keys()):
if k in ks:
yield k
ks.remove(k)
if ks:
raise Exception('Invalid parameter "%s" for source type "%s".' %
(ks[0], self.base.__class__.__name__))
def make_coords(self, base):
return [(param, self.variables[param].make(base=base[param]))
for param in self.ordered_params()]
source_classes = [
Source,
SourceWithMagnitude,
SourceWithDerivedMagnitude,
ExplosionSource,
RectangularExplosionSource,
DCSource,
CLVDSource,
VLVDSource,
MTSource,
RectangularSource,
DoubleDCSource,
RingfaultSource,
CombiSource,
SFSource,
PorePressurePointSource,
PorePressureLineSource,
]
stf_classes = [
STF,
BoxcarSTF,
TriangularSTF,
HalfSinusoidSTF,
ResonatorSTF,
]
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()
| gpl-3.0 | -5,835,698,615,705,234,000 | 30.362567 | 79 | 0.553282 | false |
NuAvatar/clandmark | python_interface/bin/flandmark_demo.py | 6 | 2152 | import numpy as np
import os
from fnmatch import fnmatch
from py_flandmark import PyFlandmark
from PIL import Image
import ImageDraw
import matplotlib.pyplot as plt
def rgb2gray(rgb):
"""
converts rgb array to grey scale variant
accordingly to fomula taken from wiki
(this function is missing in python)
"""
return np.dot(rgb[...,:3], [0.299, 0.587, 0.144])
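# Illustrative example: pure red, green and blue pixels map to the luma weights,
# e.g. rgb2gray(np.array([[[255, 0, 0], [0, 255, 0], [0, 0, 255]]], dtype=np.float64))
# returns approximately array([[ 76.245, 149.685, 29.07 ]]), i.e. 255 * [0.299, 0.587, 0.114].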
def read_bbox_from_txt(file_name):
"""
returns 2x2 matrix coordinates of
left upper and right lower corners
of rectangle that contains face stored
in columns of matrix
"""
f = open(file_name)
    content = f.read().replace(',', ' ')
    f.close()
    ret = np.array(map(int, content.split()), dtype=np.int32)
ret = ret.reshape((2,2), order='F')
return ret
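# Illustrative example (the file contents are an assumption): a .det file holding
# "10,20,110,220" parses into corner coordinates stored column-wise:
#
#   bbox = read_bbox_from_txt('face.det')
#   # bbox -> array([[ 10, 110],
#   #                [ 20, 220]], dtype=int32)
#   # bbox[:, 0] is the left upper corner, bbox[:, 1] the right lower corner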
DIR = '../../../data/Images/'
JPGS = [f for f in os.listdir(DIR) if fnmatch(f, '*.jpg')]
flmrk = PyFlandmark("../../../data/flandmark_model.xml", False)
for jpg_name in JPGS:
file_name = jpg_name[:-4]
img = Image.open(DIR + jpg_name)
arr = rgb2gray(np.asarray(img))
bbox = read_bbox_from_txt(DIR + jpg_name[:-4] + '.det')
d_landmarks = flmrk.detect(arr, bbox)
n = d_landmarks.shape[1]
print "test detect method"
im = Image.fromarray(arr)
img_dr = ImageDraw.Draw(im)
img_dr.rectangle([tuple(bbox[:,0]), tuple(bbox[:,1])], outline="#FF00FF")
r = 2.
for i in xrange(n):
x = d_landmarks[0,i]
y = d_landmarks[1,i]
img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test detect method"
frame = flmrk.get_normalized_frame(arr, bbox)[0]
frame = frame.astype(np.double)
im = Image.fromarray(frame)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test detect_base method"
landmarks = flmrk.detect_base(frame)
im = Image.fromarray(frame)
img_dr = ImageDraw.Draw(im)
r = 2.
for i in xrange(n):
x = landmarks[0,i]
y = landmarks[1,i]
img_dr.ellipse((x-r, y-r, x+r, y+r), fill=0.)
plt.imshow(np.asarray(im), cmap = plt.get_cmap('gray'))
plt.show()
print "test psi method"
psi = flmrk.get_psi(frame, landmarks.astype(np.int32), bbox)
#flmrk.get_psi(d_landmarks, arr, bbox)
break | gpl-3.0 | 3,724,801,805,660,801,500 | 22.659341 | 74 | 0.659851 | false |
drufat/vispy | examples/basics/gloo/gpuimage.py | 18 | 3260 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# vispy: gallery 100
"""
Illustrate how to plot a 2D function (an image) y=f(x,y) on the GPU.
"""
from vispy import app, gloo
vertex = """
attribute vec2 a_position;
varying vec2 v_position;
void main()
{
gl_Position = vec4(a_position, 0.0, 1.0);
v_position = a_position;
}
"""
fragment = """
#include "math/constants.glsl"
//const float M_PI = 3.14159265358979323846;
uniform float u_time;
varying vec2 v_position;
/**********************************************************
Specify the parameters here.
**********************************************************/
const float z_offset = 1.; // (z+z_offset)/z_max should be in [0,1]
const float z_max = 2.;
const float x_scale = 5.; // x is between -x_scale and +x_scale
const float y_scale = 5.; // y is between -y_scale and +y_scale
const float t_scale = 5.; // scale for the time
/*********************************************************/
float f(float x, float y, float t) {
// x is in [-x_scale, +x_scale]
// y is in [-y_scale, +y_scale]
// t is in [0, +oo)
/**********************************************************
Write your function below.
**********************************************************/
float k = .25*cos(t);
return (cos(x)+k)*(sin(y)-k);
/*********************************************************/
}
vec4 jet(float x) {
vec3 a, b;
float c;
if (x < 0.34) {
a = vec3(0, 0, 0.5);
b = vec3(0, 0.8, 0.95);
c = (x - 0.0) / (0.34 - 0.0);
} else if (x < 0.64) {
a = vec3(0, 0.8, 0.95);
b = vec3(0.85, 1, 0.04);
c = (x - 0.34) / (0.64 - 0.34);
} else if (x < 0.89) {
a = vec3(0.85, 1, 0.04);
b = vec3(0.96, 0.7, 0);
c = (x - 0.64) / (0.89 - 0.64);
} else {
a = vec3(0.96, 0.7, 0);
b = vec3(0.5, 0, 0);
c = (x - 0.89) / (1.0 - 0.89);
}
return vec4(mix(a, b, c), 1.0);
}
void main() {
vec2 pos = v_position;
float z = f(x_scale * pos.x, y_scale * pos.y, t_scale * u_time);
gl_FragColor = jet((z + z_offset) / (z_max));
}
"""
class Canvas(app.Canvas):
def __init__(self):
app.Canvas.__init__(self, position=(300, 100),
size=(800, 800), keys='interactive')
self.program = gloo.Program(vertex, fragment)
self.program['a_position'] = [(-1., -1.), (-1., +1.),
(+1., -1.), (+1., +1.)]
self.program['u_time'] = 0.0
self.timer = app.Timer('auto', connect=self.on_timer, start=True)
self.show()
def on_timer(self, event):
self.program['u_time'] = event.elapsed
self.update()
def on_resize(self, event):
width, height = event.physical_size
gloo.set_viewport(0, 0, width, height)
def on_draw(self, event):
self.program.draw('triangle_strip')
if __name__ == '__main__':
canvas = Canvas()
app.run()
| bsd-3-clause | 5,532,328,391,086,270,000 | 27.596491 | 79 | 0.441718 | false |
robbiet480/home-assistant | homeassistant/components/plum_lightpad/light.py | 7 | 7199 | """Support for Plum Lightpad lights."""
import asyncio
import logging
from typing import Callable, List
from plumlightpad import Plum
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
import homeassistant.util.color as color_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity]], None],
) -> None:
"""Set up Plum Lightpad dimmer lights and glow rings."""
plum: Plum = hass.data[DOMAIN][entry.entry_id]
def setup_entities(device) -> None:
entities = []
if "lpid" in device:
lightpad = plum.get_lightpad(device["lpid"])
entities.append(GlowRing(lightpad=lightpad))
if "llid" in device:
logical_load = plum.get_load(device["llid"])
entities.append(PlumLight(load=logical_load))
if entities:
async_add_entities(entities)
async def new_load(device):
setup_entities(device)
async def new_lightpad(device):
setup_entities(device)
device_web_session = async_get_clientsession(hass, verify_ssl=False)
asyncio.create_task(
plum.discover(
hass.loop,
loadListener=new_load,
lightpadListener=new_lightpad,
websession=device_web_session,
)
)
class PlumLight(LightEntity):
"""Representation of a Plum Lightpad dimmer."""
def __init__(self, load):
"""Initialize the light."""
self._load = load
self._brightness = load.level
async def async_added_to_hass(self):
"""Subscribe to dimmerchange events."""
self._load.add_event_listener("dimmerchange", self.dimmerchange)
def dimmerchange(self, event):
"""Change event handler updating the brightness."""
self._brightness = event["level"]
self.schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Combine logical load ID with .light to guarantee it is unique."""
return f"{self._load.llid}.light"
@property
def name(self):
"""Return the name of the switch if any."""
return self._load.name
@property
def device_info(self):
"""Return the device info."""
return {
"name": self.name,
"identifiers": {(DOMAIN, self.unique_id)},
"model": "Dimmer",
"manufacturer": "Plum",
}
@property
def brightness(self) -> int:
"""Return the brightness of this switch between 0..255."""
return self._brightness
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._brightness > 0
@property
def supported_features(self):
"""Flag supported features."""
if self._load.dimmable:
return SUPPORT_BRIGHTNESS
return 0
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
await self._load.turn_on(kwargs[ATTR_BRIGHTNESS])
else:
await self._load.turn_on()
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._load.turn_off()
class GlowRing(LightEntity):
"""Representation of a Plum Lightpad dimmer glow ring."""
def __init__(self, lightpad):
"""Initialize the light."""
self._lightpad = lightpad
self._name = f"{lightpad.friendly_name} Glow Ring"
self._state = lightpad.glow_enabled
self._glow_intensity = lightpad.glow_intensity
self._red = lightpad.glow_color["red"]
self._green = lightpad.glow_color["green"]
self._blue = lightpad.glow_color["blue"]
async def async_added_to_hass(self):
"""Subscribe to configchange events."""
self._lightpad.add_event_listener("configchange", self.configchange_event)
def configchange_event(self, event):
"""Handle Configuration change event."""
config = event["changes"]
self._state = config["glowEnabled"]
self._glow_intensity = config["glowIntensity"]
self._red = config["glowColor"]["red"]
self._green = config["glowColor"]["green"]
self._blue = config["glowColor"]["blue"]
self.schedule_update_ha_state()
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return color_util.color_RGB_to_hs(self._red, self._green, self._blue)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Combine LightPad ID with .glow to guarantee it is unique."""
return f"{self._lightpad.lpid}.glow"
@property
def name(self):
"""Return the name of the switch if any."""
return self._name
@property
def device_info(self):
"""Return the device info."""
return {
"name": self.name,
"identifiers": {(DOMAIN, self.unique_id)},
"model": "Glow Ring",
"manufacturer": "Plum",
}
@property
def brightness(self) -> int:
"""Return the brightness of this switch between 0..255."""
return min(max(int(round(self._glow_intensity * 255, 0)), 0), 255)
@property
def glow_intensity(self):
"""Brightness in float form."""
return self._glow_intensity
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._state
@property
def icon(self):
"""Return the crop-portrait icon representing the glow ring."""
return "mdi:crop-portrait"
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness_pct = kwargs[ATTR_BRIGHTNESS] / 255.0
await self._lightpad.set_config({"glowIntensity": brightness_pct})
elif ATTR_HS_COLOR in kwargs:
hs_color = kwargs[ATTR_HS_COLOR]
red, green, blue = color_util.color_hs_to_RGB(*hs_color)
await self._lightpad.set_glow_color(red, green, blue, 0)
else:
await self._lightpad.set_config({"glowEnabled": True})
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
if ATTR_BRIGHTNESS in kwargs:
brightness_pct = kwargs[ATTR_BRIGHTNESS] / 255.0
await self._lightpad.set_config({"glowIntensity": brightness_pct})
else:
await self._lightpad.set_config({"glowEnabled": False})
| apache-2.0 | 7,733,603,410,718,055,000 | 28.747934 | 82 | 0.605501 | false |
ncss-tech/geo-pit | alena_tools/desktop__V_tools/Generate_Regional_Transactional_Region_11_FGDB_old.py | 1 | 50030 | # Create_Regional_Transactional_FGDB
#
# 1/16/2014
#
# Adolfo Diaz, Region 10 GIS Specialist
# USDA - Natural Resources Conservation Service
# Madison, WI 53719
# [email protected]
# 608.662.4422 ext. 216
#
#
# Beginning of Functions
## ===================================================================================
class MyError(Exception):
pass
## ===================================================================================
def print_exception():
tb = sys.exc_info()[2]
l = traceback.format_tb(tb)
l.reverse()
tbinfo = "".join(l)
AddMsgAndPrint("\n\n----------ERROR Start-------------------",2)
AddMsgAndPrint("Traceback Info: \n" + tbinfo + "Error Info: \n " + str(sys.exc_type)+ ": " + str(sys.exc_value) + "",2)
AddMsgAndPrint("----------ERROR End-------------------- \n\n",2)
## ================================================================================================================
def AddMsgAndPrint(msg, severity=0):
# prints message to screen if run as a python script
# Adds tool message to the geoprocessor
#
# Split the message on \n first, so that if it's multiple lines, a GPMessage will be added for each line
#print msg
try:
#for string in msg.split('\n'):
# Add a geoprocessing message (in case this is run as a tool)
if severity == 0:
arcpy.AddMessage(msg)
elif severity == 1:
arcpy.AddWarning(msg)
elif severity == 2:
arcpy.AddError(msg)
except:
pass
## ===================================================================================
def getRegionalAreaSymbolList(ssurgoSSApath, userRegionChoice):
    # Queries the regional master table for every AREASYMBOL assigned to the
    # region the user selected (Region_Download field) and returns them as a
    # list, e.g. [u'WI001', u'WI003']. Returns an empty string on error.
try:
areaSymbolList = []
whereClause = "\"Region_Download\" = '" + userRegionChoice + "'"
fields = ('AREASYMBOL')
with arcpy.da.SearchCursor(ssurgoSSApath, fields, whereClause) as cursor:
for row in cursor:
areaSymbolList.append(row[0])
del whereClause, fields
return areaSymbolList
except:
        print_exception()
return ""
## ===================================================================================
def validateSSAs(surveyList, wssLibrary):
# checks for missing SSURGO datasets in the wssLibrary folder. If any SSURGO dataset is
# missing return "". All ssurgo datasets must be present in order to (re)construct the
# regional Transactional database. Also checks for duplicate ssurgo datasets in the
# wssLibrary. Return "" if duplicates are found. Cannot have duplicates b/c this will
# cause topology overlap and the duplicate datasets may be of different versions.
# Returns a dictionary containing areasymbol & ssurgo dataset path
#
# {'ID001': 'C:\\Temp\\junk\\soils_id001', 'ID002': 'C:\\Temp\\junk\\wss_SSA_ID002_soildb_ID_2003_[2012-08-13]'
try:
import collections
ssurgoDatasetDict = dict() # [AreaSymbol] = C:\Temp\junk\soil_ca688
wssLibraryList = [] # ['WI025','WI027']
# get a list of all files in wssLibrary folder
for file in os.listdir(wssLibrary):
# Full path to individual file in wssLibrary folder
filePath = os.path.join(wssLibrary,file)
# extract areasymbol name if file is a directory and a ssurgo dataset
if os.path.isdir(filePath):
# folder is named in WSS 3.0 format i.e. 'wss_SSA_WI063_soildb_WI_2003_[2012-06-27]'
if file.find("wss_SSA_") > -1:
SSA = file[file.find("SSA_") + 4:file.find("soildb")-1].upper()
wssLibraryList.append(SSA)
if SSA in surveyList:
ssurgoDatasetDict[SSA] = filePath
del SSA
# folder is named according to traditional SDM format i.e. 'soil_wa001'
elif file.find("soil_") > -1:
SSA = file[-5:].upper()
wssLibraryList.append(SSA)
if SSA in surveyList:
ssurgoDatasetDict[SSA] = filePath
del SSA
                # folder is named in plural format instead of singular. Accident!!!
elif file.find("soils_") > -1:
SSA = file[-5:].upper()
wssLibraryList.append(SSA)
if SSA in surveyList:
ssurgoDatasetDict[SSA] = filePath
del SSA
# Not a SSURGO dataset; some other folder
else:
pass
# ------------------------------------------------------------------------ No Datasets in Library
if len(wssLibraryList) < 1:
AddMsgAndPrint(" \n\tNo SSURGO datasets were found in " + os.path.dirname(wssLibrary) + " directory",2)
return ""
# ------------------------------------------------------------------------ Missing SSURGO Datasets
missingSSAList = []
# check for missing SSURGO datasets in wssLibrary. Print missing datasets and return False.
for survey in surveyList:
if not survey in wssLibraryList:
missingSSAList.append(survey)
if len(missingSSAList) > 0:
AddMsgAndPrint("\n" + "The following Regional SSURGO datasets are missing from your local datasets",2)
for survey in missingSSAList:
AddMsgAndPrint("\t" + survey,2)
ssurgoDatasetDict.pop(survey, None)
# ---------------------------------------------------------------------- Duplicate SSURGO Datasets
# check for duplicate SSURGO SSAs in wssLibrary. Print duplicates. Return False only if the duplicates affects those surveys in the regional list.
# Cannot have duplicates b/c of different versions and overlap
if len([x for x, y in collections.Counter(wssLibraryList).items() if y > 1]):
duplicateSSAs = []
for survey in [x for x, y in collections.Counter(wssLibraryList).items() if y > 1]:
if survey in ssurgoDatasetDict:
duplicateSSAs.append(survey)
if len(duplicateSSAs) > 0:
AddMsgAndPrint(" \n\tThe following are duplicate SSURGO datasets found in " + os.path.basename(wssLibrary) + " directory:",2)
for survey in duplicateSSAs:
AddMsgAndPrint(" \t\t" + survey,2)
ssurgoDatasetDict.pop(survey, None)
# -------------------------------------------------------------------- Make sure Datum is either NAD83 or WGS84 and soils layer is not missing
wrongDatum = []
missingSoilLayer = []
for survey in ssurgoDatasetDict:
soilShpPath = os.path.join(os.path.join(ssurgoDatasetDict[survey],"spatial"),"soilmu_a_" + survey.lower() + ".shp")
if arcpy.Exists(soilShpPath):
if not compareDatum(soilShpPath):
wrongDatum.append(survey)
else:
missingSoilLayer.append(survey)
if len(wrongDatum) > 0:
AddMsgAndPrint(" \n\tThe following local SSURGO datasets have a Datum other than WGS84 or NAD83:",2)
for survey in wrongDatum:
AddMsgAndPrint(" \t\t" + survey,2)
ssurgoDatasetDict.pop(survey, None)
if len(missingSoilLayer) > 0:
AddMsgAndPrint(" \n\tThe following local SSURGO datasets are missing their soils shapefile:",2)
for survey in missingSoilLayer:
AddMsgAndPrint(" \t\t" + survey,2)
ssurgoDatasetDict.pop(survey, None)
# -------------------------------------------------------------------- At this point everything checks out; Return Dictionary!
del wssLibraryList, missingSSAList, wrongDatum, missingSoilLayer
return ssurgoDatasetDict
except:
AddMsgAndPrint(" \nUnhandled exception (validateSSAs)", 2)
print_exception()
return ""
## ================================================================================================================
def createFGDB(regionChoice,outputFolder):
# This function will Create the RTSD File Geodatabase by importing an XML workspace schema.
    # The region being created determines which .xml workspace schema file is used.
# Schema includes 2 feature datasets: FD_RTSD & Project Record. 6 feature classes will be created
# within FD_RTSD along with a topology. 1 feature class will be created within the ProjectRecord
# FD. Return new name of RTSD otherwise return empty string.
import datetime
try:
targetGDB = os.path.join(outputFolder,"TempRTSD.gdb")
if arcpy.Exists(targetGDB):
arcpy.Delete_management(targetGDB)
arcpy.CreateFileGDB_management(outputFolder,"TempRTSD")
        # New fiscal year if month is October, November or December
        if int(datetime.datetime.now().strftime("%m")) > 9 and int(datetime.datetime.now().strftime("%m")) < 13:
            FY = "FY" + str(int(datetime.datetime.now().strftime("%y")) + 1)
        else:
            FY = "FY" + datetime.datetime.now().strftime("%y")
# Alask = NAD_1983_Alaska_Albers
if regionChoice == "Region 1 - AK":
xmlFile = os.path.dirname(sys.argv[0]) + os.sep + "RTSD_XMLWorkspace_Alaska.xml"
newName = "RTSD_Region_1_Alaska_" + FY
# Hawaii - Hawaii_Albers_Equal_Area_Conic WGS84
elif regionChoice == "Region 2 - HI":
xmlFile = os.path.dirname(sys.argv[0])+ os.sep + "RTSD_XMLWorkspace_Hawaii.xml"
newName = "RTSD_Region_2_Hawaii_" + FY
# PBSamoa - Hawaii_Albers_Equal_Area_Conic WGS84
elif regionChoice == "Region 2 - PBSamoa":
xmlFile = os.path.dirname(sys.argv[0]) + os.sep + "RTSD_XMLWorkspace_Hawaii.xml"
newName = "RTSD_Region_2_PBSamoa_" + FY
# Pacific Basin - Western Pacific Albers Equal Area Conic WGS84 Only PB630
elif regionChoice == "Region 2 - PacBasin":
xmlFile = os.path.dirname(sys.argv[0]) + os.sep + "RTSD_XMLWorkspace_PacBasin.xml"
newName ="RTSD_Region_2_PacBasin_" + FY
# Puerto Rico US Virgin Islands - USA Contiguous Albers Equal Area Conic USGS version NAD83
elif regionChoice == "Region 3 - PRUSVI":
xmlFile = os.path.dirname(sys.argv[0]) + os.sep + "RTSD_XMLWorkspace_CONUS.xml"
newName ="RTSD_Region_3_PRUSVI_" + FY
# CONUS - USA Contiguous Albers Equal Area Conic USGS version NAD83
else:
xmlFile = os.path.dirname(sys.argv[0]) + os.sep + "RTSD_XMLWorkspace_CONUS.xml"
newName = "RTSD_Region_" + str(regionChoice[regionChoice.find(" ")+1:]) + "_" + FY
# Return false if xml file is not found and delete targetGDB
if not arcpy.Exists(xmlFile):
AddMsgAndPrint(os.path.basename(xmlFile) + " was not found!",2)
arcpy.Delete_management(targetGDB)
return ""
arcpy.ImportXMLWorkspaceDocument_management(targetGDB, xmlFile, "SCHEMA_ONLY", "DEFAULTS")
# if Transactional Spatial Database exists delete it
if arcpy.Exists(os.path.join(outputFolder,newName + ".gdb")):
try:
AddMsgAndPrint("\n" + newName + ".gdb already exists, deleting",1)
arcpy.Delete_management(os.path.join(outputFolder,newName + ".gdb"))
except:
AddMsgAndPrint("\n Failed to Delte " + os.path.join(outputFolder,newName + ".gdb",2))
return ""
arcpy.Rename_management(targetGDB,newName)
arcpy.RefreshCatalog(os.path.join(outputFolder,newName + '.gdb'))
AddMsgAndPrint("\n" + "Successfully Created RTSD File GDB: " + newName + ".gdb")
del targetGDB,FY,xmlFile
return newName + ".gdb"
except arcpy.ExecuteError:
AddMsgAndPrint(arcpy.GetMessages(2),2)
return ""
except:
AddMsgAndPrint("Unhandled exception (createFGDB)", 2)
print_exception()
return ""
## ================================================================================================================
def parseDatumAndProjection(spatialReference):
# This functions extracts the Datum and Projection name from the user-defined
# spatial Reference. If the Datum is NAD83 then a transformation method will
# set as an env transformation method other wise none will be applied.
# Datum and Projection Name are returned.
#
# Not sure if this even helps b/c the append tool does not honor env
    # trans method. Does this mean I have to project? I tried 3 different append
    # tests and they all generated the same results, leading me to believe that ArcGIS intelligently
# applies a transformation method but I can't figure out which one. If I look at the
# results of the append tool and look at the env settings the trans method is set
# to NAD27 to NAD83.....WTF???
try:
#---------- Gather Spatial Reference info ----------------------------
# Create the GCS WGS84 spatial reference and datum name using the factory code
WGS84_sr = arcpy.SpatialReference(4326)
WGS84_datum = WGS84_sr.datumName
# Parse Datum and GCS from user spatial reference
userDatum_Start = spatialReference.find("DATUM") + 7
userDatum_Stop = spatialReference.find(",", userDatum_Start) - 1
userDatum = spatialReference[userDatum_Start:userDatum_Stop]
userProjection_Start = spatialReference.find("[") + 2
userProjection_Stop = spatialReference.find(",",userProjection_Start) - 1
userProjectionName = spatialReference[userProjection_Start:userProjection_Stop]
del userDatum_Start
del userDatum_Stop
del userProjection_Start
del userProjection_Stop
if userProjectionName != "" or userDatum != "":
AddMsgAndPrint(" \nUser-Defined Spatial Reference System:",0)
AddMsgAndPrint(" \tCoordinate System: " + userProjectionName,0)
AddMsgAndPrint(" \tDatum: " + userDatum,0)
# user spatial reference is the same as WGS84
if WGS84_datum == userDatum:
AddMsgAndPrint(" \n\tNo Datum Transformation method required", 1)
return userDatum,userProjectionName
# user datum is NAD83; apply trans method based on user input
elif userDatum == "D_North_American_1983":
if AOI == "CONUS":
tm = "NAD_1983_To_WGS_1984_5"
elif AOI == "Alaska":
tm = "NAD_1983_To_WGS_1984_5"
elif AOI == "Hawaii":
tm = "NAD_1983_To_WGS_1984_3"
elif AOI == "Puerto Rico and U.S. Virgin Islands":
tm = "NAD_1983_To_WGS_1984_5"
elif AOI == "Other":
tm = "NAD_1983_To_WGS_1984_1"
PrintMsg(" \n\tWarning! No coordinate shift will being applied", 0)
else:
raise MyError, "Invalid geographic region (" + AOI + ")"
arcpy.env.outputCoordinateSystem = spatialReference
arcpy.env.geographicTransformations = tm
AddMsgAndPrint(" \n\tUsing Datum Transformation Method '" + tm + "' for " + AOI, 1)
return userDatum,userProjectionName
# user datum was something other than NAD83 or WGS84
else:
raise MyError, " \n\tWarning! No Datum Transformation could be applied to " + userProjectionName + ".......EXITING!"
return "",""
# Could not parse CS name and datum
else:
raise MyError, " \n\tCould not extract Spatial Reference Properties........Halting import process"
return "",""
except MyError, e:
AddMsgAndPrint(str(e) + " \n", 2)
return "",""
except arcpy.ExecuteError:
AddMsgAndPrint(arcpy.GetMessages(2),2)
return "",""
except:
AddMsgAndPrint(" \nUnhandled exception (parseDatumAndProjection)", 2)
print_exception()
return "",""
## ================================================================================================================
def compareDatum(fc):
# Return True if fc datum is either WGS84 or NAD83
try:
        # Create Spatial Reference of the input fc. It must first be converted into a string in ArcGIS10
# otherwise .find will not work.
fcSpatialRef = str(arcpy.CreateSpatialReference_management("#",fc,"#","#","#","#"))
FCdatum_start = fcSpatialRef.find("DATUM") + 7
FCdatum_stop = fcSpatialRef.find(",", FCdatum_start) - 1
fc_datum = fcSpatialRef[FCdatum_start:FCdatum_stop]
# Create the GCS WGS84 spatial reference and datum name using the factory code
WGS84_sr = arcpy.SpatialReference(4326)
WGS84_datum = WGS84_sr.datumName
NAD83_datum = "D_North_American_1983"
# Input datum is either WGS84 or NAD83; return true
if fc_datum == WGS84_datum or fc_datum == NAD83_datum:
del fcSpatialRef
del FCdatum_start
del FCdatum_stop
del fc_datum
del WGS84_sr
del WGS84_datum
del NAD83_datum
return True
# Input Datum is some other Datum; return false
else:
del fcSpatialRef
del FCdatum_start
del FCdatum_stop
del fc_datum
del WGS84_sr
del WGS84_datum
del NAD83_datum
return False
except arcpy.ExecuteError:
AddMsgAndPrint(arcpy.GetMessages(2),2)
return False
except:
print_exception()
return False
## ===============================================================================================================
def splitThousands(someNumber):
    # will determine where to put a thousands separator if one is needed.
    # Input is an integer. A string with or without a thousands separator is returned.
try:
return re.sub(r'(\d{3})(?=\d)', r'\1,', str(someNumber)[::-1])[::-1]
except:
print_exception()
return someNumber
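# Illustrative example: splitThousands(1234567) returns '1,234,567'. The string is
# reversed, a comma is inserted after every third digit that is followed by another
# digit, and the result is reversed back; values under 1000 come back unchanged.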
## ===============================================================================================================
def createTopology(RTSD_FD):
try:
AddMsgAndPrint("\n" + "Creating Topology and Rules",0)
#env.workspace = RTSD_FD
arcpy.SetProgressor("step", "Creating Topology", 0, 3, 1)
# Create New topology
arcpy.SetProgressorLabel("Creating Topology")
arcpy.CreateTopology_management(RTSD_FD, "FD_RTSD_Topology", 0.001)
newTopology = os.path.join(RTSD_FD,"FD_RTSD_Topology")
AddMsgAndPrint(" \tCreated Topology: FD_RTSD_Topology at 0.1m cluster tolerance",0)
arcpy.SetProgressorPosition()
# Add feature classes to topology
arcpy.SetProgressorLabel("Creating Topology: Adding Feature Classes to Topology")
arcpy.AddFeatureClassToTopology_management(newTopology, os.path.join(RTSD_FD, "MUPOLYGON"), 1, 1)
arcpy.AddFeatureClassToTopology_management(newTopology, os.path.join(RTSD_FD,"MUPOINT"), 1, 1)
arcpy.AddFeatureClassToTopology_management(newTopology, os.path.join(RTSD_FD,"MULINE"), 1, 1)
arcpy.AddFeatureClassToTopology_management(newTopology, os.path.join(RTSD_FD,"FEATPOINT"), 1, 1)
arcpy.AddFeatureClassToTopology_management(newTopology, os.path.join(RTSD_FD,"FEATLINE"), 1, 1)
AddMsgAndPrint(" \tAdded 5 Feature Classes to participate in the Topology",0)
arcpy.SetProgressorPosition()
# Add Topology Rules
arcpy.SetProgressorLabel("Creating Topology: Adding Rules to Topology")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Overlap (Area)", "MUPOLYGON")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Have Gaps (Area)", "MUPOLYGON")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Overlap (Line)", "FEATLINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Intersect (Line)", "FEATLINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Self-Overlap (Line)", "FEATLINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Have Pseudo-Nodes (Line)", "FEATLINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Self-Intersect (Line)", "FEATLINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Be Single Part (Line)", "FEATLINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Be Disjoint (Point)", "FEATPOINT")
arcpy.AddRuleToTopology_management(newTopology, "Must Be Disjoint (Point)", "MUPOINT")
#arcpy.AddRuleToTopology_management(newTopology, "Must Be Properly Inside (Point-Area)","FEATPOINT","MUPOLYGON")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Overlap (Line)", "MULINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Intersect (Line)", "MULINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Self-Overlap (Line)", "MULINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Have Pseudo-Nodes (Line)", "MULINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Not Self-Intersect (Line)", "MULINE")
arcpy.AddRuleToTopology_management(newTopology, "Must Be Single Part (Line)", "MULINE")
AddMsgAndPrint(" \tAdded 17 rules to the Topology",0)
arcpy.SetProgressorPosition()
arcpy.ResetProgressor()
arcpy.RefreshCatalog(RTSD_FD)
del newTopology
return True
except arcpy.ExecuteError:
AddMsgAndPrint(arcpy.GetMessages(2),2)
return False
except:
print_exception()
return False
## ===============================================================================================================
def ImportFeatureFiles(ssurgoDatasetDict):
# This function will import the feature files into the featdesc table within
# RTSD. Spatial version and FEATKEY are not imported. Warns user if feature
# file is empty. Return True if all worked well.
try:
AddMsgAndPrint("\n" + "Importing Feature Files",0)
arcpy.SetProgressor("step", "Importing Feature Files", 0, len(ssurgoDatasetDict), 1)
featDescTable = os.path.join(FGDBpath,"featdesc")
# Put all the field names in a list; used to initiate insertCursor object
fieldList = arcpy.ListFields(featDescTable)
nameOfFields = []
for field in fieldList:
if field.type != "OID":
nameOfFields.append(field.name)
# Initiate Cursor to add rows
cursor = arcpy.da.InsertCursor(featDescTable, nameOfFields)
missingFiles = []
importError = []
importedCorrectly = 0
emptyFiles = 0
for SSA in ssurgoDatasetDict:
arcpy.SetProgressorLabel("Importing Feature File: " + SSA)
# Paths to individual SSURGO layers
specFeatDescFile = os.path.join(os.path.join(ssurgoDatasetDict[SSA],"spatial"),"soilsf_t_" + SSA.lower() + ".txt")
# check if feature file exists
if os.path.exists(specFeatDescFile):
# Continue if the feature file contains values. Not Empty file
if os.path.getsize(specFeatDescFile) > 0:
# Number of records in the feature file
textFileRecords = sum(1 for row in csv.reader(open(specFeatDescFile, 'rb'), delimiter='|', quotechar='"'))
F = csv.reader(open(specFeatDescFile, 'rb'), delimiter='|', quotechar='"')
i = 0 # row counter
for rowInF in F:
try:
i+=1
newRow = rowInF[0],rowInF[2],rowInF[3],rowInF[4]
cursor.insertRow(newRow)
del newRow
except:
AddMsgAndPrint(" \tFailed to import line #" + str(i) + " for " + SSA + " feature file",2)
continue
#AddMsgAndPrint(" \tSuccessfully Imported: " + str(textFileRecords) + " records",0)
if i != textFileRecords:
AddMsgAndPrint(" \tIncorrect # of records inserted for " + SSA,2)
AddMsgAndPrint( "\t\tFeature file records: " + str(textFileRecords),2)
AddMsgAndPrint( "\t\tRecords Inserted: " + str(i),2)
importError.append(SSA)
else:
importedCorrectly += 1
del textFileRecords,F,i
# feature file is empty, print a warning
else:
AddMsgAndPrint(" \t" + SSA + " feature file is empty",1)
emptyFiles += 1
else:
AddMsgAndPrint(" \t" + SSA + " feature file is missing",2)
missingFiles.append(SSA)
arcpy.SetProgressorPosition()
del specFeatDescFile
# Print any missing surveys
if len(missingFiles) > 0:
AddMsgAndPrint(" \n\tThe following SSAs had missing feature files:",2)
for ssa in missingFiles:
AddMsgAndPrint( "\t\t" + ssa,2)
# Print any SSAs that had errors importing
if len(importError) > 0:
AddMsgAndPrint(" \n\tThe following SSAs had errors importing - Feature files should be looked at:",2)
for ssa in importError:
AddMsgAndPrint( "\t\t" + ssa,2)
if (emptyFiles + importedCorrectly) == len(ssurgoDatasetDict):
AddMsgAndPrint("\tAll " + str(len(ssurgoDatasetDict)) + " Feature Files Successfully Imported",0)
else:
AddMsgAndPrint("\tOnly " + str(importedCorrectly) + " Feature Files were successfully imported",2)
del featDescTable, fieldList, field, nameOfFields, cursor, missingFiles, importError, importedCorrectly, emptyFiles
arcpy.ResetProgressor()
return True
except arcpy.ExecuteError:
AddMsgAndPrint(arcpy.GetMessages(2),2)
return False
except:
print_exception()
return False
## ===============================================================================================================
def updateAliasNames(regionChoice,fdPath):
# Update the alias name of every feature class in the RTSD including the project record.
# i.e. alias name for MUPOLYGON = Region 10 - Mapunit Polygon
try:
aliasUpdate = 0
regionNumber = str([int(s) for s in regionChoice.split() if s.isdigit()][0])
if arcpy.Exists(os.path.join(fdPath,'FEATLINE')):
arcpy.AlterAliasName(os.path.join(fdPath,'FEATLINE'), "RTSD R" + regionNumber + " - Special Feature Lines") #FEATLINE
aliasUpdate += 1
if arcpy.Exists(os.path.join(fdPath,'FEATPOINT')):
arcpy.AlterAliasName(os.path.join(fdPath,'FEATPOINT'), "RTSD R" + regionNumber + " - Special Feature Points") #FEATPOINT
aliasUpdate += 1
if arcpy.Exists(os.path.join(fdPath,'MUPOLYGON')):
arcpy.AlterAliasName(os.path.join(fdPath,'MUPOLYGON'), "RTSD R" + regionNumber + " - Mapunit Polygon") #MUPOLYGON
aliasUpdate += 1
if arcpy.Exists(os.path.join(fdPath,'SAPOLYGON')):
arcpy.AlterAliasName(os.path.join(fdPath,'SAPOLYGON'), "RTSD R" + regionNumber + " - Survey Area Polygon") #SAPOLYGON
aliasUpdate += 1
if arcpy.Exists(os.path.join(fdPath,'MULINE')):
arcpy.AlterAliasName(os.path.join(fdPath,'MULINE'), "RTSD R" + regionNumber + " - Mapunit Line") #MULINE
aliasUpdate += 1
if arcpy.Exists(os.path.join(fdPath,'MUPOINT')):
arcpy.AlterAliasName(os.path.join(fdPath,'MUPOINT'), "RTSD R" + regionNumber + " - Mapunit Point") #MUPOINT
aliasUpdate += 1
if arcpy.Exists(os.path.join(FGDBpath + os.sep + 'ProjectRecord' + os.sep + 'Project_Record')):
arcpy.AlterAliasName(os.path.join(FGDBpath + os.sep + 'ProjectRecord' + os.sep + 'Project_Record'), "RTSD R" + regionNumber + " - Project Record") #Project_Record
aliasUpdate += 1
if aliasUpdate == 7:
return True
else:
return False
except arcpy.ExecuteError:
AddMsgAndPrint(arcpy.GetMessages(2),2)
return False
except:
print_exception()
return False
## ====================================== Main Body ===========================================================
# Import modules
import arcpy, sys, string, os, time, datetime, re, traceback, csv
from arcpy import env
from datetime import datetime
# ---------------------------------------------------------------------------------------Input Arguments
#
# Parameter # 1: (Required) Name of new file geodatabase to create
regionChoice = arcpy.GetParameterAsText(0) # User selects what region to create MRSD
#regionChoice = "Region 13"
# Parameter # 2: (Required) Input Directory where the new FGDB will be created.
outputFolder = arcpy.GetParameterAsText(1)
#outputFolder = r'C:\Temp\export'
# Parameter # 2: (Required) Input Directory where the original SDM spatial and tabular data exist.
wssLibrary = arcpy.GetParameterAsText(2)
#wssLibrary = r'K:\FY2014_SSURGO_R10_download'
# Path to the Master Regional table that contains SSAs by region with extra extent
#regionalTable = os.path.dirname(sys.argv[0]) + os.sep + "SSURGO_Soil_Survey_Area.gdb\junkTable"
regionalTable = os.path.join(os.path.join(os.path.dirname(sys.argv[0]),"SSURGO_Soil_Survey_Area.gdb"),"SSA_by_Region_11_buffer")
# Bail if reginal master table is not found
if not arcpy.Exists(regionalTable):
raise MyError, "\n" + "Regional Master Table is missing from " + os.path.dirname(sys.argv[0])
startTime = datetime.now()
env.overwriteOutput = True
# The entire Main code in a try statement....Let the fun begin!
try:
# Get a list of Regional areasymbols to download from the Regional Master Table. [u'WI001, u'WI003']
regionalASlist = getRegionalAreaSymbolList(regionalTable,regionChoice)
# Exit if list of regional areasymbol list is empty
if not len(regionalASlist) > 0:
raise MyError, "\n\n" + "No Areasymbols were selected from Regional SSA Ownership table. Possible problem with table.....EXITING"
# sort the regional list
regionalASlist.sort()
# check for missing Regional SSURGO Datasets or duplicates; Exit if any are found
AddMsgAndPrint("\n" + "Validating local Regional SSURGO datasets.......",0)
ssurgoDatasetDict = validateSSAs(regionalASlist,wssLibrary)
if len(ssurgoDatasetDict) < 1:
raise MyError, "\nAll " + regionChoice + " SSURGO datasets are missing from " + os.path.basename(wssLibrary) + " directory \n\tThere must also not be duplicate SSURGO datasets"
# There are some SSAs missing from local library
elif len(regionalASlist) != len(ssurgoDatasetDict):
AddMsgAndPrint("\n" + regionChoice + " is assigned " + str(len(regionalASlist)) + " SSAs --- Missing " + str(len(regionalASlist) - len(ssurgoDatasetDict)) + " SSAs" ,2)
AddMsgAndPrint("\nALL SSURGO datasets assigned to " + regionChoice + " must be present to continue",2)
raise MyError, "Download missing SSURGO Datasets using the 'Download SSURGO by Region' tool"
#AddMsgAndPrint("Only " + str(len(ssurgoDatasetDict)) + " out of " + str(len(regionalASlist)) + " surveys will be imported to create the " + regionChoice + " Transactional Spatial Database",2)
else:
AddMsgAndPrint("\n" + str(len(regionalASlist)) + " surveys will be merged to create the " + regionChoice + " Transactional Spatial Database", 0)
# --------------------------------------------------------------------------------------Create Empty Regional Transactional File Geodatabase
RTSDname = createFGDB(regionChoice, outputFolder)
if RTSDname == "":
raise MyError, " \n Failed to Initiate Empty Regional Transactional Database. Error in createFGDB() function. Exiting!"
# Path to Regional FGDB
FGDBpath = os.path.join(outputFolder,RTSDname)
# Path to feature dataset that contains SSURGO feature classes
FDpath = os.path.join(FGDBpath,"FD_RTSD")
# SSURGO layer Name
soilFC = "MUPOLYGON"
muLineFC = "MULINE"
muPointFC = "MUPOINT"
soilSaFC = "SAPOLYGON"
featPointFC = "FEATPOINT"
featLineFC = "FEATLINE"
# Set environment variables
env.workspace = FDpath
# Parse Datum from MUPOLYGON; can only get datum from a GCS not a projected CS
spatialRef = str(arcpy.CreateSpatialReference_management("#",soilFC,"#","#","#","#"))
userDatum_Start = spatialRef.find("DATUM") + 7
userDatum_Stop = spatialRef.find(",", userDatum_Start) - 1
userDatum = spatialRef[userDatum_Start:userDatum_Stop]
AddMsgAndPrint(" \tOutput Coordinate System: " + arcpy.Describe(soilFC).spatialReference.name,0)
AddMsgAndPrint(" \tOutput Datum: " + userDatum,0)
if userDatum == "D_North_American_1983":
AddMsgAndPrint(" \tGeographic Transformation: WGS_1984_(ITRF00)_To_NAD_1983",0 )
env.geographicTransformations = "WGS_1984_(ITRF00)_To_NAD_1983" # WKID 108190
env.outputCoordinateSystem = spatialRef
arcpy.SetProgressorLabel("Gathering information about Soil Survey datasets...")
# ------------------------------------------------------------------------------------- Establish Dictionaries, lists and Fieldmappings
# Dictionary containing approx center of SSA (key) and the SSURGO layer path (value)
soilShpDict = dict() # {-4002.988250799742: 'K:\\FY2014_SSURGO_R10_download\\soil_wi063\\spatial\\soilmu_a_wi063.shp'}
muLineShpDict = dict()
muPointShpDict = dict()
soilSaShpDict = dict()
featPointShpDict = dict()
featLineShpDict = dict()
# lists containing SSURGO layer paths sorted according to the survey center key
# This list will be passed over to the Merge command
soilShpList = list()
muLineShpList = list()
muPointShpList = list()
soilSaShpList = list()
featPointShpList = list()
featLineShpList = list()
# Create FieldMappings objects that will contain all of the fields from each survey
# (fieldmap). FMs will be used to remove every field but AREASYMBOL, FEATSYM, MUSYM
soilsFM = arcpy.FieldMappings()
muLineFM = arcpy.FieldMappings()
muPointFM = arcpy.FieldMappings()
soilSaFM = arcpy.FieldMappings()
featPointFM = arcpy.FieldMappings()
featLineFM = arcpy.FieldMappings()
# list containing the (Xcenter * Ycenter) for every SSURGO soil layer
extentList = list()
# ------------------------------------------------------------------------------------- Populate Dictionaries, lists and Fieldmappings
for SSA in ssurgoDatasetDict:
# Paths to individual SSURGO layers
soilShpPath = os.path.join(os.path.join(ssurgoDatasetDict[SSA],"spatial"),"soilmu_a_" + SSA.lower() + ".shp")
muLineShpPath = os.path.join(os.path.join(ssurgoDatasetDict[SSA],"spatial"),"soilmu_l_" + SSA.lower() + ".shp")
muPointShpPath = os.path.join(os.path.join(ssurgoDatasetDict[SSA],"spatial"),"soilmu_p_" + SSA.lower() + ".shp")
soilSaShpPath = os.path.join(os.path.join(ssurgoDatasetDict[SSA],"spatial"),"soilsa_a_" + SSA.lower() + ".shp")
featPointShpPath = os.path.join(os.path.join(ssurgoDatasetDict[SSA],"spatial"),"soilsf_p_" + SSA.lower() + ".shp")
featLineShpPath = os.path.join(os.path.join(ssurgoDatasetDict[SSA],"spatial"),"soilsf_l_" + SSA.lower() + ".shp")
# Calculate the approximate center of a given survey
desc = arcpy.Describe(soilShpPath)
shpExtent = desc.extent
XCntr = (shpExtent.XMin + shpExtent.XMax) / 2.0
YCntr = (shpExtent.YMin + shpExtent.YMax) / 2.0
surveyCenter = XCntr * YCntr # approximate center of survey
# Assign {-4002.988250799742: 'K:\\FY2014_SSURGO_R10_download\\soil_wi063\\spatial\\soilmu_a_wi063.shp'}
soilShpDict[surveyCenter] = soilShpPath
muLineShpDict[surveyCenter] = muLineShpPath
muPointShpDict[surveyCenter] = muPointShpPath
soilSaShpDict[surveyCenter] = soilSaShpPath
featPointShpDict[surveyCenter] = featPointShpPath
featLineShpDict[surveyCenter] = featLineShpPath
extentList.append(surveyCenter)
# Add all fields from all of the SSURGO layers into their respective fieldMappings
soilsFM.addTable(soilShpPath)
muLineFM.addTable(muLineShpPath)
muPointFM.addTable(muPointShpPath)
soilSaFM.addTable(soilSaShpPath)
featPointFM.addTable(featPointShpPath)
featLineFM.addTable(featLineShpPath)
del soilShpPath, muLineShpPath, muPointShpPath, soilSaShpPath, featPointShpPath, featLineShpPath, desc, shpExtent, XCntr, YCntr, surveyCenter
# ---------------------------------------------------------------------------------------------------------- Begin the Merge Process
# Sort shapefiles by extent so that the drawing order is continous
extentList.sort()
# number of soil layers to merge should be equal to number of Regional SSAs
#if len(soilShpDict) == len(regionalASlist):
if len(soilShpDict) > 0:
# Add SSURGO paths to their designated lists according to the survey's center so that they draw continously
# If the layer has features then add it to the merge list otherwise skip it. This was added b/c it turns
# out that empty mapunit point .shp are in line geometry and not point geometry
for surveyCenter in extentList:
soilShpList.append(soilShpDict[surveyCenter])
soilSaShpList.append(soilSaShpDict[surveyCenter])
if int(arcpy.GetCount_management(muLineShpDict[surveyCenter]).getOutput(0)) > 0:
muLineShpList.append(muLineShpDict[surveyCenter])
if int(arcpy.GetCount_management(muPointShpDict[surveyCenter]).getOutput(0)) > 0:
muPointShpList.append(muPointShpDict[surveyCenter])
if int(arcpy.GetCount_management(featPointShpDict[surveyCenter]).getOutput(0)) > 0:
featPointShpList.append(featPointShpDict[surveyCenter])
if int(arcpy.GetCount_management(featLineShpDict[surveyCenter]).getOutput(0)) > 0:
featLineShpList.append(featLineShpDict[surveyCenter])
# Some reason some surveys are missing......Exit
else:
if arcpy.Exists(FGDBpath):
arcpy.Delete_management(FGDBpath)
raise MyError, " \n\n All surveys had incompatible datums! Datum needs to be in NAD83 or WGS84."
# set progressor object which allows progress information to be passed for every merge complete
arcpy.SetProgressor("step", "Beginning the merge process...", 0, 6, 1)
AddMsgAndPrint("\n" + "Beginning the merge process",0)
# --------------------------------------------------------------------------Merge Soil Mapunit Polygons
arcpy.SetProgressorLabel("Merging " + str(len(soilShpList)) + " Soil Mapunit Layers")
try:
for field in soilsFM.fields:
if field.name not in ["AREASYMBOL","MUSYM", "MUKEY", "MUNAME"]:
soilsFM.removeFieldMap(soilsFM.findFieldMapIndex(field.name))
arcpy.Merge_management(soilShpList, os.path.join(FDpath, soilFC), soilsFM)
#arcpy.Append_management(soilShpList, os.path.join(FDpath, soilFC), "NO_TEST", soilsFM)
AddMsgAndPrint(" \tSuccessfully merged SSURGO Soil Mapunit Polygons",0)
arcpy.SetProgressorPosition()
except:
print_exception()
# --------------------------------------------------------------------------Merge Soil Mapunit Lines
if len(muLineShpList) > 0:
arcpy.SetProgressorLabel("Merging " + str(len(muLineShpList)) + " SSURGO Soil Mapunit Line Layers")
# Transactional FGDB; remove any field other than AREASYMBOL and MUSYM
for field in muLineFM.fields:
if field.name not in ["AREASYMBOL","MUSYM"]:
muLineFM.removeFieldMap(muLineFM.findFieldMapIndex(field.name))
arcpy.Merge_management(muLineShpList, os.path.join(FDpath, muLineFC), muLineFM)
#arcpy.Append_management(muLineShpList, os.path.join(FDpath, muLineFC), "NO_TEST", muLineFM)
AddMsgAndPrint(" \tSuccessfully merged SSURGO Soil Mapunit Lines",0)
else:
AddMsgAndPrint(" \tNo SSURGO Soil Mapunit Lines to merge",0)
arcpy.SetProgressorPosition()
# --------------------------------------------------------------------------Merge Soil Mapunit Points
if len(muPointShpList) > 0:
arcpy.SetProgressorLabel("Merging " + str(len(muPointShpList)) + "SSURGO Soil Mapunit Point Layers")
# Transactional FGDB; remove any field other than AREASYMBOL and MUSYM
for field in muPointFM.fields:
if field.name not in ["AREASYMBOL","MUSYM"]:
muPointFM.removeFieldMap(muPointFM.findFieldMapIndex(field.name))
arcpy.Merge_management(muPointShpList, os.path.join(FDpath, muPointFC), muPointFM)
#arcpy.Append_management(muPointShpList, os.path.join(FDpath, muPointFC), "NO_TEST", muPointFM)
AddMsgAndPrint(" \tSuccessfully merged SSURGO Soil Mapunit Points",0)
else:
AddMsgAndPrint(" \tNo SSURGO Soil Mapunit Points to merge",0)
arcpy.SetProgressorPosition()
# --------------------------------------------------------------------------Merge Soil Survey Area
arcpy.SetProgressorLabel("Merging " + str(len(soilSaShpList)) + " SSURGO Soil Survey Area Layers")
# Transactional FGDB; remove any field other than AREASYMBOL and MUSYM
for field in soilSaFM.fields:
if field.name not in ["AREASYMBOL"]:
soilSaFM.removeFieldMap(soilSaFM.findFieldMapIndex(field.name))
arcpy.Merge_management(soilSaShpList, os.path.join(FDpath, soilSaFC), soilSaFM)
#arcpy.Append_management(soilSaShpList, os.path.join(FDpath, soilSaFC), "NO_TEST", soilSaFM)
AddMsgAndPrint(" \tSuccessfully merged SSURGO Soil Survey Area Polygons",0)
arcpy.SetProgressorPosition()
# --------------------------------------------------------------------------Merge Special Point Features
if len(featPointShpList) > 0:
arcpy.SetProgressorLabel("Merging " + str(len(featPointShpList)) + " SSURGO Special Point Feature Layers")
# Transactional FGDB; remove any field other than AREASYMBOL and FEATSYM
for field in featPointFM.fields:
if field.name not in ["AREASYMBOL", "FEATSYM", "FEATKEY"]:
featPointFM.removeFieldMap(featPointFM.findFieldMapIndex(field.name))
arcpy.Merge_management(featPointShpList, os.path.join(FDpath, featPointFC), featPointFM)
#arcpy.Append_management(featPointShpList, os.path.join(FDpath, featPointFC), "NO_TEST", featPointFM)
AddMsgAndPrint(" \tSuccessfully merged SSURGO Special Point Features",0)
else:
AddMsgAndPrint(" \tNo SSURGO Soil Special Point Features to merge",0)
arcpy.SetProgressorPosition()
# --------------------------------------------------------------------------Merge Special Line Features
arcpy.SetProgressorLabel("Merging " + str(len(featLineShpList)) + " SSURGO Special Line Feature Layers")
if len(featLineShpList) > 0:
# Transactional FGDB; remove any field other than AREASYMBOL and FEATSYM
for field in featLineFM.fields:
if field.name not in ["AREASYMBOL", "FEATSYM", "FEATKEY"]:
featLineFM.removeFieldMap(featLineFM.findFieldMapIndex(field.name))
arcpy.Merge_management(featLineShpList, os.path.join(FDpath, featLineFC), featLineFM)
#arcpy.Append_management(featLineShpList, os.path.join(FDpath, featLineFC), "NO_TEST", featLineFM)
AddMsgAndPrint(" \tSuccessfully merged SSURGO Special Line Features",0)
else:
AddMsgAndPrint(" \tNo SSURGO Special Line Features to merge",0)
arcpy.SetProgressorPosition()
arcpy.ResetProgressor()
# --------------------------------------------------------------------------------------------- Import Feature descriptions
if not ImportFeatureFiles(ssurgoDatasetDict):
AddMsgAndPrint("\nError importing feature files into the featdesc table",2)
# ---------------------------------------------------------------------------------------------------------- Setup Topology
# Validate Topology with a cluster of 0.1 meters
if createTopology(FDpath):
arcpy.SetProgressorLabel("Validating Topology at 0.1 meters")
arcpy.ValidateTopology_management(os.path.join(FDpath,"FD_RTSD_Topology"))
AddMsgAndPrint(" \tValidated Topology at 0.1 meters",0)
else:
AddMsgAndPrint(" \n\tFailed to Create Topology. Create Topology Manually",2)
# Create Relationship class between project_record and SAPOLYGON feature class
arcpy.SetProgressorLabel("Creating Relationship Class between Project_Record & SAPOLYGON")
prjRecTable = os.path.join(FGDBpath,'ProjectRecord' + os.sep + 'Project_Record')
saPolyPath = os.path.join(FDpath,soilSaFC)
relName = "x" + prjRecTable.capitalize() + "_" + soilSaFC
arcpy.CreateRelationshipClass_management(prjRecTable, saPolyPath, relName, "SIMPLE", "> SAPOLYGON", "< Project_Record", "NONE", "ONE_TO_ONE", "NONE", "AREASYMBOL", "AREASYMBOL", "", "")
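    # Editorial note (not in the original script): this builds a SIMPLE, one-to-one
    # relationship class keyed on AREASYMBOL in both tables, so selecting a record in
    # Project_Record also selects the matching SAPOLYGON survey-area polygon.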
AddMsgAndPrint("\n" + "Successfully Created Relationship Class")
arcpy.SetProgressorLabel("Compacting " + os.path.basename(FGDBpath))
arcpy.Compact_management(FGDBpath)
AddMsgAndPrint("\n" + "Successfully Compacted " + os.path.basename(FGDBpath))
if updateAliasNames(regionChoice, FDpath):
AddMsgAndPrint("\nSuccessfully Updated Alias Names for Feature Classes within " + os.path.basename(FGDBpath))
else:
AddMsgAndPrint("\nUnable to Update Alias Names for Feature Classes within " + os.path.basename(FGDBpath),2)
# -----------------------------------------------------------------------------------------
AddMsgAndPrint(" \n*****************************************************************************************",1)
AddMsgAndPrint("Total # of SSURGO Datasets Appended: " + str(splitThousands(len(soilShpList))),1)
AddMsgAndPrint(" \tTotal # of Mapunit Polygons: " + str(splitThousands(arcpy.GetCount_management(FDpath + os.sep + soilFC).getOutput(0))),1)
AddMsgAndPrint(" \tTotal # of Mapunit Lines: " + str(splitThousands(arcpy.GetCount_management(FDpath + os.sep + muLineFC).getOutput(0))),1)
AddMsgAndPrint(" \tTotal # of Mapunit Points: " + str(splitThousands(arcpy.GetCount_management(FDpath + os.sep + muPointFC).getOutput(0))),1)
AddMsgAndPrint(" \tTotal # of Special Feature Points: " + str(splitThousands(arcpy.GetCount_management(FDpath + os.sep + featPointFC).getOutput(0))),1)
AddMsgAndPrint(" \tTotal # of Special Feature Lines: " + str(splitThousands(arcpy.GetCount_management(FDpath + os.sep + featLineFC).getOutput(0))),1)
arcpy.RefreshCatalog(outputFolder)
endTime = datetime.now()
AddMsgAndPrint(" \nTotal Time: " + str(endTime - startTime),0)
try:
del regionChoice
del outputFolder
del wssLibrary
del regionalTable
del ssurgoDatasetDict
del RTSDname
del FGDBpath
del FDpath
del soilFC
del muLineFC
del muPointFC
del soilSaFC
del featPointFC
del featLineFC
del spatialRef
del userDatum_Start
del userDatum_Stop
del userDatum
del soilShpDict
del muLineShpDict
del muPointShpDict
del soilSaShpDict
del featPointShpDict
del featLineShpDict
del soilShpList
del muLineShpList
del muPointShpList
del soilSaShpList
del featPointShpList
del featLineShpList
del soilsFM
del muLineFM
del muPointFM
del soilSaFM
del featPointFM
del featLineFM
del extentList
del endTime
del prjRecTable
del saPolyPath
del relName
except:
pass
# This is where the fun ends!
except arcpy.ExecuteError:
AddMsgAndPrint(arcpy.GetMessages(2),2)
except:
print_exception()
| gpl-2.0 | -6,511,425,020,220,025,000 | 43.440509 | 200 | 0.593824 | false |
tectronics/hop | src/contrib/hod/hodlib/Hod/hadoop.py | 15 | 27500 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""define WorkLoad as abstract interface for user job"""
# -*- python -*-
import os, time, sys, shutil, exceptions, re, threading, signal, urllib, pprint, math
from HTMLParser import HTMLParser
import xml.dom.minidom
import xml.dom.pulldom
from xml.dom import getDOMImplementation
from hodlib.Common.util import *
from hodlib.Common.xmlrpc import hodXRClient
from hodlib.Common.miniHTMLParser import miniHTMLParser
from hodlib.Common.nodepoolutil import NodePoolUtil
from hodlib.Common.tcp import tcpError, tcpSocket
reCommandDelimeterString = r"(?<!\\);"
reCommandDelimeter = re.compile(reCommandDelimeterString)
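# Illustrative sketch (example strings are hypothetical, not from the original module):
# this pattern matches semicolons that are NOT escaped with a backslash, e.g.
#   re.split(reCommandDelimeterString, r'hadoop fs -ls /;echo a\;b')
#   -> ['hadoop fs -ls /', r'echo a\;b']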
class hadoopConfig:
def __create_xml_element(self, doc, name, value, description, final = False):
prop = doc.createElement("property")
nameP = doc.createElement("name")
string = doc.createTextNode(name)
nameP.appendChild(string)
valueP = doc.createElement("value")
string = doc.createTextNode(value)
valueP.appendChild(string)
if final:
finalP = doc.createElement("final")
string = doc.createTextNode("true")
finalP.appendChild(string)
desc = doc.createElement("description")
string = doc.createTextNode(description)
desc.appendChild(string)
prop.appendChild(nameP)
prop.appendChild(valueP)
if final:
prop.appendChild(finalP)
prop.appendChild(desc)
return prop
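  # Illustrative sketch (assumed values, not part of the original source): a call such as
  #   self.__create_xml_element(doc, 'mapred.reduce.tasks', '7', 'Hod generated parameter', final=True)
  # appends an element of roughly this shape to hadoop-site.xml:
  #   <property>
  #     <name>mapred.reduce.tasks</name>
  #     <value>7</value>
  #     <final>true</final>
  #     <description>Hod generated parameter</description>
  #   </property>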
def gen_site_conf(self, confDir, tempDir, numNodes, hdfsAddr, mrSysDir,\
mapredAddr=None, clientParams=None, serverParams=None,\
finalServerParams=None, clusterFactor=None):
if not mapredAddr:
mapredAddr = "dummy:8181"
implementation = getDOMImplementation()
doc = implementation.createDocument('', 'configuration', None)
comment = doc.createComment(
"This is an auto generated hadoop-site.xml, do not modify")
topElement = doc.documentElement
topElement.appendChild(comment)
description = {}
paramsDict = { 'mapred.job.tracker' : mapredAddr , \
'fs.default.name' : "hdfs://" + hdfsAddr, \
'hadoop.tmp.dir' : tempDir, \
}
paramsDict['mapred.system.dir'] = mrSysDir
# mapred-default.xml is no longer used now.
numred = int(math.floor(clusterFactor * (int(numNodes) - 1)))
paramsDict['mapred.reduce.tasks'] = str(numred)
# end
# for all the above vars generated, set the description
for k, v in paramsDict.iteritems():
description[k] = 'Hod generated parameter'
    # finalServerParams
if finalServerParams:
for k, v in finalServerParams.iteritems():
if not description.has_key(k):
description[k] = "final server parameter"
paramsDict[k] = v
    # serverParams
if serverParams:
for k, v in serverParams.iteritems():
if not description.has_key(k):
# if no final value for same param is mentioned
description[k] = "server parameter"
paramsDict[k] = v
# clientParams
if clientParams:
for k, v in clientParams.iteritems():
if not description.has_key(k) or description[k] == "server parameter":
# Just add, if no final value for same param is mentioned.
# Replace even if server param is mentioned for same config variable
description[k] = "client-side parameter"
paramsDict[k] = v
# generate the xml elements
for k,v in paramsDict.iteritems():
if ( description[k] == "final server parameter" or \
description[k] == "Hod generated parameter" ):
final = True
else: final = False
prop = self.__create_xml_element(doc, k, v, description[k], final)
topElement.appendChild(prop)
siteName = os.path.join(confDir, "hadoop-site.xml")
sitefile = file(siteName, 'w')
print >> sitefile, topElement.toxml()
sitefile.close()
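# Minimal usage sketch (hypothetical paths and hosts, not part of the original module):
#   cfg = hadoopConfig()
#   cfg.gen_site_conf('/tmp/hod-conf', '/tmp/hod-tmp', 5,
#                     'namenode.example.com:50070', '/mapredsystem/hod',
#                     mapredAddr='jobtracker.example.com:50030',
#                     clusterFactor=1.75)
# This writes /tmp/hod-conf/hadoop-site.xml; with these values mapred.reduce.tasks
# would be floor(1.75 * (5 - 1)) = 7.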
class hadoopCluster:
def __init__(self, cfg, log):
self.__cfg = cfg
self.__log = log
self.__changedClusterParams = []
self.__hostname = local_fqdn()
self.__svcrgyClient = None
self.__nodePool = NodePoolUtil.getNodePool(self.__cfg['nodepooldesc'],
self.__cfg, self.__log)
self.__hadoopCfg = hadoopConfig()
self.jobId = None
self.mapredInfo = None
self.hdfsInfo = None
self.ringmasterXRS = None
def __get_svcrgy_client(self):
svcrgyUrl = to_http_url(self.__cfg['hod']['xrs-address'])
return hodXRClient(svcrgyUrl)
def __get_service_status(self):
serviceData = self.__get_service_data()
status = True
hdfs = False
mapred = False
for host in serviceData.keys():
for item in serviceData[host]:
service = item.keys()
if service[0] == 'hdfs.grid' and \
self.__cfg['gridservice-hdfs']['external'] == False:
hdfs = True
elif service[0] == 'mapred.grid':
mapred = True
if not mapred:
status = "mapred"
if not hdfs and self.__cfg['gridservice-hdfs']['external'] == False:
if status != True:
status = "mapred and hdfs"
else:
status = "hdfs"
return status
def __get_service_data(self):
registry = to_http_url(self.__cfg['hod']['xrs-address'])
serviceData = self.__svcrgyClient.getServiceInfo(
self.__cfg['hod']['userid'], self.__setup.np.getNodePoolId())
return serviceData
def __check_job_status(self):
failureCount = 0
status = False
state = 'Q'
userLimitsFirstFlag = True
while (state=='Q') or (state==False):
if hodInterrupt.isSet():
raise HodInterruptException()
jobInfo = self.__nodePool.getJobInfo()
state = jobInfo['job_state']
self.__log.debug('job state %s' % state)
if state == False:
failureCount += 1
if (failureCount >= self.__cfg['hod']['job-status-query-failure-retries']):
self.__log.debug('Number of retries reached max limit while querying job status')
break
time.sleep(self.__cfg['hod']['job-command-failure-interval'])
elif state!='Q':
break
else:
self.__log.debug('querying for job status after job-status-query-interval')
time.sleep(self.__cfg['hod']['job-status-query-interval'])
if self.__cfg['hod'].has_key('job-feasibility-attr') and \
self.__cfg['hod']['job-feasibility-attr']:
(status, msg) = self.__isJobFeasible()
if status == "Never":
self.__log.critical(TORQUE_USER_LIMITS_EXCEEDED_MSG + msg + \
"This cluster cannot be allocated now.")
return -1
elif status == False:
if userLimitsFirstFlag:
self.__log.critical(TORQUE_USER_LIMITS_EXCEEDED_MSG + msg + \
"This cluster allocation will succeed only after other " + \
"clusters are deallocated.")
userLimitsFirstFlag = False
if state and state != 'C':
status = True
return status
def __isJobFeasible(self):
return self.__nodePool.isJobFeasible()
def __get_ringmaster_client(self):
ringmasterXRS = None
ringList = self.__svcrgyClient.getServiceInfo(
self.__cfg['ringmaster']['userid'], self.__nodePool.getServiceId(),
'ringmaster', 'hod')
if ringList and len(ringList):
if isinstance(ringList, list):
ringmasterXRS = ringList[0]['xrs']
else:
count = 0
waitTime = self.__cfg['hod']['allocate-wait-time']
while count < waitTime:
if hodInterrupt.isSet():
raise HodInterruptException()
ringList = self.__svcrgyClient.getServiceInfo(
self.__cfg['ringmaster']['userid'], self.__nodePool.getServiceId(),
'ringmaster',
'hod')
if ringList and len(ringList):
if isinstance(ringList, list):
ringmasterXRS = ringList[0]['xrs']
if ringmasterXRS is not None:
break
else:
time.sleep(1)
count = count + 1
# check to see if the job exited by any chance in that time:
if (count % self.__cfg['hod']['job-status-query-interval'] == 0):
if not self.__check_job_status():
break
return ringmasterXRS
def __init_hadoop_service(self, serviceName, xmlrpcClient):
status = True
serviceAddress = None
serviceInfo = None
for i in range(0, 250):
try:
if hodInterrupt.isSet():
raise HodInterruptException()
serviceAddress = xmlrpcClient.getServiceAddr(serviceName)
if serviceAddress:
if serviceAddress == 'not found':
time.sleep(1)
# check to see if the job exited by any chance in that time:
if ((i+1) % self.__cfg['hod']['job-status-query-interval'] == 0):
if not self.__check_job_status():
break
else:
serviceInfo = xmlrpcClient.getURLs(serviceName)
break
except HodInterruptException,h :
raise h
except:
self.__log.critical("'%s': ringmaster xmlrpc error." % serviceName)
self.__log.debug(get_exception_string())
status = False
break
if serviceAddress == 'not found' or not serviceAddress:
self.__log.critical("Failed to retrieve '%s' service address." %
serviceName)
status = False
elif serviceAddress.startswith("Error: "):
errs = serviceAddress[len("Error: "):]
self.__log.critical("Cluster could not be allocated because of the following errors.\n%s" % \
errs)
status = False
else:
try:
self.__svcrgyClient.registerService(self.__cfg['hodring']['userid'],
self.jobId, self.__hostname,
serviceName, 'grid', serviceInfo)
except HodInterruptException, h:
raise h
except:
self.__log.critical("'%s': registry xmlrpc error." % serviceName)
self.__log.debug(get_exception_string())
status = False
return status, serviceAddress, serviceInfo
def __collect_jobtracker_ui(self, dir):
link = self.mapredInfo + "/jobtracker.jsp"
parser = miniHTMLParser()
parser.setBaseUrl(self.mapredInfo)
node_cache = {}
self.__log.debug("collect_jobtracker_ui seeded with " + link)
def alarm_handler(number, stack):
raise AlarmException("timeout")
signal.signal(signal.SIGALRM, alarm_handler)
input = None
while link:
self.__log.debug("link: %s" % link)
# taskstats.jsp,taskdetails.jsp not included since too many to collect
if re.search(
"jobfailures\.jsp|jobtracker\.jsp|jobdetails\.jsp|jobtasks\.jsp",
link):
for i in range(1,5):
if hodInterrupt.isSet():
raise HodInterruptException()
try:
input = urllib.urlopen(link)
break
except:
self.__log.debug(get_exception_string())
time.sleep(1)
if input:
out = None
self.__log.debug("collecting " + link + "...")
filename = re.sub(self.mapredInfo, "", link)
filename = dir + "/" + filename
filename = re.sub("http://","", filename)
filename = re.sub("[\?\&=:]","_",filename)
filename = filename + ".html"
try:
tempdir, tail = os.path.split(filename)
if not os.path.exists(tempdir):
os.makedirs(tempdir)
except:
self.__log.debug(get_exception_string())
out = open(filename, 'w')
bufSz = 8192
signal.alarm(10)
try:
self.__log.debug("Starting to grab: %s" % link)
buf = input.read(bufSz)
while len(buf) > 0:
# Feed the file into the HTML parser
parser.feed(buf)
# Re-write the hrefs in the file
p = re.compile("\?(.+?)=(.+?)")
buf = p.sub(r"_\1_\2",buf)
p= re.compile("&(.+?)=(.+?)")
buf = p.sub(r"_\1_\2",buf)
p = re.compile("http://(.+?):(\d+)?")
buf = p.sub(r"\1_\2/",buf)
buf = re.sub("href=\"/","href=\"",buf)
p = re.compile("href=\"(.+?)\"")
buf = p.sub(r"href=\1.html",buf)
out.write(buf)
buf = input.read(bufSz)
signal.alarm(0)
input.close()
if out:
out.close()
self.__log.debug("Finished grabbing: %s" % link)
except AlarmException:
if hodInterrupt.isSet():
raise HodInterruptException()
if out: out.close()
if input: input.close()
self.__log.debug("Failed to retrieve: %s" % link)
else:
self.__log.debug("Failed to retrieve: %s" % link)
# Get the next link in level traversal order
link = parser.getNextLink()
parser.close()
def check_cluster(self, clusterInfo):
status = 0
if 'mapred' in clusterInfo:
mapredAddress = clusterInfo['mapred'][7:]
hdfsAddress = clusterInfo['hdfs'][7:]
status = get_cluster_status(hdfsAddress, mapredAddress)
if status == 0:
status = 12
else:
status = 15
return status
def is_cluster_deallocated(self, jobId):
"""Returns True if the JobId that represents this cluster
is in the Completed or exiting state."""
jobInfo = self.__nodePool.getJobInfo(jobId)
state = None
if jobInfo is not None and jobInfo.has_key('job_state'):
state = jobInfo['job_state']
return ((state == 'C') or (state == 'E'))
def cleanup(self):
if self.__nodePool: self.__nodePool.finalize()
def get_job_id(self):
return self.jobId
def delete_job(self, jobId):
'''Delete a job given it's ID'''
ret = 0
if self.__nodePool:
ret = self.__nodePool.deleteJob(jobId)
else:
raise Exception("Invalid state: Node pool is not initialized to delete the given job.")
return ret
def is_valid_account(self):
"""Verify if the account being used to submit the job is a valid account.
This code looks for a file <install-dir>/bin/verify-account.
If the file is present, it executes the file, passing as argument
the account name. It returns the exit code and output from the
script on non-zero exit code."""
accountValidationScript = os.path.abspath('./verify-account')
if not os.path.exists(accountValidationScript):
return (0, None)
account = self.__nodePool.getAccountString()
exitCode = 0
errMsg = None
try:
accountValidationCmd = simpleCommand('Account Validation Command',\
'%s %s' % (accountValidationScript,
account))
accountValidationCmd.start()
accountValidationCmd.wait()
accountValidationCmd.join()
exitCode = accountValidationCmd.exit_code()
self.__log.debug('account validation script is run %d' \
% exitCode)
errMsg = None
      if exitCode != 0:
errMsg = accountValidationCmd.output()
except Exception, e:
exitCode = 0
self.__log.warn('Error executing account script: %s ' \
'Accounting is disabled.' \
% get_exception_error_string())
self.__log.debug(get_exception_string())
return (exitCode, errMsg)
def allocate(self, clusterDir, min, max=None):
status = 0
failureCount = 0
self.__svcrgyClient = self.__get_svcrgy_client()
self.__log.debug("allocate %s %s %s" % (clusterDir, min, max))
if min < 3:
self.__log.critical("Minimum nodes must be greater than 2.")
status = 2
else:
nodeSet = self.__nodePool.newNodeSet(min)
walltime = None
if self.__cfg['hod'].has_key('walltime'):
walltime = self.__cfg['hod']['walltime']
self.jobId, exitCode = self.__nodePool.submitNodeSet(nodeSet, walltime)
# if the job submission returned an error other than no resources
# retry a couple of times
while (self.jobId is False) and (exitCode != 188):
if hodInterrupt.isSet():
raise HodInterruptException()
failureCount += 1
if (failureCount >= self.__cfg['hod']['job-status-query-failure-retries']):
self.__log.debug("failed submitting job more than the retries. exiting")
break
else:
# wait a bit before retrying
time.sleep(self.__cfg['hod']['job-command-failure-interval'])
if hodInterrupt.isSet():
raise HodInterruptException()
self.jobId, exitCode = self.__nodePool.submitNodeSet(nodeSet, walltime)
if self.jobId:
jobStatus = None
try:
jobStatus = self.__check_job_status()
except HodInterruptException, h:
self.__log.info(HOD_INTERRUPTED_MESG)
self.delete_job(self.jobId)
self.__log.info("Cluster %s removed from queue." % self.jobId)
raise h
else:
if jobStatus == -1:
self.delete_job(self.jobId);
status = 4
return status
if jobStatus:
self.__log.info("Cluster Id %s" \
% self.jobId)
try:
self.ringmasterXRS = self.__get_ringmaster_client()
self.__log.debug("Ringmaster at : %s" % self.ringmasterXRS )
ringClient = None
if self.ringmasterXRS:
ringClient = hodXRClient(self.ringmasterXRS)
hdfsStatus, hdfsAddr, self.hdfsInfo = \
self.__init_hadoop_service('hdfs', ringClient)
if hdfsStatus:
self.__log.info("HDFS UI at http://%s" % self.hdfsInfo)
mapredStatus, mapredAddr, self.mapredInfo = \
self.__init_hadoop_service('mapred', ringClient)
if mapredStatus:
self.__log.info("Mapred UI at http://%s" % self.mapredInfo)
if self.__cfg['hod'].has_key('update-worker-info') \
and self.__cfg['hod']['update-worker-info']:
workerInfoMap = {}
workerInfoMap['HDFS UI'] = 'http://%s' % self.hdfsInfo
workerInfoMap['Mapred UI'] = 'http://%s' % self.mapredInfo
if mapredAddr.find(':') != -1:
workerInfoMap['Mapred RPC Port'] = mapredAddr.split(':')[1]
ret = self.__nodePool.updateWorkerInfo(workerInfoMap, self.jobId)
if ret != 0:
self.__log.warn('Could not update HDFS and Mapred information.' \
'User Portal may not show relevant information.' \
'Error code=%s' % ret)
self.__cfg.replace_escape_seqs()
# Go generate the client side hadoop-site.xml now
# adding final-params as well, just so that conf on
# client-side and server-side are (almost) the same
clientParams = None
serverParams = {}
finalServerParams = {}
# client-params
if self.__cfg['hod'].has_key('client-params'):
clientParams = self.__cfg['hod']['client-params']
# server-params
if self.__cfg['gridservice-mapred'].has_key('server-params'):
serverParams.update(\
self.__cfg['gridservice-mapred']['server-params'])
if self.__cfg['gridservice-hdfs'].has_key('server-params'):
# note that if there are params in both mapred and hdfs
                # sections, the ones in hdfs overwrite the ones in mapred
serverParams.update(\
self.__cfg['gridservice-hdfs']['server-params'])
# final-server-params
if self.__cfg['gridservice-mapred'].has_key(\
'final-server-params'):
finalServerParams.update(\
self.__cfg['gridservice-mapred']['final-server-params'])
if self.__cfg['gridservice-hdfs'].has_key(
'final-server-params'):
finalServerParams.update(\
self.__cfg['gridservice-hdfs']['final-server-params'])
clusterFactor = self.__cfg['hod']['cluster-factor']
tempDir = self.__cfg['hod']['temp-dir']
if not os.path.exists(tempDir):
os.makedirs(tempDir)
tempDir = os.path.join( tempDir, self.__cfg['hod']['userid']\
+ "." + self.jobId )
mrSysDir = getMapredSystemDirectory(self.__cfg['hodring']['mapred-system-dir-root'],\
self.__cfg['hod']['userid'], self.jobId)
self.__hadoopCfg.gen_site_conf(clusterDir, tempDir, min,\
hdfsAddr, mrSysDir, mapredAddr, clientParams,\
serverParams, finalServerParams,\
clusterFactor)
self.__log.info("hadoop-site.xml at %s" % clusterDir)
# end of hadoop-site.xml generation
else:
status = 8
else:
status = 7
else:
status = 6
if status != 0:
self.__log.debug("Cleaning up cluster id %s, as cluster could not be allocated." % self.jobId)
if ringClient is None:
self.delete_job(self.jobId)
else:
self.__log.debug("Calling rm.stop()")
ringClient.stopRM()
self.__log.debug("Returning from rm.stop()")
except HodInterruptException, h:
self.__log.info(HOD_INTERRUPTED_MESG)
if self.ringmasterXRS:
if ringClient is None:
ringClient = hodXRClient(self.ringmasterXRS)
self.__log.debug("Calling rm.stop()")
ringClient.stopRM()
self.__log.debug("Returning from rm.stop()")
self.__log.info("Cluster Shutdown by informing ringmaster.")
else:
self.delete_job(self.jobId)
self.__log.info("Cluster %s removed from queue directly." % self.jobId)
raise h
else:
self.__log.critical("No cluster found, ringmaster failed to run.")
status = 5
elif self.jobId == False:
if exitCode == 188:
self.__log.critical("Request execeeded maximum resource allocation.")
else:
self.__log.critical("Job submission failed with exit code %s" % exitCode)
status = 4
else:
self.__log.critical("Scheduler failure, allocation failed.\n\n")
status = 4
if status == 5 or status == 6:
ringMasterErrors = self.__svcrgyClient.getRMError()
if ringMasterErrors:
self.__log.critical("Cluster could not be allocated because" \
" of the following errors on the "\
"ringmaster host %s.\n%s" % \
(ringMasterErrors[0], ringMasterErrors[1]))
self.__log.debug("Stack trace on ringmaster: %s" % ringMasterErrors[2])
return status
def __isRingMasterAlive(self, rmAddr):
ret = True
rmSocket = tcpSocket(rmAddr)
try:
rmSocket.open()
rmSocket.close()
except tcpError:
ret = False
return ret
def deallocate(self, clusterDir, clusterInfo):
status = 0
nodeSet = self.__nodePool.newNodeSet(clusterInfo['min'],
id=clusterInfo['jobid'])
self.mapredInfo = clusterInfo['mapred']
self.hdfsInfo = clusterInfo['hdfs']
try:
if self.__cfg['hod'].has_key('hadoop-ui-log-dir'):
clusterStatus = self.check_cluster(clusterInfo)
if clusterStatus != 14 and clusterStatus != 10:
# If JT is still alive
self.__collect_jobtracker_ui(self.__cfg['hod']['hadoop-ui-log-dir'])
else:
self.__log.debug('hadoop-ui-log-dir not specified. Skipping Hadoop UI log collection.')
except HodInterruptException, h:
# got an interrupt. just pass and proceed to qdel
pass
except:
self.__log.info("Exception in collecting Job tracker logs. Ignoring.")
rmAddr = None
if clusterInfo.has_key('ring'):
      # format is http://host:port/ ; we need host:port
rmAddr = clusterInfo['ring'][7:]
if rmAddr.endswith('/'):
rmAddr = rmAddr[:-1]
if (rmAddr is None) or (not self.__isRingMasterAlive(rmAddr)):
# Cluster is already dead, don't try to contact ringmaster.
self.__nodePool.finalize()
status = 10 # As cluster is dead, we just set the status to 'cluster dead'.
else:
xrsAddr = clusterInfo['ring']
rmClient = hodXRClient(xrsAddr)
self.__log.debug('calling rm.stop')
rmClient.stopRM()
self.__log.debug('completed rm.stop')
# cleanup hod temp dirs
tempDir = os.path.join( self.__cfg['hod']['temp-dir'], \
self.__cfg['hod']['userid'] + "." + clusterInfo['jobid'] )
if os.path.exists(tempDir):
shutil.rmtree(tempDir)
return status
class hadoopScript:
def __init__(self, conf, execDir):
self.__environ = os.environ.copy()
self.__environ['HADOOP_CONF_DIR'] = conf
self.__execDir = execDir
def run(self, script):
scriptThread = simpleCommand(script, script, self.__environ, 4, False,
False, self.__execDir)
scriptThread.start()
scriptThread.wait()
scriptThread.join()
return scriptThread.exit_code()
| apache-2.0 | 4,332,073,597,946,133,500 | 35.912752 | 108 | 0.561164 | false |
evensonbryan/yocto-autobuilder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/steps/source/mercurial.py | 4 | 13211 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
## Source step code for mercurial
from twisted.python import log
from twisted.internet import defer
from buildbot.process import buildstep
from buildbot.steps.source.base import Source
from buildbot.interfaces import BuildSlaveTooOldError
from buildbot.config import ConfigErrors
from buildbot.status.results import SUCCESS
class Mercurial(Source):
""" Class for Mercurial with all the smarts """
name = "hg"
renderables = [ "repourl" ]
possible_modes = ('incremental', 'full')
possible_methods = (None, 'clean', 'fresh', 'clobber')
possible_branchTypes = ('inrepo', 'dirname')
def __init__(self, repourl=None, mode='incremental',
method=None, defaultBranch=None, branchType='dirname',
clobberOnBranchChange=True, **kwargs):
"""
@type repourl: string
@param repourl: the URL which points at the Mercurial repository.
if 'dirname' branches are enabled, this is the base URL
to which a branch name will be appended. It should
probably end in a slash.
@param defaultBranch: if branches are enabled, this is the branch
to use if the Build does not specify one
explicitly.
For 'dirname' branches, It will simply be
appended to C{repourl} and the result handed to
the 'hg update' command.
For 'inrepo' branches, this specifies the named
revision to which the tree will update after a
clone.
@param branchType: either 'dirname' or 'inrepo' depending on whether
the branch name should be appended to the C{repourl}
or the branch is a mercurial named branch and can be
found within the C{repourl}
@param clobberOnBranchChange: boolean, defaults to True. If set and
using inrepos branches, clobber the tree
at each branch change. Otherwise, just
update to the branch.
"""
self.repourl = repourl
self.defaultBranch = self.branch = defaultBranch
self.branchType = branchType
self.method = method
self.clobberOnBranchChange = clobberOnBranchChange
self.mode = mode
Source.__init__(self, **kwargs)
errors = []
if self.mode not in self.possible_modes:
errors.append("mode %s is not one of %s" %
(self.mode, self.possible_modes))
if self.method not in self.possible_methods:
errors.append("method %s is not one of %s" %
(self.method, self.possible_methods))
if self.branchType not in self.possible_branchTypes:
errors.append("branchType %s is not one of %s" %
(self.branchType, self.possible_branchTypes))
if repourl is None:
errors.append("you must provide a repourl")
if errors:
raise ConfigErrors(errors)
def startVC(self, branch, revision, patch):
self.revision = revision
self.method = self._getMethod()
self.stdio_log = self.addLogForRemoteCommands("stdio")
d = self.checkHg()
def checkInstall(hgInstalled):
if not hgInstalled:
raise BuildSlaveTooOldError("Mercurial is not installed on slave")
return 0
d.addCallback(checkInstall)
if self.branchType == 'dirname':
self.repourl = self.repourl + (branch or '')
self.branch = self.defaultBranch
self.update_branch = branch
elif self.branchType == 'inrepo':
self.update_branch = (branch or 'default')
if self.mode == 'full':
d.addCallback(lambda _: self.full())
elif self.mode == 'incremental':
d.addCallback(lambda _: self.incremental())
if patch:
d.addCallback(self.patch, patch)
d.addCallback(self.parseGotRevision)
d.addCallback(self.finish)
d.addErrback(self.failed)
@defer.inlineCallbacks
def full(self):
if self.method == 'clobber':
yield self.clobber(None)
return
updatable = yield self._sourcedirIsUpdatable()
if not updatable:
yield self._dovccmd(['clone', self.repourl, '.'])
elif self.method == 'clean':
yield self.clean(None)
elif self.method == 'fresh':
yield self.fresh(None)
else:
raise ValueError("Unknown method, check your configuration")
def incremental(self):
if self.method is not None:
raise ValueError(self.method)
d = self._sourcedirIsUpdatable()
def _cmd(updatable):
if updatable:
command = ['pull', self.repourl]
else:
command = ['clone', self.repourl, '.', '--noupdate']
return command
d.addCallback(_cmd)
d.addCallback(self._dovccmd)
d.addCallback(self._checkBranchChange)
return d
def clean(self, _):
command = ['--config', 'extensions.purge=', 'purge']
d = self._dovccmd(command)
d.addCallback(self._pullUpdate)
return d
def clobber(self, _):
cmd = buildstep.RemoteCommand('rmdir', {'dir': self.workdir,
'logEnviron':self.logEnviron})
cmd.useLog(self.stdio_log, False)
d = self.runCommand(cmd)
d.addCallback(lambda _: self._dovccmd(['clone', '--noupdate'
, self.repourl, "."]))
d.addCallback(self._update)
return d
def fresh(self, _):
command = ['--config', 'extensions.purge=', 'purge', '--all']
d = self._dovccmd(command)
d.addCallback(self._pullUpdate)
return d
def finish(self, res):
d = defer.succeed(res)
def _gotResults(results):
self.setStatus(self.cmd, results)
return results
d.addCallback(_gotResults)
d.addCallbacks(self.finished, self.checkDisconnect)
return d
def parseGotRevision(self, _):
d = self._dovccmd(['parents', '--template', '{node}\\n'], collectStdout=True)
def _setrev(stdout):
revision = stdout.strip()
if len(revision) != 40:
raise ValueError("Incorrect revision id")
log.msg("Got Mercurial revision %s" % (revision, ))
self.updateSourceProperty('got_revision', revision)
return 0
d.addCallback(_setrev)
return d
@defer.inlineCallbacks
def _checkBranchChange(self, _):
current_branch = yield self._getCurrentBranch()
msg = "Working dir is on in-repo branch '%s' and build needs '%s'." % \
(current_branch, self.update_branch)
if current_branch != self.update_branch and self.clobberOnBranchChange:
msg += ' Clobbering.'
log.msg(msg)
yield self.clobber(None)
return
msg += ' Updating.'
log.msg(msg)
yield self._removeAddedFilesAndUpdate(None)
def _pullUpdate(self, res):
command = ['pull' , self.repourl]
if self.revision:
command.extend(['--rev', self.revision])
d = self._dovccmd(command)
d.addCallback(self._checkBranchChange)
return d
def _dovccmd(self, command, collectStdout=False, initialStdin=None, decodeRC={0:SUCCESS}):
if not command:
raise ValueError("No command specified")
cmd = buildstep.RemoteShellCommand(self.workdir, ['hg', '--verbose'] + command,
env=self.env,
logEnviron=self.logEnviron,
timeout=self.timeout,
collectStdout=collectStdout,
initialStdin=initialStdin,
decodeRC=decodeRC)
cmd.useLog(self.stdio_log, False)
log.msg("Starting mercurial command : hg %s" % (" ".join(command), ))
d = self.runCommand(cmd)
def evaluateCommand(cmd):
if cmd.didFail():
log.msg("Source step failed while running command %s" % cmd)
raise buildstep.BuildStepFailed()
if collectStdout:
return cmd.stdout
else:
return cmd.rc
d.addCallback(lambda _: evaluateCommand(cmd))
return d
def computeSourceRevision(self, changes):
if not changes:
return None
# without knowing the revision ancestry graph, we can't sort the
# changes at all. So for now, assume they were given to us in sorted
# order, and just pay attention to the last one. See ticket #103 for
# more details.
if len(changes) > 1:
log.msg("Mercurial.computeSourceRevision: warning: "
"there are %d changes here, assuming the last one is "
"the most recent" % len(changes))
return changes[-1].revision
def patch(self, _, patch):
d = self._dovccmd(['import', '--no-commit', '-p', str(patch[0]), '-'],
initialStdin=patch[1])
return d
def _getCurrentBranch(self):
if self.branchType == 'dirname':
return defer.succeed(self.branch)
else:
d = self._dovccmd(['identify', '--branch'], collectStdout=True)
def _getbranch(stdout):
return stdout.strip()
            d.addCallback(_getbranch)
return d
def _getMethod(self):
if self.method is not None and self.mode != 'incremental':
return self.method
elif self.mode == 'incremental':
return None
elif self.method is None and self.mode == 'full':
return 'fresh'
def _sourcedirIsUpdatable(self):
return self.pathExists(self.build.path_module.join(self.workdir, '.hg'))
def _removeAddedFilesAndUpdate(self, _):
command = ['locate', 'set:added()']
d = self._dovccmd(command, collectStdout=True, decodeRC={0:SUCCESS,1:SUCCESS})
def parseAndRemove(stdout):
files = []
for filename in stdout.splitlines() :
filename = self.workdir+'/'+filename
files.append(filename)
if len(files) == 0:
d = defer.succeed(0)
else:
if self.slaveVersionIsOlderThan('rmdir', '2.14'):
d = self.removeFiles(files)
else:
cmd = buildstep.RemoteCommand('rmdir', {'dir': files,
'logEnviron':
self.logEnviron,})
cmd.useLog(self.stdio_log, False)
d = self.runCommand(cmd)
d.addCallback(lambda _: cmd.rc)
return d
d.addCallback(parseAndRemove)
d.addCallback(self._update)
return d
@defer.inlineCallbacks
def removeFiles(self, files):
for filename in files:
cmd = buildstep.RemoteCommand('rmdir', {'dir': filename,
'logEnviron': self.logEnviron,})
cmd.useLog(self.stdio_log, False)
yield self.runCommand(cmd)
if cmd.rc != 0:
defer.returnValue(cmd.rc)
return
defer.returnValue(0)
def _update(self, _):
command = ['update', '--clean']
if self.revision:
command += ['--rev', self.revision]
elif self.branchType == 'inrepo':
command += ['--rev', self.update_branch]
d = self._dovccmd(command)
return d
def checkHg(self):
d = self._dovccmd(['--version'])
def check(res):
if res == 0:
return True
return False
d.addCallback(check)
return d
| gpl-2.0 | 2,912,840,854,916,584,000 | 38.085799 | 94 | 0.550753 | false |
jianghuaw/nova | nova/policies/extended_availability_zone.py | 2 | 1272 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-extended-availability-zone'
extended_availability_zone_policies = [
policy.DocumentedRuleDefault(
BASE_POLICY_NAME,
base.RULE_ADMIN_OR_OWNER,
"Add `OS-EXT-AZ:availability_zone` into the server response",
[
{
'method': 'GET',
'path': '/servers/{id}'
},
{
'method': 'GET',
'path': '/servers/detail'
}
]),
]
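# Illustrative effect (example value, not defined in this module): when this policy
# check passes, each server in `GET /servers/{id}` and `GET /servers/detail` gains a
# field such as
#   "OS-EXT-AZ:availability_zone": "nova"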
def list_rules():
return extended_availability_zone_policies
| apache-2.0 | 8,420,198,627,852,604,000 | 28.581395 | 78 | 0.636006 | false |
Lujeni/ansible | lib/ansible/modules/cloud/google/gcp_compute_instance_group_manager.py | 16 | 20913 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_instance_group_manager
description:
- Creates a managed instance group using the information that you specify in the request.
After the group is created, it schedules an action to create instances in the group
using the specified instance template. This operation is marked as DONE when the
group is created even if the instances in the group have not yet been created. You
must separately verify the status of the individual instances.
- A managed instance group can have up to 1000 VM instances per group.
short_description: Creates a GCP InstanceGroupManager
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
base_instance_name:
description:
- The base instance name to use for instances in this group. The value must be
1-58 characters long. Instances are named by appending a hyphen and a random
four-character string to the base instance name.
- The base instance name must comply with RFC1035.
required: true
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
type: str
instance_template:
description:
- The instance template that is specified for this managed instance group. The
group uses this template to create all new instances in the managed instance
group.
- 'This field represents a link to a InstanceTemplate resource in GCP. It can
be specified in two ways. First, you can place a dictionary with key ''selfLink''
and value of your resource''s selfLink Alternatively, you can add `register:
name-of-resource` to a gcp_compute_instance_template task and then set this
instance_template field to "{{ name-of-resource }}"'
required: true
type: dict
name:
description:
- The name of the managed instance group. The name must be 1-63 characters long,
and comply with RFC1035.
required: true
type: str
named_ports:
description:
- Named ports configured for the Instance Groups complementary to this Instance
Group Manager.
required: false
type: list
suboptions:
name:
description:
- The name for this named port. The name must be 1-63 characters long, and
comply with RFC1035.
required: false
type: str
port:
description:
- The port number, which can be a value between 1 and 65535.
required: false
type: int
target_pools:
description:
- TargetPool resources to which instances in the instanceGroup field are added.
The target pools automatically apply to all of the instances in the managed
instance group.
required: false
type: list
target_size:
description:
- The target number of running instances for this managed instance group. Deleting
or abandoning instances reduces this number. Resizing the group changes this
number.
required: false
type: int
zone:
description:
    - The zone the managed instance group resides in.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
'''
EXAMPLES = '''
- name: create a network
gcp_compute_network:
name: network-instancetemplate
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a address
gcp_compute_address:
name: address-instancetemplate
region: us-west1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: address
- name: create a instance template
gcp_compute_instance_template:
name: "{{ resource_name }}"
properties:
disks:
- auto_delete: 'true'
boot: 'true'
initialize_params:
source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-1604-lts
machine_type: n1-standard-1
network_interfaces:
- network: "{{ network }}"
access_configs:
- name: test-config
type: ONE_TO_ONE_NAT
nat_ip: "{{ address }}"
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instancetemplate
- name: create a instance group manager
gcp_compute_instance_group_manager:
name: test_object
base_instance_name: test1-child
instance_template: "{{ instancetemplate }}"
target_size: 3
zone: us-west1-a
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
baseInstanceName:
description:
- The base instance name to use for instances in this group. The value must be 1-58
characters long. Instances are named by appending a hyphen and a random four-character
string to the base instance name.
- The base instance name must comply with RFC1035.
returned: success
type: str
creationTimestamp:
description:
- The creation timestamp for this managed instance group in RFC3339 text format.
returned: success
type: str
currentActions:
description:
- The list of instance actions and the number of instances in this managed instance
group that are scheduled for each of those actions.
returned: success
type: complex
contains:
abandoning:
description:
- The total number of instances in the managed instance group that are scheduled
to be abandoned. Abandoning an instance removes it from the managed instance
group without deleting it.
returned: success
type: int
creating:
description:
- The number of instances in the managed instance group that are scheduled to
be created or are currently being created. If the group fails to create any
of these instances, it tries again until it creates the instance successfully.
- If you have disabled creation retries, this field will not be populated; instead,
the creatingWithoutRetries field will be populated.
returned: success
type: int
creatingWithoutRetries:
description:
- The number of instances that the managed instance group will attempt to create.
The group attempts to create each instance only once. If the group fails to
create any of these instances, it decreases the group's targetSize value accordingly.
returned: success
type: int
deleting:
description:
- The number of instances in the managed instance group that are scheduled to
be deleted or are currently being deleted.
returned: success
type: int
none:
description:
- The number of instances in the managed instance group that are running and
have no scheduled actions.
returned: success
type: int
recreating:
description:
- The number of instances in the managed instance group that are scheduled to
        be recreated or are currently being recreated.
- Recreating an instance deletes the existing root persistent disk and creates
a new disk from the image that is defined in the instance template.
returned: success
type: int
refreshing:
description:
- The number of instances in the managed instance group that are being reconfigured
with properties that do not require a restart or a recreate action. For example,
setting or removing target pools for the instance.
returned: success
type: int
restarting:
description:
- The number of instances in the managed instance group that are scheduled to
be restarted or are currently being restarted.
returned: success
type: int
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
id:
description:
- A unique identifier for this resource.
returned: success
type: int
instanceGroup:
description:
- The instance group being managed.
returned: success
type: dict
instanceTemplate:
description:
- The instance template that is specified for this managed instance group. The group
uses this template to create all new instances in the managed instance group.
returned: success
type: dict
name:
description:
- The name of the managed instance group. The name must be 1-63 characters long,
and comply with RFC1035.
returned: success
type: str
namedPorts:
description:
- Named ports configured for the Instance Groups complementary to this Instance
Group Manager.
returned: success
type: complex
contains:
name:
description:
- The name for this named port. The name must be 1-63 characters long, and comply
with RFC1035.
returned: success
type: str
port:
description:
- The port number, which can be a value between 1 and 65535.
returned: success
type: int
region:
description:
  - The region this managed instance group resides in (for regional resources).
returned: success
type: str
targetPools:
description:
- TargetPool resources to which instances in the instanceGroup field are added.
The target pools automatically apply to all of the instances in the managed instance
group.
returned: success
type: list
targetSize:
description:
- The target number of running instances for this managed instance group. Deleting
or abandoning instances reduces this number. Resizing the group changes this number.
returned: success
type: int
zone:
description:
  - The zone the managed instance group resides in.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
base_instance_name=dict(required=True, type='str'),
description=dict(type='str'),
instance_template=dict(required=True, type='dict'),
name=dict(required=True, type='str'),
named_ports=dict(type='list', elements='dict', options=dict(name=dict(type='str'), port=dict(type='int'))),
target_pools=dict(type='list', elements='dict'),
target_size=dict(type='int'),
zone=dict(required=True, type='str'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#instanceGroupManager'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.put(link, resource_to_request(module)))
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#instanceGroupManager',
u'baseInstanceName': module.params.get('base_instance_name'),
u'description': module.params.get('description'),
u'instanceTemplate': replace_resource_dict(module.params.get(u'instance_template', {}), 'selfLink'),
u'name': module.params.get('name'),
u'namedPorts': InstanceGroupManagerNamedportsArray(module.params.get('named_ports', []), module).to_request(),
u'targetPools': replace_resource_dict(module.params.get('target_pools', []), 'selfLink'),
u'targetSize': module.params.get('target_size'),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
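# Rough sketch of the request body this produces (hypothetical parameter values, not
# part of the module):
#   {
#     "kind": "compute#instanceGroupManager",
#     "baseInstanceName": "test1-child",
#     "instanceTemplate": "https://www.googleapis.com/compute/v1/projects/p/global/instanceTemplates/t",
#     "name": "test_object",
#     "targetSize": 3
#   }
# Keys whose values are empty or None are dropped before the request is sent.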
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'baseInstanceName': response.get(u'baseInstanceName'),
u'creationTimestamp': response.get(u'creationTimestamp'),
u'currentActions': InstanceGroupManagerCurrentactions(response.get(u'currentActions', {}), module).from_response(),
u'description': module.params.get('description'),
u'id': response.get(u'id'),
u'instanceGroup': response.get(u'instanceGroup'),
u'instanceTemplate': response.get(u'instanceTemplate'),
u'name': response.get(u'name'),
u'namedPorts': InstanceGroupManagerNamedportsArray(response.get(u'namedPorts', []), module).from_response(),
u'region': response.get(u'region'),
u'targetPools': response.get(u'targetPools'),
u'targetSize': response.get(u'targetSize'),
}
def region_selflink(name, params):
if name is None:
return
url = r"https://www.googleapis.com/compute/v1/projects/.*/regions/.*"
if not re.match(url, name):
name = "https://www.googleapis.com/compute/v1/projects/{project}/regions/%s".format(**params) % name
return name
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#instanceGroupManager')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation', False)
status = navigate_hash(op_result, ['status'])
return op_result
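# Editorial note (summary, not original code): create/update/delete above return a
# compute#operation; wait_for_operation/wait_for_completion poll the zonal operations
# URL roughly once per second until status == 'DONE', then re-fetch the resulting
# InstanceGroupManager via the operation's targetLink.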
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
class InstanceGroupManagerCurrentactions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({})
def from_response(self):
return remove_nones_from_dict({})
class InstanceGroupManagerNamedportsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'name': item.get('name'), u'port': item.get('port')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'name': item.get(u'name'), u'port': item.get(u'port')})
if __name__ == '__main__':
main()
| gpl-3.0 | -4,554,732,298,482,910,000 | 32.676329 | 138 | 0.647827 | false |
111t8e/h2o-2 | py/testdir_single_jvm/test_import_file.py | 9 | 2415 | import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
import time, random
print "This tries to mimic the use of the file pathname with ImportFile like R does"
print "Normally the python tests import the folder first, using h2o_import.import_parse()"
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(java_heap_GB=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_import_file(self):
timeoutSecs = 500
cAll = [
'smalldata/jira/v-3.csv',
'smalldata/jira/v-3.csv',
'smalldata/jira/v-3.csv',
'smalldata/jira/v-3.csv',
]
# pop open a browser on the cloud
# h2b.browseTheCloud()
for c in cAll:
for i in range(10):
# interesting. this will pass ../.. to h2o which is legal for h2o
# but if this is run on a remote machine, we were assuming matching absolute paths
# not relative to a current wd. I suppose we should test ../.. relative paths
# but it would be local machine only (means you can't use this with -cj config.json ??
csvPathname = h2o.find_file('smalldata/jira/v-3.csv')
# http://172.16.2.222:54321/2/ImportFiles2.json?path=../../smalldata/jira/v-3.csv
# race between remove and import?
h2o.nodes[0].remove_all_keys()
importResult = h2o.nodes[0].import_files(csvPathname, timeoutSecs=15)
h2o.verboseprint(h2o.dump_json(importResult))
files = importResult['files']
keys = importResult['keys']
fails = importResult['fails']
dels = importResult['dels']
if len(files) == 0:
raise Exception("empty files: %s after import" % files)
if len(keys) == 0:
raise Exception("empty keys: %s after import" % keys)
if len(fails) != 0:
raise Exception("non-empty fails: %s after import" % fails)
if len(dels) != 0:
raise Exception("non-empty dels: %s after import" % dels)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | -9,109,593,050,737,356,000 | 37.951613 | 102 | 0.558178 | false |
caisq/tensorflow | tensorflow/contrib/optimizer_v2/checkpointable_utils_test.py | 8 | 33376 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(josh11b): Forked from contrib/eager/python to test OptimizerV2 the same way
# OptimizerV1 is tested. This file should be removed once the fork is resolved.
import functools
import os
import six
from tensorflow.contrib.optimizer_v2 import adam
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import saver as core_saver
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
class NonLayerCheckpointable(tracking.Checkpointable):
def __init__(self):
super(NonLayerCheckpointable, self).__init__()
self.a_variable = util.add_variable(
self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
"""A concrete Model for testing."""
def __init__(self):
super(MyModel, self).__init__()
self._named_dense = core.Dense(1, use_bias=True)
self._second = core.Dense(1, use_bias=False)
# We can still track Checkpointables which aren't Layers.
self._non_layer = NonLayerCheckpointable()
def call(self, values):
ret = self._second(self._named_dense(values))
return ret
class _MirroringSaveable(
core_saver.BaseSaverBuilder.ResourceVariableSaveable):
def __init__(self, primary_variable, mirrored_variable, name):
self._primary_variable = primary_variable
self._mirrored_variable = mirrored_variable
super(_MirroringSaveable, self).__init__(
self._primary_variable, "", name)
def restore(self, restored_tensors, restored_shapes):
"""Restore the same value into both variables."""
tensor, = restored_tensors
return control_flow_ops.group(
self._primary_variable.assign(tensor),
self._mirrored_variable.assign(tensor))
class CheckpointingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testNamingWithOptimizer(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
# A nuisance Model using the same optimizer. Its slot variables should not
# go in the checkpoint, since it is never depended on.
other_model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
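# The Checkpoint object is the root of the dependency graph, so the checkpoint
# names asserted below are derived from object paths (model/..., optimizer/...).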
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value),
global_step=optimizer_step)
optimizer.minimize(
lambda: other_model(input_value),
global_step=optimizer_step)
else:
train_op = optimizer.minimize(
model(input_value), global_step=optimizer_step)
optimizer.minimize(
other_model(input_value),
global_step=optimizer_step)
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
named_variables, serialized_graph, _ = (
util._serialize_object_graph(
root_checkpointable, saveables_cache=None))
expected_checkpoint_names = (
# Created in the root node, so no prefix.
"optimizer_step",
"model/_second/kernel",
"model/_named_dense/kernel",
"model/_named_dense/bias",
# non-Layer dependency of the model
"model/_non_layer/a_variable",
# The optimizer creates two non-slot variables
"optimizer/beta1_power",
"optimizer/beta2_power",
# Slot variables
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
"model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
)
suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
expected_checkpoint_names = [
name + suffix for name in expected_checkpoint_names]
# The Dense layers also save get_config() JSON
expected_checkpoint_names.extend(
["model/_second/.ATTRIBUTES/OBJECT_CONFIG_JSON",
"model/_named_dense/.ATTRIBUTES/OBJECT_CONFIG_JSON"])
named_variables = {v.name: v for v in named_variables}
six.assertCountEqual(self, expected_checkpoint_names,
named_variables.keys())
# Check that we've mapped to the right variable objects (not exhaustive)
self.assertEqual(
"global_step",
named_variables["optimizer_step" + suffix].full_name)
self.assertEqual(
"my_model/dense_1/kernel",
named_variables["model/_second/kernel" + suffix].full_name)
self.assertEqual(
"my_model/dense/kernel",
named_variables["model/_named_dense/kernel" + suffix].full_name)
self.assertEqual(
"beta1_power",
named_variables["optimizer/beta1_power" + suffix].full_name)
self.assertEqual(
"beta2_power",
named_variables["optimizer/beta2_power" + suffix].full_name)
# Spot check the generated protocol buffers.
self.assertEqual("optimizer",
serialized_graph.nodes[0].children[1].local_name)
optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
1].node_id]
self.assertEqual("beta1_power",
optimizer_node.children[0].local_name)
self.assertEqual("beta1_power",
serialized_graph.nodes[optimizer_node.children[0].node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.original_variable_node_id]
.attributes[0].full_name)
# We strip off the :0 suffix, as variable.name-based saving does.
self.assertEqual(
"my_model/dense/kernel/Adam",
serialized_graph.nodes[optimizer_node.slot_variables[0]
.slot_variable_node_id]
.attributes[0].full_name)
self.assertEqual(
"my_model/dense/kernel/Adam:0",
optimizer.get_slot(
var=model._named_dense.kernel,
name="m").name)
self.assertEqual(
"model/_named_dense/kernel" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.original_variable_node_id].attributes[0].checkpoint_key)
self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
self.assertEqual(
"model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
serialized_graph.nodes[
optimizer_node.slot_variables[0]
.slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model)
input_value = constant_op.constant([[3.]])
if context.executing_eagerly():
optimizer.minimize(
lambda: model(input_value))
else:
train_op = optimizer.minimize(model(input_value))
# TODO(allenl): Make initialization more pleasant when graph building.
root_checkpointable.save_counter # pylint: disable=pointless-statement
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
save_path = root_checkpointable.save(file_prefix=prefix)
self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
optimizer_variables = self.evaluate(optimizer.variables())
self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
# Immediate restoration
status = root_checkpointable.restore(save_path=save_path).assert_consumed()
status.run_restore_ops()
self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
if not context.executing_eagerly():
return # Restore-on-create is only supported when executing eagerly
on_create_model = MyModel()
on_create_optimizer = adam.AdamOptimizer(
0.001,
# Preserve beta1_power and beta2_power when applying gradients so we can
# test that they've been restored correctly.
beta1=1.0, beta2=1.0)
on_create_root = util.Checkpoint(
optimizer=on_create_optimizer, model=on_create_model)
# Deferred restoration
status = on_create_root.restore(save_path=save_path)
on_create_model(constant_op.constant([[3.]])) # create variables
self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
self.assertAllEqual([42.],
self.evaluate(
on_create_model._named_dense.variables[1]))
on_create_m_bias_slot = on_create_optimizer.get_slot(
on_create_model._named_dense.variables[1], "m")
# Optimizer slot variables are created when the original variable is
# restored.
self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
self.assertAllEqual(optimizer_variables[2:],
self.evaluate(on_create_optimizer.variables()))
dummy_var = resource_variable_ops.ResourceVariable([1.])
on_create_optimizer.minimize(loss=dummy_var.read_value)
status.assert_consumed()
beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
"""An idiomatic eager execution example."""
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
optimizer_step=training_util.get_or_create_global_step())
root.restore(core_saver.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
# TODO(allenl): Use a Dataset and serialize/checkpoint it.
input_value = constant_op.constant([[3.]])
optimizer.minimize(
lambda: model(input_value), # pylint: disable=cell-var-from-loop
global_step=root.optimizer_step)
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
root.optimizer_step.numpy())
def testUsageGraph(self):
"""Expected usage when graph building."""
with context.graph_mode():
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default():
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
input_value = constant_op.constant([[3.]])
train_op = optimizer.minimize(
model(input_value),
global_step=root.global_step)
checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
with self.test_session(graph=ops.get_default_graph()) as session:
status = root.restore(save_path=checkpoint_path)
status.initialize_or_restore(session=session)
if checkpoint_path is None:
self.assertEqual(0, training_continuation)
with self.assertRaises(AssertionError):
status.assert_consumed()
else:
status.assert_consumed()
for _ in range(num_training_steps):
session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session)
self.assertEqual((training_continuation + 1) * num_training_steps,
session.run(root.global_step))
self.assertEqual(training_continuation + 1,
session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
"""Graph/eager agnostic usage."""
# Does create garbage when executing eagerly due to ops.Graph() creation.
num_training_steps = 10
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
input_value = constant_op.constant([[3.]])
train_fn = functools.partial(
optimizer.minimize,
functools.partial(model, input_value),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
def testWithDefun(self):
num_training_steps = 2
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
for training_continuation in range(3):
with ops.Graph().as_default(), self.test_session(
graph=ops.get_default_graph()), test_util.device(use_gpu=True):
model = MyModel()
# Don't actually train so we can test variable values
optimizer = adam.AdamOptimizer(0.)
root = util.Checkpoint(
optimizer=optimizer, model=model,
global_step=training_util.get_or_create_global_step())
checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
status = root.restore(save_path=checkpoint_path)
def train_fn():
@function.defun
def _call_model(x):
return model(x)
with backprop.GradientTape() as tape:
loss = _call_model(constant_op.constant([[3.]]))
gradients = tape.gradient(loss, model.variables)
return optimizer.apply_gradients(zip(gradients, model.variables),
global_step=root.global_step)
if not context.executing_eagerly():
train_fn = functools.partial(
self.evaluate, train_fn())
status.initialize_or_restore()
for _ in range(num_training_steps):
train_fn()
if training_continuation > 0:
status.assert_consumed()
self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
else:
self.evaluate(model.variables[0].assign([[42.]]))
root.save(file_prefix=checkpoint_prefix)
self.assertEqual((training_continuation + 1) * num_training_steps,
self.evaluate(root.global_step))
self.assertEqual(training_continuation + 1,
self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def testAnonymousVarsInInit(self):
class Model(training.Model):
def __init__(self):
super(Model, self).__init__()
self.w = resource_variable_ops.ResourceVariable(0.0)
self.b = resource_variable_ops.ResourceVariable(0.0)
self.vars = [self.w, self.b]
def call(self, x):
return x * self.w + self.b
with context.eager_mode():
model = Model()
optimizer = adam.AdamOptimizer(learning_rate=0.05)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = util.Checkpoint(
model=model, optimizer=optimizer)
for _ in range(2):
checkpoint.save(checkpoint_prefix)
with backprop.GradientTape() as tape:
loss = (constant_op.constant(1.)
- model(constant_op.constant(1.))) ** 2
grad = tape.gradient(loss, model.vars)
optimizer.apply_gradients(
[(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
checkpoint_directory = self.get_temp_dir()
root = tracking.Checkpointable()
root.var = util.add_variable(
root, name="var", initializer=0.)
optimizer = adam.AdamOptimizer(0.1)
if context.executing_eagerly():
optimizer.minimize(root.var.read_value)
else:
train_op = optimizer.minimize(root.var)
# Note that `optimizer` has not been added as a dependency of
# `root`. Create a one-off grouping so that slot variables for `root.var`
# get initialized too.
self.evaluate(util.gather_initializers(
util.Checkpoint(root=root, optimizer=optimizer)))
self.evaluate(train_op)
self.evaluate(state_ops.assign(root.var, 12.))
no_slots_path = util.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "no_slots"))
root.optimizer = optimizer
self.evaluate(state_ops.assign(root.var, 13.))
self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
14.))
slots_path = util.CheckpointableSaver(root).save(
os.path.join(checkpoint_directory, "with_slots"))
new_root = tracking.Checkpointable()
# Load the slot-containing checkpoint (deferred), then immediately overwrite
# the non-slot variable (also deferred).
slot_status = util.CheckpointableSaver(
new_root).restore(slots_path)
no_slot_status = util.CheckpointableSaver(
new_root).restore(no_slots_path)
with self.assertRaises(AssertionError):
no_slot_status.assert_consumed()
new_root.var = util.add_variable(
new_root, name="var", shape=[])
no_slot_status.assert_consumed()
no_slot_status.run_restore_ops()
self.assertEqual(12., self.evaluate(new_root.var))
new_root.optimizer = adam.AdamOptimizer(0.1)
with self.assertRaisesRegexp(AssertionError, "beta1_power"):
slot_status.assert_consumed()
self.assertEqual(12., self.evaluate(new_root.var))
if context.executing_eagerly():
# Slot variables are only created with restoring initializers when
# executing eagerly.
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
else:
self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
None)
if context.executing_eagerly():
new_root.optimizer.minimize(new_root.var.read_value)
else:
train_op = new_root.optimizer.minimize(new_root.var)
# The slot variable now exists; restore() didn't create it, but we should
# now have a restore op for it.
slot_status.run_restore_ops()
self.assertEqual(14., self.evaluate(
new_root.optimizer.get_slot(name="m", var=new_root.var)))
self.evaluate(train_op)
slot_status.assert_consumed()
def testManySavesGraph(self):
"""Saves after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(util.gather_initializers(obj))
saver = util.CheckpointableSaver(obj)
saver.save(checkpoint_prefix)
before_ops = graph.get_operations()
saver.save(checkpoint_prefix)
self.assertEqual(before_ops, graph.get_operations())
def testManyRestoresGraph(self):
"""Restores after the first should not modify the graph."""
with context.graph_mode():
graph = ops.Graph()
with graph.as_default(), self.test_session(graph):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
obj = tracking.Checkpointable()
obj.var = variable_scope.get_variable(name="v", initializer=0.)
obj.opt = adam.AdamOptimizer(0.1)
obj.opt.minimize(obj.var.read_value())
self.evaluate(util.gather_initializers(obj))
saver = util.CheckpointableSaver(obj)
save_path = saver.save(checkpoint_prefix)
saver.restore(save_path)
before_ops = graph.get_operations()
saver.restore(save_path)
self.assertEqual(before_ops, graph.get_operations())
def testMultipleGraphsNonSlotVariables(self):
with context.graph_mode():
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
optimizer = adam.AdamOptimizer(0.001)
# Construct a model in one graph
first_graph = ops.Graph()
first_session = session_lib.Session(graph=first_graph)
with first_graph.as_default(), first_session.as_default():
first_variable = resource_variable_ops.ResourceVariable([1.])
first_root_checkpointable = util.Checkpoint(
optimizer=optimizer, variable=first_variable)
train_op = optimizer.minimize(first_variable.read_value)
self.evaluate(util.gather_initializers(
first_root_checkpointable))
self.evaluate(train_op)
self.evaluate(first_variable.assign([1.]))
self.evaluate(optimizer.get_slot(
var=first_variable, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
# Save and load in a second graph
second_graph = ops.Graph()
with second_graph.as_default(), session_lib.Session(graph=second_graph):
second_variable = resource_variable_ops.ResourceVariable([1.])
second_root_checkpointable = util.Checkpoint(
optimizer=optimizer, variable=second_variable)
train_op = optimizer.minimize(second_variable.read_value)
second_root_checkpointable.restore(None).initialize_or_restore()
self.evaluate(train_op)
self.evaluate(second_variable.assign([4.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([5.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(6.))
save_path = second_root_checkpointable.save(checkpoint_prefix)
self.evaluate(second_variable.assign([7.]))
self.evaluate(optimizer.get_slot(
var=second_variable, name="m").assign([8.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
status = second_root_checkpointable.restore(save_path)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([4.], self.evaluate(second_variable))
self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
var=second_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(6., self.evaluate(beta1_power))
# Check that the first graph is unmolested
with first_graph.as_default(), first_session.as_default():
self.assertAllEqual([1.], self.evaluate(first_variable))
self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
var=first_variable, name="m")))
beta1_power, _ = optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
class TemplateTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_checkpointable_save_restore(self):
def _templated():
v = variable_scope.get_variable(
"v", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
v2 = variable_scope.get_variable(
"v2", shape=[1], initializer=init_ops.zeros_initializer(),
use_resource=True)
return v, v + 1., v2
save_template = template.make_template("s1", _templated)
v1_save, _, v2_save = save_template()
optimizer = adam.AdamOptimizer(0.0)
save_root = util.Checkpoint(
my_template=save_template, optimizer=optimizer)
optimizer.minimize(v1_save.read_value)
self.evaluate([v.initializer for v in optimizer.variables()])
self.evaluate(v1_save.assign([12.]))
self.evaluate(v2_save.assign([14.]))
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
save_path = save_root.save(checkpoint_prefix)
load_template = template.make_template("s2", _templated)
load_optimizer = adam.AdamOptimizer(0.0)
load_root = util.Checkpoint(
my_template=load_template, optimizer=load_optimizer)
status = load_root.restore(save_path)
var, var_plus_one, var2 = load_template()
load_optimizer.minimize(var.read_value)
self.assertEqual(2, len(load_template._checkpoint_dependencies))
self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
status.assert_consumed().run_restore_ops()
self.assertAllEqual([12.], self.evaluate(var))
self.assertAllEqual([13.], self.evaluate(var_plus_one))
self.assertAllEqual([14.], self.evaluate(var2))
class CheckpointCompatibilityTests(test.TestCase):
def _initialized_model(self):
input_value = constant_op.constant([[3.]])
model = MyModel()
optimizer = adam.AdamOptimizer(0.001)
optimizer_step = training_util.get_or_create_global_step()
root_checkpointable = util.Checkpoint(
optimizer=optimizer, model=model, optimizer_step=optimizer_step)
train_op = optimizer.minimize(
functools.partial(model, input_value),
global_step=optimizer_step)
self.evaluate(util.gather_initializers(
root_checkpointable))
self.evaluate(train_op)
# A regular variable, a slot variable, and a non-slot Optimizer variable
# with known values to check when loading.
self.evaluate(model._named_dense.bias.assign([1.]))
self.evaluate(optimizer.get_slot(
var=model._named_dense.bias, name="m").assign([2.]))
beta1_power, _ = optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(3.))
return root_checkpointable
def _set_sentinels(self, root_checkpointable):
self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")
.assign([102.]))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.evaluate(beta1_power.assign(103.))
def _check_sentinels(self, root_checkpointable):
self.assertAllEqual(
[1.], self.evaluate(root_checkpointable.model._named_dense.bias))
self.assertAllEqual([2.], self.evaluate(
root_checkpointable.optimizer.get_slot(
var=root_checkpointable.model._named_dense.bias, name="m")))
beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
self.assertAllEqual(3., self.evaluate(beta1_power))
def _write_name_based_checkpoint(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
name_saver = core_saver.Saver()
return name_saver.save(
sess=session, save_path=checkpoint_prefix,
global_step=root.optimizer_step)
@test_util.run_in_graph_and_eager_modes
def testLoadFromNameBasedSaver(self):
"""Save a name-based checkpoint, load it using the object-based API."""
with test_util.device(use_gpu=True):
save_path = self._write_name_based_checkpoint()
root = self._initialized_model()
self._set_sentinels(root)
with self.assertRaises(AssertionError):
self._check_sentinels(root)
object_saver = util.CheckpointableSaver(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
if context.executing_eagerly():
self._check_sentinels(root)
if context.executing_eagerly():
with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
status.assert_consumed()
else:
# When graph building, we haven't read any keys, so we don't know
# whether the restore will be complete.
with self.assertRaisesRegexp(AssertionError, "not restored"):
status.assert_consumed()
status.run_restore_ops()
self._check_sentinels(root)
self._set_sentinels(root)
status = object_saver.restore(save_path)
status.initialize_or_restore()
self._check_sentinels(root)
# TODO(allenl): Test for the core name-based saver loading object-based
# checkpoints once object-based checkpointing is in core.
def testSaveGraphLoadEager(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph) as session:
root = self._initialized_model()
save_path = root.save(
session=session, file_prefix=checkpoint_prefix)
with context.eager_mode():
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed()
self._check_sentinels(root)
def testSaveEagerLoadGraph(self):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with context.eager_mode():
root = self._initialized_model()
save_path = root.save(file_prefix=checkpoint_prefix)
with context.graph_mode():
save_graph = ops.Graph()
with save_graph.as_default(), self.test_session(
graph=save_graph):
root = self._initialized_model()
self._set_sentinels(root)
root.restore(save_path).assert_consumed().run_restore_ops()
self._check_sentinels(root)
if __name__ == "__main__":
test.main()
| apache-2.0 | 4,735,191,202,764,980,000 | 43.148148 | 82 | 0.663651 | false |
philrosenfield/TPAGB-calib | tpagb_code_in_parallel.py | 1 | 7093 | from IPython import parallel
import time
import matplotlib.pyplot as plt
import numpy as np
import sfh_tests_multi_proc as sfh_tests
import ResolvedStellarPops as rsp
import stats
import model_plots
from data_plots import plot_cum_sum_sfr, plot_cmd_lf
def caller(vSFH, vsfh_kws):
return vSFH.vary_the_SFH(**vsfh_kws)
def main(targets, cmd_inputs, nsfhs, dry_run=False, comp_corr=False):
'''
calls sfh_tests_multi_proc.sfh_tests_multi_proc in most basic way possible
for up to 2 * available processors. Target & cmd_inputs are distributed and
nsfhs are all done per processor. So with 12 processors, you can do up to
24 target and cmd_inputs in one call, or code something better for a change.
'''
clients = parallel.Client()
clients.block = False
clients[:].execute('cd ~/research/TP-AGBcalib/code/TPAGB-calib/')
clients[:].execute('import sfh_tests')
if comp_corr is True:
table_file = 'comp_corr'
else:
table_file = 'default'
vSFHs, vsfh_kws = sfh_tests.prepare_vsfh_run(targets, cmd_inputs, nsfhs,
dry_run=dry_run,
table_file=table_file)
if dry_run is True:
timeout = 10
else:
timeout = 900
# find a better way to run this all at once, what if I need three times
# through?
nprocs = len(clients)
nvsfhs = len(vSFHs)
ntimes = np.min([nprocs, nvsfhs])
ndiff = np.abs(nvsfhs - nprocs)
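# ntimes is the size of the first batch (one vSFH per engine); ndiff is how
# many vSFHs are left over for the second batch below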
if ndiff > nprocs:
print 'need a for loop, too many processes code code code man'
import sys
sys.exit()
print 'calling first set'
res = [clients[i].apply(caller, vSFHs[i], vsfh_kws,)
for i in range(ntimes)]
while False in [r.ready() for r in res]:
print 'waiting on first set'
time.sleep(timeout)
print 'checking first set...'
print 'writing first set'
[vSFHs[i].write_results(res[i].result) for i in range(ntimes)]
print 'calling second set'
res2 = [clients[i].apply(caller, vSFHs[i+ntimes], vsfh_kws,)
for i in range(ndiff)]
while False in [r.ready() for r in res2]:
print 'waiting on second set'
time.sleep(timeout)
print 'checking second set...'
print 'writing second set'
[vSFHs[i+ntimes].write_results(res2[i].result) for i in range(ndiff)]
print 'done.'
def lf_figs(targets, cmd_inputs, nsfhs, outfile_dir='default', extra_str='',
default_kw=None, comp_corr=False, example=True):
import os
if comp_corr is True:
table_file = 'comp_corr'
else:
table_file = 'default'
vSFHs, vsfh_kws = sfh_tests.prepare_vsfh_run(targets, cmd_inputs, nsfhs,
vsfh_kw={'outfile_loc': outfile_dir,
'extra_str': extra_str,
'table_file': table_file},
default_kw=default_kw)
if comp_corr is True:
extra_str += '_comp'
for i in range(len(vSFHs)):
pl = sfh_tests.Plotting(vSFHs[i])
pl.compare_to_gal(extra_str=extra_str,
completeness_correction=comp_corr)
# example LF from the model
if example is True:
for i in range(len(vSFHs)):
pl = sfh_tests.Plotting(vSFHs[i])
best = rsp.fileIO.get_files(os.path.join(outfile_dir, vSFHs[i].target,
vSFHs[i].agb_mod, 'mc'),
'*best.dat')
if len(best) == 0:
continue
pl.compare_to_gal(narratio=False, add_stage_lfs='all',
extra_str='no_data', plot_data=False,
completeness_correction=comp_corr,
plot_models=False,
trilegal_output=best[0])
return
def narratio_table(outfile_dir):
narratio_files = rsp.fileIO.get_files(outfile_dir, '*narratio*dat')
stats.narratio_table(narratio_files)
return
def chi2_stats(targets, cmd_inputs, outfile_dir='default', extra_str=''):
chi2_files = stats.write_chi2_table(targets, cmd_inputs,
outfile_loc=outfile_dir,
extra_str=extra_str)
chi2_dicts = stats.result2dict(chi2_files)
stats.chi2plot(chi2_dicts, outfile_loc=outfile_dir)
chi2_files = stats.write_chi2_table(targets, cmd_inputs,
outfile_loc=outfile_dir,
extra_str=extra_str,
just_gauss=True)
plt.close('all')
return
def analysis(targets, cmd_inputs, nsfhs, outfile_dirs, extra_str='',
comp_corr=False, default_kw=None):
default_kw = default_kw or {}
# _mstar or _cstar:
if 'star' in extra_str:
from TPAGBparams import snap_src
default_kw = dict({'galaxy_input_src': snap_src + '/input/tests/',
'galaxy_input_search_fmt': '*%s' + '%s.dat' % extra_str}.items()
+ default_kw.items())
for out_dir in outfile_dirs:
lf_figs(targets, cmd_inputs, nsfhs, outfile_dir=out_dir,
extra_str=extra_str, default_kw=default_kw,
comp_corr=comp_corr)
#chi2_stats(targets, cmd_inputs, outfile_dir=out_dir,
# extra_str=extra_str)
#narratio_table(out_dir)
#model_plots.plot_random_sfhs(targets)
def all_data_plots(targets):
plot_cum_sum_sfr(targets)
[[plot_cmd_lf(target, band) for target in targets] for band in ['opt', 'ir']]
if __name__ == '__main__':
# could be an input file:
targets = ['scl-de1', 'ddo71', 'kkh37', 'ddo78', 'hs117', 'ngc2976-deep']
#targets = ['scl-de1']
cmd_inputs = ['cmd_input_CAF09_S_NOV13.dat',
'cmd_input_CAF09_S_NOV13eta0.dat',
'cmd_input_CAF09_S_OCT13.dat']
models = [c.split('S_')[1].replace('.dat', '') for c in cmd_inputs]
#outfile_dirs = ['/home/rosenfield/research/TP-AGBcalib/SNAP/models/varysfh/match-hmc/']
outfile_dirs = ['/home/rosenfield/research/TP-AGBcalib/SNAP/models/varysfh/comp_corr']
nsfhs = 5
dry_run = True
comp_corr = True
#main(targets, cmd_inputs, nsfhs, dry_run=dry_run, comp_corr=comp_corr)
#all_data_plots(targets)
#model_plots.agb_lifetimes(models, z='all')
#model_plots.compare_agb_lifetimes()
analysis(targets, cmd_inputs, nsfhs, outfile_dirs, comp_corr=comp_corr)
#model_plots.tpagb_mass_histograms(chi2_location=outfile_dirs[0],
# band='opt', dry_run=True, model='nov13',
# model_src=outfile_dirs[0], force=True,
# cumsum=False)
#[model_plots.compare_mass_loss(masses=m, z=0.001, paola=True)
# for m in [1., 2.]]
| bsd-3-clause | 2,563,227,966,874,228,700 | 38.187845 | 92 | 0.559707 | false |
ionelmc/python-stampede | tests/helper.py | 1 | 1491 | import logging
import os
import sys
import time
from stampede import StampedeWorker
try:
from pytest_cov.embed import cleanup
os._exit = lambda code, original=os._exit: cleanup() or original(code)
except ImportError:
pass
PATH = '/tmp/stampede-tests'
class MockedStampedeWorker(StampedeWorker):
alarm_time = 1
def handle_task(self, workspace_name):
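# the first command-line argument selects which behaviour this worker simulates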
entrypoint = sys.argv[1]
if entrypoint == 'simple':
logging.critical('JOB %s EXECUTED', workspace_name.decode('ascii'))
elif entrypoint == 'fail':
raise Exception('FAIL')
elif entrypoint == 'queue_collapse':
assert workspace_name == b'queue_collapse'
time.sleep(0.35)
logging.critical('queue_collapse OK')
elif entrypoint == 'timeout':
logging.critical('timeout STARTED')
time.sleep(2)
logging.critical('timeout FAIL')
elif entrypoint == 'custom_exit_code':
raise SystemExit(123)
elif entrypoint == 'bad_client':
logging.critical('JOB %s EXECUTED', workspace_name)
time.sleep(0.1)
else:
raise RuntimeError('Invalid test spec %r.' % entrypoint)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG,
format='[pid=%(process)d - %(asctime)s]: %(name)s - %(levelname)s - %(message)s',
)
daemon = MockedStampedeWorker(PATH)
daemon.run()
logging.info("DONE.")
| bsd-2-clause | -795,992,656,746,797,400 | 27.673077 | 89 | 0.604963 | false |
rarsan/azure-quickstart-templates | bosh-setup/scripts/setup_env.py | 19 | 11054 | #!/usr/bin/env python
import json
import netaddr
import os
import random
import re
import requests
import sys
import ruamel.yaml
import base64
from azure.storage.blob import AppendBlobService
from azure.storage.table import TableService
import azure.mgmt.network
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient, NetworkManagementClientConfiguration
def my_represent_none(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
def prepare_storage(settings):
default_storage_account_name = settings["DEFAULT_STORAGE_ACCOUNT_NAME"]
storage_access_key = settings["DEFAULT_STORAGE_ACCESS_KEY"]
endpoint_suffix = settings["SERVICE_HOST_BASE"]
protocol = "https"
if settings["ENVIRONMENT"] == "AzureStack":
protocol = "http"
blob_service = AppendBlobService(account_name=default_storage_account_name, account_key=storage_access_key, endpoint_suffix=endpoint_suffix, protocol=protocol)
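# 'bosh' holds BOSH blobs; 'stemcell' is created with public blob access so
# uploaded stemcells can be downloaded anonymously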
blob_service.create_container('bosh')
blob_service.create_container(
container_name='stemcell',
public_access='blob'
)
# Prepare the table for storing metadata of storage accounts and stemcells
table_service = TableService(account_name=default_storage_account_name, account_key=storage_access_key, endpoint_suffix=endpoint_suffix, protocol=protocol)
table_service.create_table('stemcells')
# file_path: String. The path to the file in which some configs starting with 'REPLACE_WITH_' need to be replaced with the actual values.
# keys: Array. The keys indicate which configs should be replaced in the file.
# values: Dict. Key-value pairs indicate which configs should be replaced by what values.
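# For example, render_file('bosh.yml', ['NTP_SERVERS'],
#     {'NTP_SERVERS': '0.north-america.pool.ntp.org'}) replaces every
#     occurrence of REPLACE_WITH_NTP_SERVERS in bosh.yml with that value.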
def render_file(file_path, keys, values):
try:
with open(file_path, 'r') as tmpfile:
contents = tmpfile.read()
for key in keys:
contents = re.compile(re.escape("REPLACE_WITH_{0}".format(key))).sub(values[key], contents)
with open(file_path, 'w') as tmpfile:
tmpfile.write(contents)
return True
except Exception as e:
print("render_file - {0}: {1}".format(file_path, str(e)))
return False
def render_bosh_manifest(settings):
with open('bosh.pub', 'r') as tmpfile:
ssh_public_key = tmpfile.read().strip()
ip = netaddr.IPNetwork(settings['SUBNET_ADDRESS_RANGE_FOR_BOSH'])
gateway_ip = str(ip[1])
bosh_director_ip = str(ip[4])
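# Convention used here: the subnet's first host address (ip[1]) is the gateway
# and the fourth host address (ip[4]) is reserved for the BOSH director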
ntp_servers_maps = {
"AzureCloud": "0.north-america.pool.ntp.org",
"AzureChinaCloud": "1.cn.pool.ntp.org, 1.asia.pool.ntp.org, 0.asia.pool.ntp.org",
"AzureUSGovernment": "0.north-america.pool.ntp.org",
"AzureGermanCloud": "0.europe.pool.ntp.org",
"AzureStack": "0.north-america.pool.ntp.org"
}
environment = settings["ENVIRONMENT"]
ntp_servers = ntp_servers_maps[environment]
postgres_address_maps = {
"AzureCloud": "127.0.0.1",
"AzureChinaCloud": bosh_director_ip,
"AzureUSGovernment": "127.0.0.1",
"AzureGermanCloud": "127.0.0.1",
"AzureStack": "127.0.0.1"
}
postgres_address = postgres_address_maps[environment]
keys = [
"SUBNET_ADDRESS_RANGE_FOR_BOSH",
"SECONDARY_DNS",
"VNET_NAME",
"SUBNET_NAME_FOR_BOSH",
"DNS_RECURSOR",
"SUBSCRIPTION_ID",
"DEFAULT_STORAGE_ACCOUNT_NAME",
"RESOURCE_GROUP_NAME",
"KEEP_UNREACHABLE_VMS",
"TENANT_ID",
"CLIENT_ID",
"CLIENT_SECRET",
"BOSH_PUBLIC_IP",
"NSG_NAME_FOR_BOSH",
"BOSH_RELEASE_URL",
"BOSH_RELEASE_SHA1",
"BOSH_AZURE_CPI_RELEASE_URL",
"BOSH_AZURE_CPI_RELEASE_SHA1",
"DYNAMIC_STEMCELL_URL",
"DYNAMIC_STEMCELL_SHA1",
"ENVIRONMENT",
"BOSH_VM_SIZE",
"SSH_PUBLIC_KEY",
"GATEWAY_IP",
"BOSH_DIRECTOR_IP",
"NTP_SERVERS",
"POSTGRES_ADDRESS"
]
values = settings.copy()
values["SSH_PUBLIC_KEY"] = ssh_public_key
values["GATEWAY_IP"] = gateway_ip
values["BOSH_DIRECTOR_IP"] = bosh_director_ip
values["NTP_SERVERS"] = ntp_servers
values["POSTGRES_ADDRESS"] = postgres_address
manifest_file = "bosh.yml"
render_file(manifest_file, keys, values)
if environment == "AzureStack":
azure_stack_properties = {
"domain": str(values["AZURE_STACK_DOMAIN"]),
"authentication": "AzureAD",
"resource": str(values["AZURE_STACK_RESOURCE"]),
"endpoint_prefix": "management",
"skip_ssl_validation": True,
"use_http_to_access_storage_account": True
}
with open(manifest_file, "r") as conf:
manifest = ruamel.yaml.round_trip_load(conf, preserve_quotes=True)
manifest['cloud_provider']['properties']['azure']['azure_stack'] = azure_stack_properties
with open(manifest_file, "w") as conf:
ruamel.yaml.round_trip_dump(manifest, conf)
return bosh_director_ip
def get_cloud_foundry_configuration(settings, bosh_director_ip):
dns_maps = {
"AzureCloud": "168.63.129.16\n - {0}".format(settings["SECONDARY_DNS"]),
"AzureChinaCloud": bosh_director_ip,
"AzureUSGovernment": "168.63.129.16\n - {0}".format(settings["SECONDARY_DNS"]),
"AzureGermanCloud": "168.63.129.16\n - {0}".format(settings["SECONDARY_DNS"]),
"AzureStack": "168.63.129.16\n - {0}".format(settings["SECONDARY_DNS"])
}
config = {}
config["DNS"] = dns_maps[settings["ENVIRONMENT"]]
config["SYSTEM_DOMAIN"] = "{0}.xip.io".format(settings["CLOUD_FOUNDRY_PUBLIC_IP"])
keys = [
"VNET_NAME",
"SUBNET_NAME_FOR_CLOUD_FOUNDRY",
"CLOUD_FOUNDRY_PUBLIC_IP",
"NSG_NAME_FOR_CLOUD_FOUNDRY",
"ENVIRONMENT",
"DEFAULT_STORAGE_ACCOUNT_NAME",
"DEFAULT_STORAGE_ACCESS_KEY"
]
for key in keys:
config[key] = settings[key]
return config
def render_cloud_foundry_manifest(settings, bosh_director_ip):
config = get_cloud_foundry_configuration(settings, bosh_director_ip)
if settings["ENVIRONMENT"] == "AzureStack":
manifest_file = "multiple-vm-cf.yml"
update_cloud_foundry_manifest_for_azurestack(manifest_file)
render_file(manifest_file, config.keys(), config)
else:
for manifest_file in ["single-vm-cf.yml", "multiple-vm-cf.yml"]:
render_file(manifest_file, config.keys(), config)
def update_cloud_foundry_manifest_for_azurestack(manifest_file):
with open(manifest_file, "r") as conf:
manifest = ruamel.yaml.round_trip_load(conf, preserve_quotes=True)
# Use smaller VM size
manifest["compilation"]["cloud_properties"]["instance_type"] = "Standard_A1"
for resource_pool in manifest["resource_pools"]:
if resource_pool["name"].startswith("cell"):
resource_pool["cloud_properties"]["instance_type"] = "Standard_A4"
else:
resource_pool["cloud_properties"]["instance_type"] = "Standard_A1"
# In AzureStack, availability sets can only be configured with a fault domain of one and an update domain of one
resource_pool["cloud_properties"]["platform_update_domain_count"] = 1
resource_pool["cloud_properties"]["platform_fault_domain_count"] = 1
# Use webdav as the blobstore since fog is not supported in AzureStack
webdav_config = {
"blobstore_timeout": 5,
"ca_cert": "REPLACE_WITH_BLOBSTORE_CA_CERT",
"password": "REPLACE_WITH_BLOBSTORE_PASSWORD",
"private_endpoint": "https://blobstore.service.cf.internal:4443",
"public_endpoint": "http://blobstore.REPLACE_WITH_SYSTEM_DOMAIN",
"username": "blobstore"
}
for item in ["buildpacks", "droplets", "packages", "resource_pool"]:
manifest["properties"]["cc"][item]["blobstore_type"] = "webdav"
manifest["properties"]["cc"][item]["fog_connection"] = None
manifest["properties"]["cc"][item]["webdav_config"] = webdav_config
for job in manifest["jobs"]:
if job["name"].startswith("blobstore"):
job["instances"] = 1
blobstore = {
"admin_users": [
{
"password": "REPLACE_WITH_BLOBSTORE_PASSWORD",
"username": "blobstore"
}
],
"port": 8080,
"secure_link": {
"secret": "REPLACE_WITH_BLOBSTORE_SECRET"
},
"tls": {
"ca_cert": "REPLACE_WITH_BLOBSTORE_CA_CERT",
"cert": "REPLACE_WITH_BLOBSTORE_TLS_CERT",
"port": 4443,
"private_key": "REPLACE_WITH_BLOBSTORE_PRIVATE_KEY"
}
}
manifest["properties"]["blobstore"] = blobstore
with open(manifest_file, "w") as conf:
ruamel.yaml.round_trip_dump(manifest, conf)
def render_bosh_deployment_cmd(bosh_director_ip):
keys = ["BOSH_DIRECOT_IP"]
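# Note: the key spelling 'BOSH_DIRECOT_IP' is kept as-is; it presumably matches
# the REPLACE_WITH_BOSH_DIRECOT_IP placeholder inside the deploy_bosh.sh template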
values = {}
values["BOSH_DIRECOT_IP"] = bosh_director_ip
render_file("deploy_bosh.sh", keys, values)
def render_cloud_foundry_deployment_cmd(settings):
keys = [
"STATIC_STEMCELL_URL",
"STATIC_STEMCELL_SHA1",
"STATIC_CF_RELEASE_URL",
"STATIC_CF_RELEASE_SHA1",
"STATIC_DIEGO_RELEASE_URL",
"STATIC_DIEGO_RELEASE_SHA1",
"STATIC_GARDEN_RELEASE_URL",
"STATIC_GARDEN_RELEASE_SHA1",
"STATIC_CFLINUXFS2_RELEASE_URL",
"STATIC_CFLINUXFS2_RELEASE_SHA1",
"DYNAMIC_STEMCELL_URL",
"DYNAMIC_STEMCELL_SHA1",
"DYNAMIC_CF_RELEASE_URL",
"DYNAMIC_CF_RELEASE_SHA1",
"DYNAMIC_DIEGO_RELEASE_URL",
"DYNAMIC_DIEGO_RELEASE_SHA1",
"DYNAMIC_GARDEN_RELEASE_URL",
"DYNAMIC_GARDEN_RELEASE_SHA1",
"DYNAMIC_CFLINUXFS2_RELEASE_URL",
"DYNAMIC_CFLINUXFS2_RELEASE_SHA1"
]
render_file("deploy_cloudfoundry.sh", keys, settings)
def get_settings():
settings = dict()
config_file = sys.argv[4]
with open(config_file) as f:
settings = json.load(f)
settings['TENANT_ID'] = sys.argv[1]
settings['CLIENT_ID'] = sys.argv[2]
settings['CLIENT_SECRET'] = base64.b64decode(sys.argv[3])
print "tenant_id: {0}xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".format(settings['TENANT_ID'][0:4])
print "client_id: {0}xxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".format(settings['CLIENT_ID'][0:4])
print "The length of client_secret is {0}".format(len(settings['CLIENT_SECRET']))
return settings
def main():
settings = get_settings()
with open('settings', "w") as tmpfile:
tmpfile.write(json.dumps(settings, indent=4, sort_keys=True))
prepare_storage(settings)
bosh_director_ip = render_bosh_manifest(settings)
render_bosh_deployment_cmd(bosh_director_ip)
render_cloud_foundry_manifest(settings, bosh_director_ip)
render_cloud_foundry_deployment_cmd(settings)
if __name__ == "__main__":
ruamel.yaml.RoundTripRepresenter.add_representer(type(None), my_represent_none)
main()
| mit | -7,124,612,768,369,966,000 | 37.381944 | 163 | 0.635697 | false |
apache/beam | sdks/python/apache_beam/io/gcp/datastore/v1new/query_splitter_test.py | 5 | 9623 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Cloud Datastore query splitter test."""
# pytype: skip-file
import unittest
import mock
# Protect against environments where datastore library is not available.
try:
from apache_beam.io.gcp.datastore.v1new import helper
from apache_beam.io.gcp.datastore.v1new import query_splitter
from apache_beam.io.gcp.datastore.v1new import types
from apache_beam.io.gcp.datastore.v1new.query_splitter import SplitNotPossibleError
from google.cloud.datastore import key
except ImportError:
query_splitter = None # type: ignore
@unittest.skipIf(query_splitter is None, 'GCP dependencies are not installed')
class QuerySplitterTest(unittest.TestCase):
_PROJECT = 'project'
_NAMESPACE = 'namespace'
def create_query(
self,
kinds=(),
order=False,
limit=None,
offset=None,
inequality_filter=False):
kind = None
filters = []
if kinds:
kind = kinds[0]
if order:
order = ['prop1']
if inequality_filter:
filters = [('prop1', '>', 'value1')]
return types.Query(kind=kind, filters=filters, order=order, limit=limit)
def test_get_splits_query_with_order(self):
query = self.create_query(kinds=['a'], order=True)
with self.assertRaisesRegex(SplitNotPossibleError, r'sort orders'):
query_splitter.get_splits(None, query, 3)
def test_get_splits_query_with_unsupported_filter(self):
query = self.create_query(kinds=['a'], inequality_filter=True)
with self.assertRaisesRegex(SplitNotPossibleError, r'inequality filters'):
query_splitter.get_splits(None, query, 2)
def test_get_splits_query_with_limit(self):
query = self.create_query(kinds=['a'], limit=10)
with self.assertRaisesRegex(SplitNotPossibleError, r'limit set'):
query_splitter.get_splits(None, query, 2)
def test_get_splits_query_with_num_splits_of_one(self):
query = self.create_query()
with self.assertRaisesRegex(SplitNotPossibleError, r'num_splits'):
query_splitter.get_splits(None, query, 1)
def test_create_scatter_query(self):
query = types.Query(kind='shakespeare-demo')
num_splits = 10
scatter_query = query_splitter._create_scatter_query(query, num_splits)
self.assertEqual(scatter_query.kind, query.kind)
self.assertEqual(
scatter_query.limit, (num_splits - 1) * query_splitter.KEYS_PER_SPLIT)
self.assertEqual(
scatter_query.order, [query_splitter.SCATTER_PROPERTY_NAME])
self.assertEqual(
scatter_query.projection, [query_splitter.KEY_PROPERTY_NAME])
def check_get_splits(self, query, num_splits, num_entities):
"""A helper method to test the query_splitter get_splits method.
Args:
query: the query to be split
num_splits: number of splits
num_entities: number of scatter entities returned to the splitter.
"""
# Test for random long ids, string ids, and a mix of both.
for id_or_name in [True, False, None]:
if id_or_name is None:
client_entities = helper.create_client_entities(num_entities, False)
client_entities.extend(
helper.create_client_entities(num_entities, True))
num_entities *= 2
else:
client_entities = helper.create_client_entities(
num_entities, id_or_name)
mock_client = mock.MagicMock()
mock_client_query = mock.MagicMock()
mock_client_query.fetch.return_value = client_entities
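# Patch Query._to_client_query so get_splits sees our fake scatter entities
# instead of issuing a real Datastore request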
with mock.patch.object(types.Query,
'_to_client_query',
return_value=mock_client_query):
split_queries = query_splitter.get_splits(
mock_client, query, num_splits)
mock_client_query.fetch.assert_called_once()
# if request num_splits is greater than num_entities, the best it can
# do is one entity per split.
expected_num_splits = min(num_splits, num_entities + 1)
self.assertEqual(len(split_queries), expected_num_splits)
# Verify no gaps in key ranges. Filters should look like:
# query1: (__key__ < key1)
# query2: (__key__ >= key1), (__key__ < key2)
# ...
# queryN: (__key__ >= keyN-1)
prev_client_key = None
last_query_seen = False
for split_query in split_queries:
self.assertFalse(last_query_seen)
lt_key = None
gte_key = None
for _filter in split_query.filters:
self.assertEqual(query_splitter.KEY_PROPERTY_NAME, _filter[0])
if _filter[1] == '<':
lt_key = _filter[2]
elif _filter[1] == '>=':
gte_key = _filter[2]
# Case where the scatter query has no results.
if lt_key is None and gte_key is None:
self.assertEqual(1, len(split_queries))
break
if prev_client_key is None:
self.assertIsNone(gte_key)
self.assertIsNotNone(lt_key)
prev_client_key = lt_key
else:
self.assertEqual(prev_client_key, gte_key)
prev_client_key = lt_key
if lt_key is None:
last_query_seen = True
def test_get_splits_with_two_splits(self):
query = self.create_query(kinds=['shakespeare-demo'])
num_splits = 2
num_entities = 97
self.check_get_splits(query, num_splits, num_entities)
def test_get_splits_with_multiple_splits(self):
query = self.create_query(kinds=['shakespeare-demo'])
num_splits = 4
num_entities = 369
self.check_get_splits(query, num_splits, num_entities)
def test_get_splits_with_large_num_splits(self):
query = self.create_query(kinds=['shakespeare-demo'])
num_splits = 10
num_entities = 4
self.check_get_splits(query, num_splits, num_entities)
def test_get_splits_with_small_num_entities(self):
query = self.create_query(kinds=['shakespeare-demo'])
num_splits = 4
num_entities = 50
self.check_get_splits(query, num_splits, num_entities)
def test_get_splits_with_batch_size_exact_multiple(self):
"""Test get_splits when num scatter keys is a multiple of batch size."""
query = self.create_query(kinds=['shakespeare-demo'])
num_splits = 4
num_entities = 400
self.check_get_splits(query, num_splits, num_entities)
def test_get_splits_with_large_batch_size(self):
"""Test get_splits when all scatter keys are returned in a single req."""
query = self.create_query(kinds=['shakespeare-demo'])
num_splits = 4
num_entities = 400
self.check_get_splits(query, num_splits, num_entities)
def test_get_splits_with_num_splits_gt_entities(self):
query = self.create_query(kinds=['shakespeare-demo'])
num_splits = 10
num_entities = 4
self.check_get_splits(query, num_splits, num_entities)
def test_id_or_name(self):
id_ = query_splitter.IdOrName(1)
self.assertEqual(1, id_.id)
self.assertIsNone(id_.name)
name = query_splitter.IdOrName('1')
self.assertIsNone(name.id)
self.assertEqual('1', name.name)
self.assertEqual(query_splitter.IdOrName(1), query_splitter.IdOrName(1))
self.assertEqual(query_splitter.IdOrName('1'), query_splitter.IdOrName('1'))
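# Keys with numeric ids sort before keys with string names, regardless of value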
self.assertLess(query_splitter.IdOrName(2), query_splitter.IdOrName('1'))
self.assertLess(query_splitter.IdOrName(1), query_splitter.IdOrName(2))
self.assertLess(query_splitter.IdOrName('1'), query_splitter.IdOrName('2'))
def test_client_key_sort_key(self):
k = key.Key('kind1', 1, project=self._PROJECT, namespace=self._NAMESPACE)
k2 = key.Key('kind2', 'a', parent=k)
k3 = key.Key('kind2', 'b', parent=k)
k4 = key.Key('kind1', 'a', project=self._PROJECT, namespace=self._NAMESPACE)
k5 = key.Key('kind1', 'a', project=self._PROJECT)
keys = [k5, k, k4, k3, k2, k2, k]
expected_sort = [k5, k, k, k2, k2, k3, k4]
keys.sort(key=query_splitter.client_key_sort_key)
self.assertEqual(expected_sort, keys)
def test_client_key_sort_key_ids(self):
k1 = key.Key('kind', 2, project=self._PROJECT)
k2 = key.Key('kind', 1, project=self._PROJECT)
keys = [k1, k2]
expected_sort = [k2, k1]
keys.sort(key=query_splitter.client_key_sort_key)
self.assertEqual(expected_sort, keys)
def test_client_key_sort_key_names(self):
k1 = key.Key('kind', '2', project=self._PROJECT)
k2 = key.Key('kind', '1', project=self._PROJECT)
keys = [k1, k2]
expected_sort = [k2, k1]
keys.sort(key=query_splitter.client_key_sort_key)
self.assertEqual(expected_sort, keys)
def test_client_key_sort_key_ids_vs_names(self):
# Keys with IDs always come before keys with names.
k1 = key.Key('kind', '1', project=self._PROJECT)
k2 = key.Key('kind', 2, project=self._PROJECT)
keys = [k1, k2]
expected_sort = [k2, k1]
keys.sort(key=query_splitter.client_key_sort_key)
self.assertEqual(expected_sort, keys)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 3,151,941,264,413,542,400 | 35.729008 | 85 | 0.666736 | false |
wendlers/usherpa-pysherpa | example-src/uSherpaAnalogRead.py | 1 | 1755 | ##
# This file is part of the uSherpa Python Library project
#
# Copyright (C) 2012 Stefan Wendler <[email protected]>
#
# The uSherpa Python Library is free software; you can redistribute
# it and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# uSherpa Python Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with the JSherpa firmware; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
# 02111-1307 USA.
##
'''
This file is part of the uSherpa Python Library project
'''
import traceback
from usherpa.api import *
from usherpa.serialcomm import *
# Searial Packet stream instance
ps = None
try:
print "uSherpaAnalogRead"
ps = SerialPacketStream("/dev/ttyUSB0")
ps.start()
us = uSherpa(ps)
# configure pin 1.5 for analog input
print "Set P1.5 to ANALOG input: "
us.pinMode(uSherpa.PIN_1_5, uSherpa.ANALOG)
print "-> OK"
# perform analog read on pin 1.5
a = us.analogRead(uSherpa.PIN_1_5);
# convert value from analog read to volts:
# - assuming Vmax is 3.3V
# - assuming max value from analog read is 1024
v = (3.3 / 1024.0) * a
print "-> OK: ~ volts " + `v` + " (" + `a` + ")"
# reset MCU
print "RESET: "
us.reset()
print "-> OK"
except Exception as e:
print traceback.format_exc()
finally:
if not ps == None:
ps.stop()
| lgpl-2.1 | -2,108,981,335,989,746,700 | 24.808824 | 76 | 0.702564 | false |
MSMBA/msmba-workflow | msmba-workflow/srclib/wax/examples/trayicon1.py | 1 | 1923 | # trayicon1.py
# Based on wxPython example at http://norfolkgraphics.com/python.php
from wax import *
class MainFrame(Frame):
def Body(self):
self.tbicon = TaskBarIcon()
self.status = 0
self.UpdateIcon()
self.tbicon.OnLeftDoubleClick = self.RestoreWindow
self.tbicon.OnRightUp = self.ShowTaskBarMenu
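# double-clicking the tray icon restores the window; right-clicking opens the menu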
self.Show(1)
def UpdateIcon(self):
""" Update icon based on current state. """
if self.status == 0:
self.tbicon.SetIcon('icon1.ico', 'Icon demo - state 1')
else:
self.tbicon.SetIcon('icon2.ico', 'Icon demo - state 2')
def ToggleIcon(self, event):
self.status = not self.status
self.UpdateIcon()
def RestoreWindow(self, event=None):
""" Show/restore main window. """
self.Show(1)
self.Iconize(0)
def HideWindow(self, event=None):
self.Iconize(1)
def ShowTaskBarMenu(self, event=None):
menu = Menu(self.tbicon)
# choose Show/Hide based on current window state
if self.IsIconized():
menu.Append('&Show window', self.RestoreWindow)
else:
menu.Append('&Hide window', self.HideWindow)
# these entries are always present
menu.Append('&Toggle Icon', self.ToggleIcon)
menu.Append('E&xit', self.ExitApp)
self.tbicon.PopupMenu(menu)
#menu.Destroy() # ...why?
#core.wx.GetApp().ProcessIdle() # ...?
def ExitApp(self, event):
self.Close()
def OnIconize(self, event=None):
self.Iconize(1) # minimize
self.Show(0) # hide taskbar button
def OnClose(self, event):
# unfortunately, this is necessary, otherwise the program will hang
# on exit
self.tbicon.Destroy()
event.Skip()
if __name__ == "__main__":
app = Application(MainFrame, title='trayicon demo')
app.Run()
| gpl-2.0 | 3,469,310,579,633,343,000 | 25.708333 | 75 | 0.593864 | false |
x75/smq | smq/utils.py | 1 | 4882 | """smq util functions"""
import time
import numpy as np
from collections import OrderedDict
def get_items(items_conf):
"""generic function creating a list of objects from a config specification
objects are: analyses, robots, worlds, tasks, losses, ...
"""
items = []
for i, item_conf in enumerate(items_conf):
# instantiate an item of class "class" with the given configuration
# and append to list
items.append(item_conf["class"](item_conf))
# return list
return items
def get_items2(conf, item = None):
"""generic function creating a list of objects from a config specification
objects are: analyses, robots, worlds, tasks, losses, ...
"""
if item is None: return
items = []
for i, item_conf in enumerate(conf[item]):
# instantiate an item of class "class" with the given configuration
# and append to list
items.append(item_conf["class"](item_conf, conf["ifs"][i]))
# return list
return items
def get_items_with_ref(conf, ref = None):
"""generic function creating a list of objects from a config specification
objects are: analyses, robots, worlds, tasks, losses, ...
"""
if ref is None: return
items = []
for i, item_conf in enumerate(conf):
# instantiate an item of class "class" with the given configuration
# and append to list
items.append(item_conf["class"](item_conf, ref))
# return list
return items
def set_attr_from_dict(obj, dictionary):
"""set attributes of an object with names from the dictionary's keys and their values from the dictionary's values"""
for k,v in dictionary.items():
setattr(obj, k, v)
def set_attr_from_dict_ifs(ref, ifs_conf):
"""set the attributes in the ref'erenced from the interfaces configurations ifs_conf"""
# could also just use the ref.attr notation?
setattr(ref, "dim", 0)
setattr(ref, "dimnames", [])
setattr(ref, "smdict", OrderedDict())
setattr(ref, "smdict_index", OrderedDict())
setattr(ref, "ifs", [])
# ref.smstruct = ["dim_s_proprio", "dim_s_extero", "dim_s_intero", "dim_s_reward", "dim_s_pred", "dim_s_motor"]
# interface
for k in ifs_conf.keys():
# print "k", k
ref.ifs.append(k)
# smdict key
k_ = k.replace("dim_", "")
# dim of that part is length of fields array
dim_ = len(ifs_conf[k])
# set the class attribute
setattr(ref, k, dim_)
# count overall dims
ref.dim += dim_
# collect all variable names
ref.smdict_index[k_] = {}
for i, dimname in enumerate(ifs_conf[k]):
# this is local index for given dim group
ref.smdict_index[k_][dimname] = i
# this is globally associated with ref.sm
ref.dimnames.append(dimname)
# now that we know the dimensions of each part of sm space, initialize the vectors
ref.smdict[k_] = np.zeros((dim_, 1))
# print "%s ref(%s).smdict = %s" % ("set_attr_from_dict_ifs", ref.__class__.__name__, ref.smdict)
def make_column_names_numbered(base = "base", times = 1):
"""create an array of numbered instances of a base string"""
return ["%s%d" % (base, i) for i in range(times)]
def make_expr_id(name):
return "%s_%s" % (name, time.strftime("%Y%m%d_%H%M%S"))
def make_robot_name(expr_id, name, number):
return "%s_%s_%d" % (expr_id, name, number)
################################################################################
# cartesian to polar transformation for n dimensions
def ct_dynamic(r, alpha):
"""alpha: the n-2 values between [0,\pi) and last one between [0,2\pi)
"""
x = np.zeros(len(alpha) + 1)
s = 1
for e, a in enumerate(alpha):
x[e] = s*np.cos(a)
s *= np.sin(a)
x[len(alpha)] = s
return x*r
def ct_pol2car(r, arr):
"""n-sphere polar to cartesian"""
a1 = np.array([2*np.pi])
print "a1.shape= %s, arr.shape = %s" % (a1.shape, arr.shape)
a = np.concatenate((a1, arr))
si = np.sin(a)
si[0] = 1
si = np.cumprod(si)
co = np.cos(a)
co = np.roll(co, -1)
return si*co*r
def ct_car2pol(x):
"""n-sphere cartesian to polar, x cartesian column vector"""
p = np.zeros_like(x)
r = np.linalg.norm(x)
p[0] = r
for i in range(x.shape[0]-1):
if i == x.shape[0]-2:
if x[i+1] >= 0:
phi = np.arccos(x[i] / np.sqrt(np.sum(np.square(x[i:]))))
elif x[i+1] < 0:
phi = 2 * np.pi - np.arccos(x[i] / np.sqrt(np.sum(np.square(x[i:]))))
else:
phi = np.arccos(x[i] / np.sqrt(np.sum(np.square(x[i:]))))
p[i+1] = phi
return p
################################################################################
# some signal processing and math function helpers
| mit | -6,850,227,168,475,064,000 | 33.380282 | 121 | 0.571897 | false |
pyconau2017/symposion | symposion/proposals/templatetags/proposal_tags.py | 8 | 2220 | from django import template
from symposion.proposals.models import AdditionalSpeaker
register = template.Library()
class AssociatedProposalsNode(template.Node):
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) == 3 and bits[1] == "as":
return cls(bits[2])
else:
raise template.TemplateSyntaxError("%r takes 'as var'" % bits[0])
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
request = context["request"]
if request.user.speaker_profile:
pending = AdditionalSpeaker.SPEAKING_STATUS_ACCEPTED
speaker = request.user.speaker_profile
queryset = AdditionalSpeaker.objects.filter(speaker=speaker, status=pending)
context[self.context_var] = [item.proposalbase for item in queryset]
else:
context[self.context_var] = None
return u""
class PendingProposalsNode(template.Node):
@classmethod
def handle_token(cls, parser, token):
bits = token.split_contents()
if len(bits) == 3 and bits[1] == "as":
return cls(bits[2])
else:
raise template.TemplateSyntaxError("%r takes 'as var'" % bits[0])
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
request = context["request"]
if request.user.speaker_profile:
pending = AdditionalSpeaker.SPEAKING_STATUS_PENDING
speaker = request.user.speaker_profile
queryset = AdditionalSpeaker.objects.filter(speaker=speaker, status=pending)
context[self.context_var] = [item.proposalbase for item in queryset]
else:
context[self.context_var] = None
return u""
@register.tag
def pending_proposals(parser, token):
"""
{% pending_proposals as pending_proposals %}
"""
return PendingProposalsNode.handle_token(parser, token)
@register.tag
def associated_proposals(parser, token):
"""
{% associated_proposals as associated_proposals %}
"""
return AssociatedProposalsNode.handle_token(parser, token)
| bsd-3-clause | -7,021,176,226,636,613,000 | 29.833333 | 88 | 0.638739 | false |
deuscoin-org/deuscoin-core | qa/rpc-tests/proxy_test.py | 1 | 8199 | #!/usr/bin/env python2
# Copyright (c) 2015 The Deuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import DeuscoinTestFramework
from test_framework.util import *
from test_framework.netutil import test_ipv6_local
'''
Test plan:
- Start deuscoinds with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on deuscoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create deuscoinds that connect to them
- Manipulate the deuscoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(DeuscoinTestFramework):
def __init__(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
print "Warning: testing without local IPv6 support"
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
return start_nodes(4, self.options.tmpdir, extra_args=args)
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: deuscoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: deuscoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("deuscoinostk4e4re.onion:8333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "deuscoinostk4e4re.onion")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| mit | -6,866,753,585,878,837,000 | 42.152632 | 146 | 0.610928 | false |
arielalmendral/ert | python/python/ert_gui/ide/keywords/ensemble_keywords.py | 5 | 6515 | from ert_gui.ide.keywords.definitions import IntegerArgument, KeywordDefinition, ConfigurationLineDefinition, PathArgument, StringArgument
class EnsembleKeywords(object):
def __init__(self, ert_keywords):
super(EnsembleKeywords, self).__init__()
self.group = "Ensemble"
ert_keywords.addKeyword(self.addNumRealizations())
ert_keywords.addKeyword(self.addEnsPath())
ert_keywords.addKeyword(self.addSelectCase())
ert_keywords.addKeyword(self.addEndDate())
ert_keywords.addKeyword(self.addHistorySource())
ert_keywords.addKeyword(self.addRefCase())
ert_keywords.addKeyword(self.addInclude())
ert_keywords.addKeyword(self.addObsConfig())
ert_keywords.addKeyword(self.addResultPath())
ert_keywords.addKeyword(self.addLicensePath())
ert_keywords.addKeyword(self.addLocalConfig())
ert_keywords.addKeyword(self.addRefcaseList())
def addNumRealizations(self):
num_realizations = ConfigurationLineDefinition(keyword=KeywordDefinition("NUM_REALIZATIONS"),
arguments=[IntegerArgument(from_value=1)],
documentation_link="keywords/num_realizations",
required=True,
group=self.group)
return num_realizations
def addEndDate(self):
end_date = ConfigurationLineDefinition(keyword=KeywordDefinition("END_DATE"),
arguments=[StringArgument()],
documentation_link="keywords/end_date",
required=False,
group=self.group)
return end_date
def addEnsPath(self):
ens_path = ConfigurationLineDefinition(keyword=KeywordDefinition("ENSPATH"),
arguments=[PathArgument()],
documentation_link="keywords/enspath",
required=False,
group=self.group)
return ens_path
def addSelectCase(self):
select_case = ConfigurationLineDefinition(keyword=KeywordDefinition("SELECT_CASE"),
arguments=[StringArgument()],
documentation_link="keywords/select_case",
required=False,
group=self.group)
return select_case
def addHistorySource(self):
history_source = ConfigurationLineDefinition(keyword=KeywordDefinition("HISTORY_SOURCE"),
arguments=[StringArgument(built_in=True)],
documentation_link="keywords/history_source",
required=False,
group=self.group)
return history_source
def addRefCase(self):
refcase = ConfigurationLineDefinition(keyword=KeywordDefinition("REFCASE"),
arguments=[PathArgument()],
documentation_link="keywords/refcase",
required=False,
group=self.group)
return refcase
def addRefcaseList(self):
refcase_list = ConfigurationLineDefinition(keyword=KeywordDefinition("REFCASE_LIST"),
arguments=[StringArgument(rest_of_line=True,allow_space=True)],
documentation_link="keywords/refcase_list",
required=False,
group=self.group)
return refcase_list
def addInclude(self):
include = ConfigurationLineDefinition(keyword=KeywordDefinition("INCLUDE"),
arguments=[PathArgument()],
documentation_link="keywords/include",
required=False,
group=self.group)
return include
def addObsConfig(self):
obs_config = ConfigurationLineDefinition(keyword=KeywordDefinition("OBS_CONFIG"),
arguments=[PathArgument()],
documentation_link="keywords/obs_config",
required=False,
group=self.group)
return obs_config
def addResultPath(self):
result_path = ConfigurationLineDefinition(keyword=KeywordDefinition("RESULT_PATH"),
arguments=[PathArgument()],
documentation_link="keywords/result_path",
required=False,
group=self.group)
return result_path
def addLicensePath(self):
license_path = ConfigurationLineDefinition(keyword=KeywordDefinition("LICENSE_PATH"),
arguments=[PathArgument()],
documentation_link="keywords/license_path",
required=False,
group=self.group)
return license_path
def addLocalConfig(self):
local_config = ConfigurationLineDefinition(keyword=KeywordDefinition("LOCAL_CONFIG"),
arguments=[StringArgument(rest_of_line=True,allow_space=True)],
documentation_link="keywords/local_config",
required=False,
group=self.group)
return local_config
| gpl-3.0 | -2,240,770,293,560,839,000 | 47.259259 | 138 | 0.465234 | false |
alexforencich/python-ivi | ivi/agilent/agilent8593A.py | 2 | 1518 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590A import *
class agilent8593A(agilentBase8590A):
"Agilent 8593A IVI spectrum analyzer driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'HP8593A')
super(agilent8593A, self).__init__(*args, **kwargs)
self._input_impedance = 50
self._frequency_low = 9e3
self._frequency_high = 22e9
| mit | 5,837,321,742,123,896,000 | 35.142857 | 77 | 0.758235 | false |
kaiCu/mapproxy | mapproxy/test/system/test_tms.py | 5 | 10944 | # This file is part of the MapProxy project.
# Copyright (C) 2010-2012 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import hashlib
from io import BytesIO
from mapproxy.compat.image import Image
from mapproxy.test.image import is_jpeg, tmp_image
from mapproxy.test.http import mock_httpd
from mapproxy.test.system import module_setup, module_teardown, SystemTest, make_base_config
from nose.tools import eq_
test_config = {}
base_config = make_base_config(test_config)
def setup_module():
module_setup(test_config, 'layer.yaml', with_cache_data=True)
def teardown_module():
module_teardown(test_config)
class TestTMS(SystemTest):
config = test_config
def test_tms_capabilities(self):
resp = self.app.get('/tms/1.0.0/')
assert 'WMS Cache Layer' in resp
assert 'WMS Cache Multi Layer' in resp
assert 'TMS Cache Layer' in resp
assert 'TMS Cache Layer + FI' in resp
xml = resp.lxml
assert xml.xpath('count(//TileMap)') == 11
# without trailing space
resp2 = self.app.get('/tms/1.0.0')
eq_(resp.body, resp2.body)
def test_tms_layer_capabilities(self):
resp = self.app.get('/tms/1.0.0/wms_cache')
assert 'WMS Cache Layer' in resp
xml = resp.lxml
eq_(xml.xpath('count(//TileSet)'), 19)
def test_tms_root_resource(self):
resp = self.app.get('/tms')
resp2 = self.app.get('/tms/')
assert 'TileMapService' in resp and 'TileMapService' in resp2
xml = resp.lxml
eq_(xml.xpath('//TileMapService/@version'),['1.0.0'])
def test_tms_get_out_of_bounds_tile(self):
for coord in [(0, 0, -1), (-1, 0, 0), (0, -1, 0), (4, 2, 1), (1, 3, 0)]:
yield self.check_out_of_bounds, coord
def check_out_of_bounds(self, coord):
x, y, z = coord
url = '/tms/1.0.0/wms_cache/%d/%d/%d.jpeg' % (z, x, y)
resp = self.app.get(url , status=404)
xml = resp.lxml
assert ('outside the bounding box'
in xml.xpath('/TileMapServerError/Message/text()')[0])
def test_invalid_layer(self):
resp = self.app.get('/tms/1.0.0/inVAlid/0/0/0.png', status=404)
xml = resp.lxml
assert ('unknown layer: inVAlid'
in xml.xpath('/TileMapServerError/Message/text()')[0])
def test_invalid_format(self):
resp = self.app.get('/tms/1.0.0/wms_cache/0/0/1.png', status=404)
xml = resp.lxml
assert ('invalid format'
in xml.xpath('/TileMapServerError/Message/text()')[0])
def test_get_tile_tile_source_error(self):
resp = self.app.get('/tms/1.0.0/wms_cache/0/0/0.jpeg', status=500)
xml = resp.lxml
assert ('No response from URL'
in xml.xpath('/TileMapServerError/Message/text()')[0])
def test_get_cached_tile(self):
resp = self.app.get('/tms/1.0.0/wms_cache/0/0/1.jpeg')
eq_(resp.content_type, 'image/jpeg')
eq_(resp.content_length, len(resp.body))
data = BytesIO(resp.body)
assert is_jpeg(data)
def test_get_tile(self):
with tmp_image((256, 256), format='jpeg') as img:
expected_req = ({'path': r'/service?LAYERs=foo,bar&SERVICE=WMS&FORMAT=image%2Fjpeg'
'&REQUEST=GetMap&HEIGHT=256&SRS=EPSG%3A900913&styles='
'&VERSION=1.1.1&BBOX=-20037508.3428,-20037508.3428,0.0,0.0'
'&WIDTH=256'},
{'body': img.read(), 'headers': {'content-type': 'image/jpeg'}})
with mock_httpd(('localhost', 42423), [expected_req], bbox_aware_query_comparator=True):
resp = self.app.get('/tms/1.0.0/wms_cache/0/0/0.jpeg')
eq_(resp.content_type, 'image/jpeg')
self.created_tiles.append('wms_cache_EPSG900913/01/000/000/000/000/000/000.jpeg')
def test_get_tile_from_cache_with_tile_source(self):
with tmp_image((256, 256), format='jpeg') as img:
expected_req = ({'path': r'/tiles/01/000/000/000/000/000/001.png'},
{'body': img.read(), 'headers': {'content-type': 'image/png'}})
with mock_httpd(('localhost', 42423), [expected_req]):
resp = self.app.get('/tms/1.0.0/tms_cache/0/0/1.png')
eq_(resp.content_type, 'image/png')
self.created_tiles.append('tms_cache_EPSG900913/01/000/000/000/000/000/001.png')
def test_get_tile_with_watermark_cache(self):
with tmp_image((256, 256), format='png', color=(0, 0, 0)) as img:
expected_req = ({'path': r'/tiles/01/000/000/000/000/000/000.png'},
{'body': img.read(), 'headers': {'content-type': 'image/png'}})
with mock_httpd(('localhost', 42423), [expected_req]):
resp = self.app.get('/tms/1.0.0/watermark_cache/0/0/0.png')
eq_(resp.content_type, 'image/png')
img = Image.open(BytesIO(resp.body))
colors = img.getcolors()
assert len(colors) >= 2
eq_(sorted(colors)[-1][1], (0, 0, 0))
class TestTileService(SystemTest):
config = test_config
def test_get_out_of_bounds_tile(self):
for coord in [(0, 0, -1), (-1, 0, 0), (0, -1, 0), (4, 2, 1), (1, 3, 0)]:
yield self.check_out_of_bounds, coord
def check_out_of_bounds(self, coord):
x, y, z = coord
url = '/tiles/wms_cache/%d/%d/%d.jpeg' % (z, x, y)
resp = self.app.get(url , status=404)
assert 'outside the bounding box' in resp
def test_invalid_layer(self):
resp = self.app.get('/tiles/inVAlid/0/0/0.png', status=404)
eq_(resp.content_type, 'text/plain')
assert 'unknown layer: inVAlid' in resp
def test_invalid_format(self):
resp = self.app.get('/tiles/wms_cache/0/0/1.png', status=404)
eq_(resp.content_type, 'text/plain')
assert 'invalid format' in resp
def test_get_tile_tile_source_error(self):
resp = self.app.get('/tiles/wms_cache/0/0/0.jpeg', status=500)
eq_(resp.content_type, 'text/plain')
assert 'No response from URL' in resp
def _check_tile_resp(self, resp):
eq_(resp.content_type, 'image/jpeg')
eq_(resp.content_length, len(resp.body))
data = BytesIO(resp.body)
assert is_jpeg(data)
def _update_timestamp(self):
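        # pin the cached tile's mtime to a fixed value and compute the expected
        # ETag (md5 over mtime + file size) so the cache-header assertions are
        # deterministic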
timestamp = 1234567890.0
size = 10214
base_dir = base_config().cache.base_dir
os.utime(os.path.join(base_dir,
'wms_cache_EPSG900913/01/000/000/000/000/000/001.jpeg'),
(timestamp, timestamp))
max_age = base_config().tiles.expires_hours * 60 * 60
etag = hashlib.md5((str(timestamp) + str(size)).encode('ascii')).hexdigest()
return etag, max_age
def _check_cache_control_headers(self, resp, etag, max_age):
eq_(resp.headers['ETag'], etag)
eq_(resp.headers['Last-modified'], 'Fri, 13 Feb 2009 23:31:30 GMT')
eq_(resp.headers['Cache-control'], 'max-age=%d public' % max_age)
def test_get_cached_tile(self):
etag, max_age = self._update_timestamp()
resp = self.app.get('/tiles/wms_cache/1/0/1.jpeg')
self._check_cache_control_headers(resp, etag, max_age)
self._check_tile_resp(resp)
def test_get_cached_tile_flipped_y(self):
etag, max_age = self._update_timestamp()
resp = self.app.get('/tiles/wms_cache/1/0/0.jpeg?origin=nw')
self._check_cache_control_headers(resp, etag, max_age)
self._check_tile_resp(resp)
def test_if_none_match(self):
etag, max_age = self._update_timestamp()
resp = self.app.get('/tiles/wms_cache/1/0/1.jpeg',
headers={'If-None-Match': etag})
eq_(resp.status, '304 Not Modified')
self._check_cache_control_headers(resp, etag, max_age)
resp = self.app.get('/tiles/wms_cache/1/0/1.jpeg',
headers={'If-None-Match': etag + 'foo'})
self._check_cache_control_headers(resp, etag, max_age)
eq_(resp.status, '200 OK')
self._check_tile_resp(resp)
def test_if_modified_since(self):
etag, max_age = self._update_timestamp()
for date, modified in (
('Fri, 15 Feb 2009 23:31:30 GMT', False),
('Fri, 13 Feb 2009 23:31:31 GMT', False),
('Fri, 13 Feb 2009 23:31:30 GMT', False),
('Fri, 13 Feb 2009 23:31:29 GMT', True),
('Fri, 11 Feb 2009 23:31:29 GMT', True),
('Friday, 13-Feb-09 23:31:30 GMT', False),
('Friday, 13-Feb-09 23:31:29 GMT', True),
('Fri Feb 13 23:31:30 2009', False),
('Fri Feb 13 23:31:29 2009', True),
# and some invalid ones
('Fri Foo 13 23:31:29 2009', True),
('1234567890', True),
):
yield self.check_modified_response, date, modified, etag, max_age
def check_modified_response(self, date, modified, etag, max_age):
resp = self.app.get('/tiles/wms_cache/1/0/1.jpeg', headers={
'If-Modified-Since': date})
self._check_cache_control_headers(resp, etag, max_age)
if modified:
eq_(resp.status, '200 OK')
self._check_tile_resp(resp)
else:
eq_(resp.status, '304 Not Modified')
def test_get_tile(self):
with tmp_image((256, 256), format='jpeg') as img:
expected_req = ({'path': r'/service?LAYERs=foo,bar&SERVICE=WMS&FORMAT=image%2Fjpeg'
'&REQUEST=GetMap&HEIGHT=256&SRS=EPSG%3A900913&styles='
'&VERSION=1.1.1&BBOX=-20037508.3428,-20037508.3428,0.0,0.0'
'&WIDTH=256'},
{'body': img.read(), 'headers': {'content-type': 'image/jpeg'}})
with mock_httpd(('localhost', 42423), [expected_req], bbox_aware_query_comparator=True):
resp = self.app.get('/tiles/wms_cache/1/0/0.jpeg')
eq_(resp.content_type, 'image/jpeg')
self.created_tiles.append('wms_cache_EPSG900913/01/000/000/000/000/000/000.jpeg')
| apache-2.0 | 8,842,133,026,679,673,000 | 43.307692 | 100 | 0.572186 | false |
v-samodelkin/TowerDefence | Info.py | 1 | 2772 | # -*- coding: utf8 -*-
import TowerDefenceController as Tdc
def shell(number, target):
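    # wrap the selected number in brackets so it stands out in the menu text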
if number == target:
return "[{0}]".format(number)
else:
return " {0} ".format(number)
def get_editor_readme(target):
readme = ["Управление\n",
shell('1', target) + " - пустая клетка\n",
shell('2', target) + " - игрок\n",
shell('3', target) + " - камень жизни\n",
shell('4', target) + " - шипы\n",
shell('5', target) + " - спираль\n",
shell('6', target) + " - стена\n",
shell('7', target) + " - стена [40%]\n",
" s - сохранение\n"]
return merge(readme)
def get_readme():
readme = ["Управление\n",
"wasd - перемещение\n",
"e - ожидание\n",
"space - выстрел\n",
"shift - пауза\n",
"r - Рестарт\n",
"1 - Шипы [40]\n",
"2 - Заграждение [10]\n",
"3 - Спираль [30]\n"]
return merge(readme)
def get_editor_description():
return merge([" /:\ (''') \n",
" |:| III \n",
" |:| III \n",
" |:| III \n",
" |:| __III__\n",
" |:| /:-.___,-:\\\n",
" |:| \] |:| [/\n",
" |:| |:| \n",
" |:| |:| \n",
" |:| |:| \n",
" /] |:| [\ |:| \n",
" \:-''''`-:/ |:| \n",
" ''III'' |:| \n",
" III |:| \n",
" III |:| \n",
" III |:| \n",
" (___) \:/"])
def merge(string_list):
merged = ""
for element in string_list:
merged += element
return merged
# noinspection PyBroadException
def get_records():
try:
with open(Tdc.records_file_name, "r") as f:
records = list(map(int, f.read().split()))
records.sort(reverse=True)
if len(records) == 0:
return "Рекордов нет"
text = "Рекорды по очкам\n"
counter = 1
for record in records:
text += "{0} место: {1}\n".format(counter, record)
counter += 1
return text
except FileNotFoundError:
return "Рекорды по очкам\nРекордов нет."
except Exception:
return "Рекорды по очкам\nОшибка загрузки"
| mit | 4,161,223,196,234,994,000 | 30.097561 | 66 | 0.379216 | false |
odoousers2014/LibrERP | sale_bom/sale_mrp.py | 1 | 8598 | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (c) 2013-2014 Didotech srl (info at didotech.com)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp.osv import orm, fields
import decimal_precision as dp
class sale_order_line(orm.Model):
_inherit = "sale.order.line"
_columns = {
'mrp_bom': fields.one2many('sale.order.line.mrp.bom', 'order_id', 'Bom Lines', readonly=True, states={'draft': [('readonly', False)]}),
'with_bom': fields.boolean(string='With BOM'),
}
_defaults = {
'with_bom': False,
}
def product_id_change(self, cr, uid, ids, pricelist, product_id, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False,
packaging=False, fiscal_position=False, flag=False, context=None):
result = super(sale_order_line, self).product_id_change(cr, uid, ids, pricelist, product_id, qty, uom, qty_uos, uos, name, partner_id,
lang, update_tax, date_order, packaging, fiscal_position, flag, context)
mrp_bom_obj = self.pool['mrp.bom']
if product_id:
product = self.pool['product.product'].browse(cr, uid, product_id, context=context)
if product.supply_method == 'produce':
result['value']['with_bom'] = True
mrp_bom_ids = mrp_bom_obj.search(cr, uid, [('product_id', '=', product_id), ])
if mrp_bom_ids and len(mrp_bom_ids) == 1:
mrp_bom = mrp_bom_obj.browse(cr, uid, mrp_bom_ids[0])
#line_mrp_bom_obj = self.pool.get('sale.order.line.mrp.bom')
if mrp_bom.bom_lines:
result['value']['mrp_bom'] = []
for bom_line in mrp_bom.bom_lines:
line_bom = {
'product_id': bom_line.product_id.id,
'product_uom_qty': bom_line.product_qty,
'product_uom': bom_line.product_uom.id,
'price_unit': bom_line.product_id.cost_price,
'price_subtotal': bom_line.product_qty * bom_line.product_id.cost_price
}
if ids and len(ids) == 1:
line_bom['order_id'] = ids[0]
result['value']['mrp_bom'].append(line_bom)
else:
result['value']['with_bom'] = False
## {'value': result, 'domain': domain, 'warning': warning}
return result
# def onchange_mrp_bom(self, cr, uid, ids, mrp_bom, product_id, context=None):
# uom_obj = self.pool['product.uom']
# bom_obj = self.pool['mrp.bom']
#
# mrp_bom_filtered = [bom_id for bom_id in mrp_bom if not (isinstance(bom_id, (tuple, list)) and bom_id and bom_id[0] == 2) and not bom_id[0] == 5]
#
# line_mrps = self.resolve_o2m_commands_to_record_dicts(cr, uid, 'mrp_bom', mrp_bom_filtered, context=context)
#
    # # Attention! These lines duplicate _compute_purchase_price() from the product_bom.product module
# price = 0.
#
# for line_mrp in line_mrps:
# #print line_mrp
# if line_mrp['product_uom']:
# if isinstance(line_mrp['product_uom'], (tuple, list)):
# uom_id = line_mrp['product_uom'][0]
# elif isinstance(line_mrp['product_uom'], int):
# uom_id = line_mrp['product_uom']
# qty = uom_obj._compute_qty(cr, uid,
# from_uom_id=uom_id,
# qty=line_mrp['product_uom_qty'],
# to_uom_id=uom_id)
# price += line_mrp['price_unit'] * qty
# else:
# price += line_mrp['price_unit'] * line_mrp['product_uom_qty']
#
# bom_ids = bom_obj.search(cr, uid, [('product_id', '=', product_id), ('bom_id', '=', False)])
# if bom_ids:
# bom = bom_obj.browse(cr, uid, bom_ids[0])
# if bom.routing_id:
# for wline in bom.routing_id.workcenter_lines:
# wc = wline.workcenter_id
# cycle = wline.cycle_nbr
# # hour = (wc.time_start + wc.time_stop + cycle * wc.time_cycle) * (wc.time_efficiency or 1.0)
# price += wc.costs_cycle * cycle + wc.costs_hour * wline.hour_nbr
# price /= bom.product_qty
# price = uom_obj._compute_price(cr, uid, bom.product_uom.id, price, bom.product_id.uom_id.id)
#
# mrp_bom_new = []
# for line in mrp_bom:
# if line[2] and not line[2].get('price_subtotal', False) and line[2].get('price_unit', False) and line[2].get('product_uom_qty', False):
# line[2]['price_subtotal'] = line[2]['price_unit'] * line[2]['product_uom_qty']
# mrp_bom_new.append(line)
#
# if mrp_bom_new:
# return {'value': {'purchase_price': price, 'mrp_bom': mrp_bom_new}}
# else:
# return {'value': {'purchase_price': price}}
class sale_order_line_mrp_bom(orm.Model):
_name = 'sale.order.line.mrp.bom'
_description = 'Sales Order Bom Line'
# def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
# if context is None:
# context = {}
# res = {}
#
# for line in self.browse(cr, uid, ids, context=context):
# res[line.id] = line.price_unit * line.product_uom_qty
# return res
_columns = {
'name': fields.char('Note', size=256, select=True),
'order_id': fields.many2one('sale.order.line', 'Order Reference', ondelete='cascade', select=True),
'product_id': fields.many2one('product.product', 'Product', change_default=True),
'product_uom_qty': fields.float('Quantity (UoM)', digits_compute=dp.get_precision('Product UoS'), required=True),
'product_uom': fields.many2one('product.uom', 'Unit of Measure ', required=True),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of sales order lines."),
'price_unit': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Sale Price')),
'price_subtotal': fields.float('Subtotal', required=True, digits_compute=dp.get_precision('Sale Price')),
# 'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Sale Price')),
}
_order = 'sequence, id'
_defaults = {
'product_uom_qty': 1,
'sequence': 10,
'price_unit': 0.0,
'order_id': lambda self, cr, uid, context: context.get('default_sale_order_line', False) or False
}
def bom_product_id_change(self, cr, uid, ids, product_id, uom_id, product_qty, price_unit, context=None):
if product_id:
product = self.pool['product.product'].browse(cr, uid, product_id)
#qty = self.pool['product.uom']._compute_qty(cr, uid,
# from_uom_id=uom_id,
# qty=product_qty,
# to_uom_id=uom_id)
return {'value': {
'price_unit': price_unit or product.cost_price,
'product_uom': uom_id or product.uom_id.id,
'price_subtotal': price_unit * product_qty,
}}
else:
return {'value': {}}
| agpl-3.0 | 4,000,021,663,064,934,400 | 48.988372 | 155 | 0.522796 | false |
rafirosenberg/pinry | pinry/core/tests/helpers.py | 2 | 2779 | from django.conf import settings
from django.contrib.auth.models import Permission
from django.core.files.images import ImageFile
from django.db.models.query import QuerySet
from django.test import TestCase
from django_images.models import Thumbnail
import factory
from taggit.models import Tag
from ..models import Pin, Image
from ...users.models import User
TEST_IMAGE_PATH = 'logo.png'
class UserFactory(factory.Factory):
FACTORY_FOR = User
username = factory.Sequence(lambda n: 'user_{}'.format(n))
email = factory.Sequence(lambda n: 'user_{}@example.com'.format(n))
@factory.post_generation(extract_prefix='password')
def set_password(self, create, extracted, **kwargs):
self.set_password(extracted)
self.save()
@factory.post_generation(extract_prefix='user_permissions')
def set_user_permissions(self, create, extracted, **kwargs):
self.user_permissions = Permission.objects.filter(codename__in=['add_pin', 'add_image'])
class TagFactory(factory.Factory):
FACTORY_FOR = Tag
name = factory.Sequence(lambda n: 'tag_{}'.format(n))
class ImageFactory(factory.Factory):
FACTORY_FOR = Image
image = factory.LazyAttribute(lambda a: ImageFile(open(TEST_IMAGE_PATH, 'rb')))
@factory.post_generation()
def create_thumbnails(self, create, extracted, **kwargs):
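        # pre-generate a thumbnail for every size configured in settings.IMAGE_SIZES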
for size in settings.IMAGE_SIZES.keys():
Thumbnail.objects.get_or_create_at_size(self.pk, size)
class PinFactory(factory.Factory):
FACTORY_FOR = Pin
submitter = factory.SubFactory(UserFactory)
image = factory.SubFactory(ImageFactory)
@factory.post_generation(extract_prefix='tags')
def add_tags(self, create, extracted, **kwargs):
if isinstance(extracted, Tag):
self.tags.add(extracted)
elif isinstance(extracted, list):
self.tags.add(*extracted)
elif isinstance(extracted, QuerySet):
self.tags = extracted
else:
self.tags.add(TagFactory())
class PinFactoryTest(TestCase):
def test_default_tags(self):
self.assertTrue(PinFactory().tags.get(pk=1).name.startswith('tag_'))
def test_custom_tag(self):
custom = 'custom_tag'
self.assertEqual(PinFactory(tags=Tag.objects.create(name=custom)).tags.get(pk=1).name, custom)
def test_custom_tags_list(self):
tags = TagFactory.create_batch(2)
PinFactory(tags=tags)
self.assertEqual(Tag.objects.count(), 2)
def test_custom_tags_queryset(self):
TagFactory.create_batch(2)
tags = Tag.objects.all()
PinFactory(tags=tags)
self.assertEqual(Tag.objects.count(), 2)
def test_empty_tags(self):
PinFactory(tags=[])
self.assertEqual(Tag.objects.count(), 0)
| bsd-2-clause | -567,018,051,069,702,000 | 29.877778 | 102 | 0.68082 | false |
a10networks/a10sdk-python | a10sdk/core/cgnv6/cgnv6_lsn_health_check_gateway.py | 2 | 1452 | from a10sdk.common.A10BaseClass import A10BaseClass
class HealthCheckGateway(A10BaseClass):
"""Class Description::
Configure LSN health-check gateway.
Class health-check-gateway supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param ipv4_addr: {"optional": false, "type": "string", "description": "Specify IPv4 Gateway", "format": "ipv4-address"}
:param ipv6_addr: {"optional": false, "type": "string", "description": "Specify IPv6 Gateway", "format": "ipv6-address"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/lsn/health-check-gateway/{ipv4_addr}+{ipv6_addr}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "ipv4_addr","ipv6_addr"]
self.b_key = "health-check-gateway"
self.a10_url="/axapi/v3/cgnv6/lsn/health-check-gateway/{ipv4_addr}+{ipv6_addr}"
self.DeviceProxy = ""
self.ipv4_addr = ""
self.ipv6_addr = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| apache-2.0 | -3,744,487,415,763,063,300 | 35.3 | 168 | 0.637741 | false |
schef/schef.github.io | source/16/mc-16-01-06-amajor-1black.py | 1 | 1753 | #!/usr/bin/python3
#import time
import random
import imp
modl = imp.load_source('ppFunctions', '../00/ppFunctions.py')
logo = imp.load_source('logo', '../00/logo.py')
import os
from ppFunctions import *
from termcolor import colored, cprint
os.system('clear')
from logo import *
# sleep because of loading midi modules
print_logo()
time.sleep(1)
print_status = lambda x: cprint(x, 'white', 'on_blue')
print_help = lambda x: cprint(x, 'red')
hit = 0
rounde = 1
done = False
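# collect every black-key MIDI note between c and c' (inclusive) to draw questions from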
generatedList = []
for i in range(stringToMidiNum("c"), stringToMidiNum("c'")+1):
if i%12 in blackTonesBase:
generatedList.append(i)
while True:
try:
os.system('clear')
print_logo()
print_status("Status: round=" + str(rounde) + ", hit=" + str(hit))
print_help("Help: rEPEAT sKIP")
playHarmonicNotes(stringToMidiNum("a cis' e'"))
randomNote = random.choice(generatedList)
playNote(randomNote)
while not done:
guessedNote = input("Your input: ")
if guessedNote == "r":
print("Repeating...")
playHarmonicNotes(stringToMidiNum("a cis' e'"))
playNote(randomNote)
elif guessedNote == "s":
print("Skiping...")
done = True
elif guessedNote not in lilypondTones:
print("What? Syntax error!")
else:
if (lilypondTones[guessedNote] == randomNote%12):
print("Yea!")
hit += 1
rounde += 1
done = True
else:
print("Almost!")
playNote(randomNote)
playNote(lilypondTones[guessedNote])
hit = 0
done = False
except (KeyboardInterrupt):
print('...Program Stopped Manually!')
raise
| mit | 9,182,928,115,827,582,000 | 27.737705 | 70 | 0.589276 | false |
caiofsouza/AddLibrary | AddLibrary.py | 1 | 8436 | import sublime, sublime_plugin
import os, re
import threading
import json
from .Libraries import Libraries
try:
from urllib.request import urlopen, Request
except ImportError:
	from urllib2 import urlopen, Request, URLError
def downloadLib(folder, lib):
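	# ensure the configured download folder exists, then fetch the library's files on a background thread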
settings = sublime.load_settings("AddLibrary.sublime-settings")
default_lib = settings.get("default_folder")
if not os.path.exists(folder + default_lib):
os.makedirs(folder + default_lib)
nf = GetLibThread(lib, folder + default_lib)
nf.start()
# global http get
def httpGet(url):
request_obj = Request(url, method='GET')
req = urlopen(url=request_obj)
return req
class AddLibrary(sublime_plugin.TextCommand):
def __init__(self, view):
self.view = view
self.window = sublime.active_window()
self.folder_path = self.window.folders()
def run(self, edit):
self.libraries_names = Libraries().getLibrariesName()
self.install_on_folder = None
self.selected_lib = None
arr_to_list = []
for ln in self.libraries_names:
arr_to_list.append("Download - "+ln)
# select a lib to download
self.window.show_quick_panel(arr_to_list, self.selectedLibrary)
def selectedLibrary(self, option_index):
if option_index > -1:
self.selected_lib = Libraries().getLibraryByName(self.libraries_names[option_index])
self.folder_path = self.window.folders()
# if have more than a active folder in sublime
if len(self.folder_path) > 1:
name_of_folders = self.folder_path
self.window.show_quick_panel(name_of_folders, self.selectFolder)
else:
self.install_on_folder = self.folder_path[0]
downloadLib(self.install_on_folder, self.selected_lib)
def selectFolder(self, folder_index):
self.install_on_folder = self.folder_path[folder_index]
downloadLib(self.install_on_folder, self.selected_lib)
class SearchLibrary(sublime_plugin.TextCommand):
def __init__(self, view):
self.view = view
self.window = sublime.active_window()
self.result_arr = []
self.result_arr_list = []
self.lib_versions = []
self.searchURL = 'https://api.cdnjs.com/libraries?search='
self.install_on_folder = ''
def run(self, edit):
self.window.show_input_panel("Search for:", "", self.searchTerm, None, None)
def searchTerm(self, term):
if term:
search_req = httpGet(self.searchURL + term + '&fields=version,description').read().decode('utf-8')
get_results = json.loads(search_req)['results']
for lib in get_results:
self.result_arr.append(lib['name'])
self.result_arr_list.append([lib['name'],lib['name'] + '-' + lib['version'],lib['description']])
self.window.show_quick_panel(self.result_arr_list, self.selectFindedLib)
def selectFindedLib(self, result_index):
if result_index > -1:
self.selected_lib = { 'search_name': self.result_arr[result_index], 'name': self.result_arr[result_index] }
search_t = SearchLibVersions(self.result_arr[result_index])
search_t.start()
self.result_arr = []
self.result_arr_list = []
def selectFolder(self, folder_index):
self.install_on_folder = self.folder_path[folder_index]
downloadLib(self.install_on_folder, self.selected_lib)
# get lib files via thread
class GetLibThread(threading.Thread):
def __init__(self, selected_lib, install_on):
self.selected_lib = selected_lib
self.selected_lib_name = selected_lib['search_name']
self.install_on = install_on
self.apiRoot = 'https://api.cdnjs.com/libraries/'
self.apiSearch = self.apiRoot + self.selected_lib_name
self.cdnURL = 'https://cdnjs.cloudflare.com/ajax/libs/'
threading.Thread.__init__(self)
def run(self):
sublime.status_message("Downloading " + self.selected_lib['name'] + "...")
if 'dependencies' in self.selected_lib:
for d in self.selected_lib['dependencies']:
# for each dependency, create a new recursive thread
selected_dep_lib = Libraries().getLibraryBySearchName(d)
get_t = GetLibThread(selected_dep_lib, self.install_on)
get_t.start()
get_latest_req = httpGet(self.apiSearch).read().decode('utf-8')
get_latest = json.loads(get_latest_req)
if get_latest:
# get the filename
file_name_by_url = get_latest['filename']
lib_folder = self.install_on + "/" + get_latest['name'] + '-' + get_latest['version']
# create the dir
if os.path.isdir(lib_folder) == False:
os.mkdir(lib_folder)
if 'assets' in get_latest:
for asset in get_latest['assets']:
if asset['version'] == get_latest['version']:
for file_name in asset['files']:
# get the latest version
latest_url = self.cdnURL + '/'+ get_latest['name'] + '/' + get_latest['version'] + '/' + file_name
# print(latest_url)
file_path = lib_folder + '/' + file_name
new_f_t = newFileThread(latest_url, file_path, file_name)
new_f_t.start()
# get lib files via thread
class GetLibVersionThread(threading.Thread):
def __init__(self, selected_lib, install_on, version):
self.selected_lib = selected_lib
self.install_on = install_on
self.target_version = version
self.apiRoot = 'https://api.cdnjs.com/libraries/'
self.apiSearch = self.apiRoot + self.selected_lib
self.cdnURL = 'https://cdnjs.cloudflare.com/ajax/libs/'
threading.Thread.__init__(self)
def run(self):
sublime.status_message("Downloading " + self.selected_lib + "...")
settings = sublime.load_settings("AddLibrary.sublime-settings")
default_lib = settings.get("default_folder")
get_lib_req = httpGet(self.apiSearch).read().decode('utf-8')
lib_list = json.loads(get_lib_req)
# folder that lib will be
lib_folder = self.install_on + default_lib + "/" + self.selected_lib + '-' + self.target_version
if os.path.isdir(self.install_on + default_lib) == False:
os.mkdir(self.install_on + default_lib)
if os.path.isdir(lib_folder) == False:
os.mkdir(lib_folder)
for assets in lib_list['assets']:
if assets['version'] == self.target_version:
for file_name in assets['files']:
# get the latest version
file_url = self.cdnURL + '/'+ self.selected_lib + '/' + self.target_version + '/' + file_name
file_path = lib_folder + '/' + file_name
new_f_t = newFileThread(file_url, file_path, file_name)
new_f_t.start()
# create a file via thread
class newFileThread(threading.Thread):
def __init__(self, url_to_content, file_path, file_name):
self.url_to_content = url_to_content
self.file_path = file_path
self.file_name = file_name
threading.Thread.__init__(self)
def run(self):
file_content = httpGet(self.url_to_content).read().decode('utf-8')
directory = os.path.dirname(self.file_path)
if not os.path.exists(directory):
os.makedirs(directory)
# create the file
new_file = os.open( self.file_path, os.O_RDWR|os.O_CREAT )
os.write( new_file, file_content.encode('utf-8') )
os.close( new_file )
sublime.status_message("File downloaded: " + self.file_name)
class SearchLibVersions(threading.Thread):
def __init__(self, lib_name):
self.lib_name = lib_name
self.libraryURL = 'https://api.cdnjs.com/libraries/'
self.window = sublime.active_window()
self.folder_path = self.window.folders()
self.list_versions = []
threading.Thread.__init__(self)
def run(self):
versions_req = httpGet(self.libraryURL + self.lib_name).read().decode('utf-8')
self.lib_versions = json.loads(versions_req)
self.list_lib_versions = []
for version in self.lib_versions['assets']:
self.list_versions.append([ self.lib_versions['name'] + ' - version '+version['version'] ])
self.list_lib_versions.append(version['version'])
self.window.show_quick_panel(self.list_versions, self.installLibByVersion)
def installLibByVersion(self, selected_version_index):
if selected_version_index > -1:
selected_version = self.list_lib_versions[selected_version_index]
self.folder_path = self.window.folders()
# if have more than a active folder in sublime
if len(self.folder_path) > 1:
name_of_folders = self.folder_path
self.window.show_quick_panel(name_of_folders, self.selectFolder)
else:
self.install_on_folder = self.folder_path[0]
get_version_t = GetLibVersionThread(self.lib_name, self.install_on_folder, selected_version )
get_version_t.start()
def selectFolder(self, folder_index):
self.install_on_folder = self.folder_path[folder_index]
get_version_t = GetLibVersionThread(self.lib_name, self.install_on_folder, selected_version )
get_version_t.start()
| mit | -6,707,283,293,623,892,000 | 31.198473 | 110 | 0.69073 | false |
cryptickp/heat | heat/tests/test_common_context.py | 2 | 13976 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from oslo_config import cfg
from oslo_middleware import request_id
from oslo_policy import opts as policy_opts
from oslo_utils import importutils
import webob
from heat.common import context
from heat.common import exception
from heat.tests import common
policy_path = os.path.dirname(os.path.realpath(__file__)) + "/policy/"
class TestRequestContext(common.HeatTestCase):
def setUp(self):
self.ctx = {'username': 'mick',
'trustor_user_id': None,
'auth_token': '123',
'auth_token_info': {'123info': 'woop'},
'is_admin': False,
'user': 'mick',
'password': 'foo',
'trust_id': None,
'show_deleted': False,
'roles': ['arole', 'notadmin'],
'tenant_id': '456tenant',
'user_id': 'fooUser',
'tenant': u'\u5218\u80dc',
'auth_url': 'http://xyz',
'aws_creds': 'blah',
'region_name': 'RegionOne',
'user_identity': 'fooUser 456tenant'}
super(TestRequestContext, self).setUp()
def test_request_context_init(self):
ctx = context.RequestContext(
auth_token=self.ctx.get('auth_token'),
username=self.ctx.get('username'),
password=self.ctx.get('password'),
aws_creds=self.ctx.get('aws_creds'),
tenant=self.ctx.get('tenant'),
tenant_id=self.ctx.get('tenant_id'),
user_id=self.ctx.get('user_id'),
auth_url=self.ctx.get('auth_url'),
roles=self.ctx.get('roles'),
show_deleted=self.ctx.get('show_deleted'),
is_admin=self.ctx.get('is_admin'),
auth_token_info=self.ctx.get('auth_token_info'),
trustor_user_id=self.ctx.get('trustor_user_id'),
trust_id=self.ctx.get('trust_id'),
user=self.ctx.get('user'),
region_name=self.ctx.get('region_name'))
ctx_dict = ctx.to_dict()
del(ctx_dict['request_id'])
self.assertEqual(self.ctx, ctx_dict)
def test_request_context_from_dict(self):
ctx = context.RequestContext.from_dict(self.ctx)
ctx_dict = ctx.to_dict()
del(ctx_dict['request_id'])
self.assertEqual(self.ctx, ctx_dict)
def test_request_context_update(self):
ctx = context.RequestContext.from_dict(self.ctx)
for k in self.ctx:
if k == 'user_identity':
continue
self.assertEqual(self.ctx.get(k), ctx.to_dict().get(k))
override = '%s_override' % k
setattr(ctx, k, override)
self.assertEqual(override, ctx.to_dict().get(k))
def test_get_admin_context(self):
ctx = context.get_admin_context()
self.assertTrue(ctx.is_admin)
self.assertFalse(ctx.show_deleted)
def test_get_admin_context_show_deleted(self):
ctx = context.get_admin_context(show_deleted=True)
self.assertTrue(ctx.is_admin)
self.assertTrue(ctx.show_deleted)
def test_admin_context_policy_true(self):
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = True
ctx = context.RequestContext(roles=['admin'])
self.assertTrue(ctx.is_admin)
def test_admin_context_policy_false(self):
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(roles=['notadmin'])
self.assertFalse(ctx.is_admin)
def test_keystone_v3_endpoint_in_context(self):
"""Ensure that the context is the preferred source for the
auth_uri.
"""
cfg.CONF.set_override('auth_uri', 'http://xyz',
group='clients_keystone')
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(
auth_url='http://example.com:5000/v2.0')
self.assertEqual(ctx.keystone_v3_endpoint,
'http://example.com:5000/v3')
def test_keystone_v3_endpoint_in_clients_keystone_config(self):
"""Ensure that the [clients_keystone] section of the configuration is
the preferred source when the context does not have the auth_uri.
"""
cfg.CONF.set_override('auth_uri', 'http://xyz',
group='clients_keystone')
importutils.import_module('keystonemiddleware.auth_token')
cfg.CONF.set_override('auth_uri', 'http://abc/v2.0',
group='keystone_authtoken')
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
with mock.patch('keystoneclient.discover.Discover') as discover:
class MockDiscover(object):
def url_for(self, endpoint):
return 'http://xyz/v3'
discover.return_value = MockDiscover()
ctx = context.RequestContext(auth_url=None)
self.assertEqual(ctx.keystone_v3_endpoint, 'http://xyz/v3')
def test_keystone_v3_endpoint_in_keystone_authtoken_config(self):
"""Ensure that the [keystone_authtoken] section of the configuration
is used when the auth_uri is not defined in the context or the
[clients_keystone] section.
"""
importutils.import_module('keystonemiddleware.auth_token')
cfg.CONF.set_override('auth_uri', 'http://abc/v2.0',
group='keystone_authtoken')
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(auth_url=None)
self.assertEqual(ctx.keystone_v3_endpoint, 'http://abc/v3')
def test_keystone_v3_endpoint_not_set_in_config(self):
"""Ensure an exception is raised when the auth_uri cannot be obtained
from any source.
"""
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(auth_url=None)
self.assertRaises(exception.AuthorizationFailure, getattr, ctx,
'keystone_v3_endpoint')
class RequestContextMiddlewareTest(common.HeatTestCase):
scenarios = [(
'empty_headers',
dict(
environ=None,
headers={},
expected_exception=None,
context_dict={
'auth_token': None,
'auth_token_info': None,
'auth_url': None,
'aws_creds': None,
'is_admin': False,
'password': None,
'roles': [],
'show_deleted': False,
'tenant': None,
'tenant_id': None,
'trust_id': None,
'trustor_user_id': None,
'user': None,
'user_id': None,
'username': None
})
), (
'username_password',
dict(
environ=None,
headers={
'X-Auth-User': 'my_username',
'X-Auth-Key': 'my_password',
'X-Auth-EC2-Creds': '{"ec2Credentials": {}}',
'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'X-Auth-Token': 'atoken',
'X-Project-Name': 'my_tenant',
'X-Project-Id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'X-Auth-Url': 'http://192.0.2.1:5000/v1',
'X-Roles': 'role1,role2,role3'
},
expected_exception=None,
context_dict={
'auth_token': 'atoken',
'auth_url': 'http://192.0.2.1:5000/v1',
'aws_creds': None,
'is_admin': False,
'password': 'my_password',
'roles': ['role1', 'role2', 'role3'],
'show_deleted': False,
'tenant': 'my_tenant',
'tenant_id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'trust_id': None,
'trustor_user_id': None,
'user': 'my_username',
'user_id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'username': 'my_username'
})
), (
'aws_creds',
dict(
environ=None,
headers={
'X-Auth-EC2-Creds': '{"ec2Credentials": {}}',
'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'X-Auth-Token': 'atoken',
'X-Project-Name': 'my_tenant',
'X-Project-Id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'X-Auth-Url': 'http://192.0.2.1:5000/v1',
'X-Roles': 'role1,role2,role3',
},
expected_exception=None,
context_dict={
'auth_token': 'atoken',
'auth_url': 'http://192.0.2.1:5000/v1',
'aws_creds': '{"ec2Credentials": {}}',
'is_admin': False,
'password': None,
'roles': ['role1', 'role2', 'role3'],
'show_deleted': False,
'tenant': 'my_tenant',
'tenant_id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'trust_id': None,
'trustor_user_id': None,
'user': None,
'user_id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'username': None
})
), (
'token_creds',
dict(
environ={'keystone.token_info': {'info': 123}},
headers={
'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'X-Auth-Token': 'atoken2',
'X-Project-Name': 'my_tenant2',
'X-Project-Id': 'bb9108c8-62d0-4d92-898c-d644a6af20e9',
'X-Auth-Url': 'http://192.0.2.1:5000/v1',
'X-Roles': 'role1,role2,role3',
},
expected_exception=None,
context_dict={
'auth_token': 'atoken2',
'auth_token_info': {'info': 123},
'auth_url': 'http://192.0.2.1:5000/v1',
'aws_creds': None,
'is_admin': False,
'password': None,
'roles': ['role1', 'role2', 'role3'],
'show_deleted': False,
'tenant': 'my_tenant2',
'tenant_id': 'bb9108c8-62d0-4d92-898c-d644a6af20e9',
'trust_id': None,
'trustor_user_id': None,
'user': None,
'user_id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'username': None
})
), (
'malformed_roles',
dict(
environ=None,
headers={
'X-Roles': [],
},
expected_exception=exception.NotAuthenticated)
)]
def setUp(self):
super(RequestContextMiddlewareTest, self).setUp()
opts = [
cfg.StrOpt('config_dir', default=policy_path),
cfg.StrOpt('config_file', default='foo'),
cfg.StrOpt('project', default='heat'),
]
cfg.CONF.register_opts(opts)
policy_opts.set_defaults(cfg.CONF, 'check_admin.json')
def test_context_middleware(self):
middleware = context.ContextMiddleware(None, None)
request = webob.Request.blank('/stacks', headers=self.headers,
environ=self.environ)
if self.expected_exception:
self.assertRaises(
self.expected_exception, middleware.process_request, request)
else:
self.assertIsNone(middleware.process_request(request))
ctx = request.context.to_dict()
for k, v in self.context_dict.items():
self.assertEqual(v, ctx[k], 'Key %s values do not match' % k)
self.assertIsNotNone(ctx.get('request_id'))
def test_context_middleware_with_requestid(self):
middleware = context.ContextMiddleware(None, None)
request = webob.Request.blank('/stacks', headers=self.headers,
environ=self.environ)
req_id = 'req-5a63f0d7-1b69-447b-b621-4ea87cc7186d'
request.environ[request_id.ENV_REQUEST_ID] = req_id
if self.expected_exception:
self.assertRaises(
self.expected_exception, middleware.process_request, request)
else:
self.assertIsNone(middleware.process_request(request))
ctx = request.context.to_dict()
for k, v in self.context_dict.items():
self.assertEqual(v, ctx[k], 'Key %s values do not match' % k)
self.assertEqual(
ctx.get('request_id'), req_id,
'Key request_id values do not match')
| apache-2.0 | -5,614,957,604,447,868,000 | 39.393064 | 78 | 0.53091 | false |
GuessWhoSamFoo/pandas | pandas/tests/util/test_validate_args.py | 2 | 2224 | # -*- coding: utf-8 -*-
from collections import OrderedDict
import pytest
from pandas.util._validators import validate_args
_fname = "func"
def test_bad_min_fname_arg_count():
msg = "'max_fname_arg_count' must be non-negative"
with pytest.raises(ValueError, match=msg):
validate_args(_fname, (None,), -1, "foo")
def test_bad_arg_length_max_value_single():
args = (None, None)
compat_args = ("foo",)
min_fname_arg_count = 0
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"argument \({actual_length} given\)"
.format(fname=_fname, max_length=max_length,
actual_length=actual_length))
with pytest.raises(TypeError, match=msg):
validate_args(_fname, args, min_fname_arg_count, compat_args)
def test_bad_arg_length_max_value_multiple():
args = (None, None)
compat_args = dict(foo=None)
min_fname_arg_count = 2
max_length = len(compat_args) + min_fname_arg_count
actual_length = len(args) + min_fname_arg_count
msg = (r"{fname}\(\) takes at most {max_length} "
r"arguments \({actual_length} given\)"
.format(fname=_fname, max_length=max_length,
actual_length=actual_length))
with pytest.raises(TypeError, match=msg):
validate_args(_fname, args, min_fname_arg_count, compat_args)
@pytest.mark.parametrize("i", range(1, 3))
def test_not_all_defaults(i):
bad_arg = "foo"
msg = ("the '{arg}' parameter is not supported "
r"in the pandas implementation of {func}\(\)".
format(arg=bad_arg, func=_fname))
compat_args = OrderedDict()
compat_args["foo"] = 2
compat_args["bar"] = -1
compat_args["baz"] = 3
arg_vals = (1, -1, 3)
with pytest.raises(ValueError, match=msg):
validate_args(_fname, arg_vals[:i], 2, compat_args)
def test_validation():
# No exceptions should be raised.
validate_args(_fname, (None,), 2, dict(out=None))
compat_args = OrderedDict()
compat_args["axis"] = 1
compat_args["out"] = None
validate_args(_fname, (1, None), 2, compat_args)
| bsd-3-clause | 9,048,335,448,136,567,000 | 28.263158 | 69 | 0.616906 | false |
nmercier/linux-cross-gcc | linux/lib/python2.7/dist-packages/blueman/plugins/mechanism/Network.py | 1 | 4154 | # Copyright (C) 2009 Valmantas Paliksa <walmis at balticum-tv dot lt>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from blueman.plugins.MechanismPlugin import MechanismPlugin
import os
import subprocess
from gi.repository import GObject
from blueman.main.NetConf import NetConf, DnsMasqHandler, DhcpdHandler
DHCPDHANDLERS = {"DnsMasqHandler": DnsMasqHandler,
"DhcpdHandler": DhcpdHandler},
class Network(MechanismPlugin):
def on_load(self):
self.add_dbus_method(self.SetGN, in_signature="b", out_signature="", sender_keyword="caller")
self.add_dbus_method(self.NetworkSetup, in_signature="sbs", out_signature="", sender_keyword="caller")
self.add_dbus_method(self.DhcpClient, in_signature="s", out_signature="s", sender_keyword="caller", async_callbacks=("ok", "err"))
self.add_dbus_method(self.EnableNetwork, in_signature="ayays", out_signature="", sender_keyword="caller", byte_arrays=True)
self.add_dbus_method(self.DisableNetwork, in_signature="", out_signature="", sender_keyword="caller")
self.add_dbus_method(self.ReloadNetwork, in_signature="", out_signature="", sender_keyword="caller")
def DhcpClient(self, net_interface, caller, ok, err):
self.timer.stop()
self.confirm_authorization(caller, "org.blueman.dhcp.client")
from blueman.main.DhcpClient import DhcpClient
def dh_error(dh, message, ok, err):
err(message)
self.timer.resume()
def dh_connected(dh, ip, ok, err):
ok(ip)
self.timer.resume()
dh = DhcpClient(net_interface)
dh.connect("error-occurred", dh_error, ok, err)
dh.connect("connected", dh_connected, ok, err)
try:
dh.Connect()
except Exception, e:
err(e)
def SetGN(self, enabled, caller):
self.timer.reset()
if enabled:
p = subprocess.Popen(["/usr/sbin/avahi-autoipd", "-D", "pan0"], env=os.environ, bufsize=128)
else:
p = subprocess.Popen(["/usr/sbin/avahi-autoipd", "-k", "pan0"], bufsize=128)
#reap the child
GObject.child_watch_add(p.pid, lambda pid, cond: 0)
def EnableNetwork(self, ip_address, netmask, dhcp_handler, caller):
nc = NetConf.get_default()
nc.set_ipv4(ip_address, netmask)
nc.set_dhcp_handler(DHCPDHANDLERS[dhcp_handler])
nc.apply_settings()
def ReloadNetwork(self, caller):
nc = NetConf.get_default()
nc.apply_settings()
def DisableNetwork(self, caller):
nc = NetConf.get_default()
nc.remove_settings()
nc.set_ipv4(None, None)
nc.store()
def NetworkSetup(self, ip_address, allow_nat, server_type, caller):
self.timer.reset()
dprint(ip_address, allow_nat, server_type)
if ip_address == "reload":
info = netstatus()
nc = None
if info["ip"] != "0" and not nc_is_running():
if info["type"] == "dnsmasq":
nc = NetConfDnsMasq(None)
elif info["type"] == "dhcpd":
nc = NetConfDhcpd(None)
if nc:
nc.reload_settings()
return
self.confirm_authorization(caller, "org.blueman.network.setup")
if ip_address == "0":
info = netstatus()
nc = None
try:
if info["type"] == "dnsmasq":
nc = NetConfDnsMasq(None)
elif info["type"] == "dhcpd":
nc = NetConfDhcpd(None)
except:
#fallback
nc = NetConf(None)
nc.uninstall()
else:
if ip_chk(ip_address):
nc = None
if server_type == "dnsmasq":
nc = NetConfDnsMasq(ip_address, allow_nat)
elif server_type == "dhcpd":
nc = NetConfDhcpd(ip_address, allow_nat)
if nc:
nc.install()
else:
return dbus.DBusException("IP Invalid")
| bsd-3-clause | -5,156,855,811,527,410,000 | 30.953846 | 132 | 0.686326 | false |
silly-wacky-3-town-toon/SOURCE-COD | toontown/hood/TutorialHood.py | 1 | 1188 | from panda3d.core import *
from panda3d.direct import *
import ToonHood
from toontown.town import TutorialTownLoader
from toontown.toonbase.ToontownGlobals import *
import SkyUtil
class TutorialHood(ToonHood.ToonHood):
def __init__(self, parentFSM, doneEvent, dnaStore, hoodId):
ToonHood.ToonHood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId)
self.id = Tutorial
self.townLoaderClass = TutorialTownLoader.TutorialTownLoader
self.safeZoneLoaderClass = None
self.storageDNAFile = None
self.skyFile = 'phase_3.5/models/props/TT_sky'
self.titleColor = (1.0, 0.5, 0.4, 1.0)
return
def load(self):
ToonHood.ToonHood.load(self)
self.parentFSM.getStateNamed('TutorialHood').addChild(self.fsm)
def unload(self):
self.parentFSM.getStateNamed('TutorialHood').removeChild(self.fsm)
ToonHood.ToonHood.unload(self)
def enter(self, *args):
ToonHood.ToonHood.enter(self, *args)
def exit(self):
ToonHood.ToonHood.exit(self)
def skyTrack(self, task):
return SkyUtil.cloudSkyTrack(task)
def startSky(self):
SkyUtil.startCloudSky(self)
| apache-2.0 | -3,643,774,705,771,048,400 | 30.263158 | 80 | 0.686027 | false |
adaptivelogic/django-cms | cms/tests/po.py | 16 | 2695 | from __future__ import with_statement
from cms.test_utils.util.context_managers import TemporaryDirectory
from django.core.management.base import CommandError
from django.core.management.commands.compilemessages import has_bom
from django.test.testcases import TestCase
import os
import shutil
import subprocess
import sys
THIS_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', 'locale'))
def compile_messages():
basedirs = [os.path.join('conf', 'locale'), 'locale']
if os.environ.get('DJANGO_SETTINGS_MODULE'):
from django.conf import settings
basedirs.extend(settings.LOCALE_PATHS)
# Gather existing directories.
basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))
if not basedirs:
raise CommandError("This script should be run from the Django SVN tree or your project or app tree, or with the settings module specified.")
for basedir in basedirs:
for dirpath, dirnames, filenames in os.walk(basedir):
for f in filenames:
if f.endswith('.po'):
fn = os.path.join(dirpath, f)
if has_bom(fn):
raise CommandError("The %s file has a BOM (Byte Order Mark). Django only supports .po files encoded in UTF-8 and without any BOM." % fn)
pf = os.path.splitext(fn)[0]
                    # Build the msgfmt invocation as an argument list (no shell is
                    # involved), so no extra quoting or escaping is required. The
                    # command line is the same on every platform.
                    bits = ['msgfmt', '--check-format', '-o', pf + '.mo', pf + '.po']
pipe = subprocess.Popen(bits, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stderr = pipe.communicate()[-1]
if pipe.returncode != 0:
return False, stderr
return True, ''
class PoTest(TestCase):
def test_po_sanity(self):
with TemporaryDirectory() as tmpdir:
shutil.copytree(SOURCE_DIR, os.path.join(tmpdir, 'locale'))
olddir = os.getcwd()
os.chdir(tmpdir)
ok, stderr = compile_messages()
os.chdir(olddir)
self.assertTrue(ok, stderr) | bsd-3-clause | 4,796,568,711,538,916,000 | 44.694915 | 160 | 0.596289 | false |
libo/Enigma2 | lib/python/Screens/CopyFiles.py | 2 | 1518 | import os
import Components.Task
from twisted.internet import reactor, threads, task
class FailedPostcondition(Components.Task.Condition):
def __init__(self, exception):
self.exception = exception
def getErrorMessage(self, task):
return str(self.exception)
def check(self, task):
return self.exception is None
class CopyFileTask(Components.Task.PythonTask):
def openFiles(self, fileList):
self.callback = None
self.fileList = fileList
self.handles = [(open(fn[0], 'rb'), open(fn[1], 'wb')) for fn in fileList]
self.end = 0
for src,dst in fileList:
try:
self.end += os.stat(src).st_size
except:
print "Failed to stat", src
if not self.end:
self.end = 1
print "[CopyFileTask] size:", self.end
def work(self):
print "[CopyFileTask] handles ", len(self.handles)
try:
for src, dst in self.handles:
while 1:
if self.aborted:
print "[CopyFileTask] aborting"
raise Exception, "Aborted"
d = src.read(65536)
if not d:
src.close()
dst.close()
# EOF
break
dst.write(d)
self.pos += len(d)
except:
# In any event, close all handles
for src, dst in self.handles:
src.close()
dst.close()
for s,d in self.fileList:
# Remove incomplete data.
try:
os.unlink(d)
except:
pass
raise
def copyFiles(fileList, name):
name = _("Copy") + " " + name
job = Components.Task.Job(name)
task = CopyFileTask(job, name)
task.openFiles(fileList)
Components.Task.job_manager.AddJob(job)
| gpl-2.0 | 1,773,166,990,751,767,600 | 23.095238 | 76 | 0.654809 | false |
MontmereLimited/django-lean | django_lean/experiments/tests/test_management.py | 2 | 1943 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import logging
l = logging.getLogger(__name__)
from datetime import timedelta
from django.conf import settings
from django.core.management.base import CommandError
from django_lean.experiments.models import (Experiment, DailyEngagementReport,
DailyConversionReport)
from django_lean.experiments.management.commands import (
update_experiment_reports
)
from django_lean.experiments.tests.utils import patch, TestCase
class TestManagement(TestCase):
def setUp(self):
self.experiment = Experiment(name="test_experiment")
self.experiment.save()
self.experiment.state = Experiment.ENABLED_STATE
self.experiment.save()
self.experiment.start_date = (self.experiment.start_date -
timedelta(days=5))
self.experiment.save()
def testManageCommand(self):
with patch(settings, 'LEAN_ENGAGEMENT_CALCULATOR',
'django_lean.experiments.testsettings.SimpleEngagementCalculator'):
#make sure the manage.py command that generates daily stats work
#Running with arguments should raise Exception
self.assertRaises(CommandError,
update_experiment_reports.Command().handle,
"some", "args")
#This is what manage.py will call
self.runner = update_experiment_reports.Command().run_from_argv
#Run the reports
self.runner(['manage.py', 'update_experiment_reports'])
#Make sure they were generated
self.assertEqual(5, DailyEngagementReport.objects.filter(
experiment=self.experiment).count())
self.assertEqual(5, DailyConversionReport.objects.filter(
experiment=self.experiment).count())
| bsd-3-clause | -5,092,741,849,169,924,000 | 39.479167 | 87 | 0.632012 | false |
neilmunday/pespad | pespad.py | 1 | 17179 | #!/usr/bin/env python2
#
# This file is part of PESPad.
#
# PESPad allows any device that can run a web browser to be used as
# control pad for Linux based operating systems.
#
# Copyright (C) 2014 Neil Munday ([email protected])
#
# PESPad is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PESPad is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PESPad. If not, see <http://www.gnu.org/licenses/>.
#
"""
pespad will launch an HTTP 1.1 daemon on the chosen port of the host.
When a client connects they will be served a web-based interface that
includes a control pad. Once a control pad has been requested, pespad
will try to create a new joystick device on the host using the uinput
Linux kernel module.
Button presses on the control pad in the browser will then be sent
to the joystick device and any programs that are listening for joystick
input will respond accordingly.
Each client is assigned their own joystick device.
pespad has been successfully used with the Pi Entertainment System,
available from http://pes.mundayweb.com
It has also been used with other Linux operating systems.
Note: if using with RetroArch, make sure you set the joystick driver
to "linuxraw".
Acknowledgements:
HTTP server code based on code from: http://blog.wachowicz.eu/?p=256
Daemon class based on code from: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
Sencha for their SenchaTouch 1.1 JavaScript framework
"""
import atexit
import argparse
import logging
import os
import sys
import signal
import socket
import time
import uinput
from signal import SIGTERM
import threading
CLIENT_TIMEOUT = 1800 # 30 mins
MAX_CLIENTS = 16 # max number of active clients
def shutdownServer(sig, dummy):
global server
if server:
server.shutdown()
logging.shutdown()
sys.exit(0)
def stopDaemon(sig, dummy):
global server
if server:
server.shutdown()
class Daemon(object):
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, loglevel, logfile=None, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.__stdin = stdin
self.__stdout = stdout
self.__stderr = stderr
self.__pidfile = pidfile
self.__logfile = logfile
self.__loglevel = loglevel
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
logging.shutdown()
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
logging.shutdown()
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
logging.shutdown()
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
logging.shutdown()
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.__stdin, 'r')
so = file(self.__stdout, 'a+')
se = file(self.__stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.__pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.__pidfile)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
def start(self):
"""
Start the daemon
"""
if self.__logfile:
# remove old log file
if os.path.exists(self.__logfile):
os.remove(self.__logfile)
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S', filename=self.__logfile, level=self.__loglevel)
logging.debug("Created new log file")
else:
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S', level=self.__loglevel)
if self.status():
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.__pidfile)
logging.shutdown()
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def status(self):
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.__pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
return True
return False
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.__pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.__pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.__pidfile):
os.remove(self.__pidfile)
else:
print str(err)
logging.shutdown()
sys.exit(1)
class Button(object):
def __init__(self, uinputCode, pressed=False):
self.__uinputCode = uinputCode
self.__pressed = pressed
def changeState(self):
self.__pressed = not self.__pressed
return self.__pressed
def getCode(self):
return self.__uinputCode
class Client(object):
def __del__(self):
logging.debug('Deleting Client object for %s' % self.__ip)
del self.__device
def __init__(self, ip):
self.__ip = ip
self.__device = uinput.Device([uinput.BTN_JOYSTICK, uinput.BTN_DPAD_UP, uinput.BTN_DPAD_DOWN, uinput.BTN_DPAD_LEFT, uinput.BTN_DPAD_RIGHT, uinput.BTN_START, uinput.BTN_SELECT, uinput.BTN_0, uinput.BTN_1, uinput.BTN_2, uinput.BTN_3, uinput.BTN_4, uinput.BTN_5, uinput.BTN_6, uinput.BTN_7, uinput.BTN_8, uinput.BTN_9], "pespad")
self.__lastContact = int(time.time())
def emit(self, btn, state):
self.__device.emit(btn, state)
def getDevice(self):
return self.__device
def getIp(self):
return self.__ip
def getLastContact(self):
return self.__lastContact
def updateContactTime(self):
self.__lastContact = int(time.time())
class PESPadServer(Daemon):
def __init__(self, port, pidfile, loglevel, logfile=None):
super(PESPadServer, self).__init__(pidfile, loglevel, logfile)
self.__host = ''
self.__port = port
self.__baseDir = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
self.__webroot = self.__baseDir + os.sep + 'web'
self.__checkDir(self.__webroot)
self.__logfile = logfile
self.__loglevel = loglevel
self.__socket = None
# BTN mappings:
# BTN_JOYSTICK = Exit
# 0 = Load state
# 1 = Save state
# 2 = A
# 3 = B
# 4 = X
# 5 = Y
# 6 = left shoulder
# 7 = right shoulder
# 8 = left shoulder 2
# 9 = right shoulder 2
self.__jsMap = {}
self.__jsMap['exit'] = Button(uinput.BTN_JOYSTICK)
self.__jsMap['start'] = Button(uinput.BTN_START)
self.__jsMap['select'] = Button(uinput.BTN_SELECT)
self.__jsMap['load'] = Button(uinput.BTN_0)
self.__jsMap['save'] = Button(uinput.BTN_1)
self.__jsMap['a'] = Button(uinput.BTN_2)
self.__jsMap['b'] = Button(uinput.BTN_3)
self.__jsMap['x'] = Button(uinput.BTN_4)
self.__jsMap['y'] = Button(uinput.BTN_5)
self.__jsMap['l1shoulder'] = Button(uinput.BTN_6)
self.__jsMap['r1shoulder'] = Button(uinput.BTN_7)
self.__jsMap['l2shoulder'] = Button(uinput.BTN_8)
self.__jsMap['r2shoulder'] = Button(uinput.BTN_9)
self.__jsMap['up'] = Button(uinput.BTN_DPAD_UP)
self.__jsMap['down'] = Button(uinput.BTN_DPAD_DOWN)
self.__jsMap['left'] = Button(uinput.BTN_DPAD_LEFT)
self.__jsMap['right'] = Button(uinput.BTN_DPAD_RIGHT)
self.__clients = {}
self.__clientCleanUpThread = None
def __checkDir(self, dir):
if not os.path.exists(dir):
self.__exit("Error: %s does not exist!" % dir)
if not os.path.isdir(dir):
self.__exit("Error: %s is not a directory!" % dir)
def __createHeaders(self, code):
s = ''
if code == 200:
s = "HTTP/1.1 200 OK\n"
elif code == 404:
s = "HTTP/1.1 404 Not Found\n"
elif code == 500:
s = "HTTP/1.1 500 Internal server error\n"
s += "Date: " + time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "\n"
s += "Server: PES HTTP Server\n"
s += "Connection: close\n\n"
return s
def createSocket(self):
if self.__logfile:
# remove old log file
if os.path.exists(self.__logfile):
os.remove(self.__logfile)
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S', filename=self.__logfile, level=self.__loglevel)
logging.debug("Created new log file")
else:
logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s', datefmt='%Y/%m/%d %H:%M:%S', level=self.__loglevel)
# try to get the socket before daemonizing
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
i = 0
acquired = False
while i < 10:
try:
logging.info('Attempting to launching HTTP server on %s:%d' % (self.__host, self.__port))
self.__socket.bind((self.__host, self.__port))
logging.info('Success!')
acquired = True
break
except Exception, e:
logging.info("Could not acquire port %d (attempt #%d)" % (self.__port, i + 1))
time.sleep(1)
i += 1
if not acquired:
logging.critical("Could not acquire port after %d attempts!" % i)
sys.stderr.write("Could not acquire port after %d attempts!\n" % i)
sys.exit(1)
def __exit(self, msg):
print msg
self.shutdown()
logging.shutdown()
sys.exit(1)
def __listen(self):
logging.info("Starting PESPad HTTP server")
while True:
logging.info("Waiting for connections...")
self.__socket.listen(3) # no. of queued connections
conn, addr = self.__socket.accept()
ip = addr[0]
logging.info("Got connection from %s" % ip)
data = conn.recv(1024)
s = bytes.decode(data)
requestMethod = s.split(' ')[0]
logging.debug("Method: %s" % requestMethod)
logging.debug("Request body: %s" % s)
if requestMethod == "GET" or requestMethod == "HEAD":
content = ''
f = s.split(' ')[1]
f = f.split('?')[0] # ignore arguments
if f[0:4] == '/js/':
# handle remote joystick input
btnStr = f[4:]
if btnStr == 'connect':
# a JS device has been requested
if not ip in self.__clients:
logging.info("Creating device for %s" % ip)
try:
self.__clients[ip] = Client(ip)
content = "{ \"success\": true }\n"
except Exception, e:
logging.debug("Exception occurred when trying to create device:\n%s" % e)
content = "{ \"success\": false, \"error\": \"Could not create uinput device!\" }"
else:
content = "{ \"success\": true }\n"
headers = self.__createHeaders(200)
elif btnStr == 'disconnect':
headers = self.__createHeaders(200)
if not ip in self.__clients:
content = "{ \"success\": true }"
else:
self.removeClient(ip)
content = "{ \"success\": true }"
elif not btnStr in self.__jsMap:
logging.debug("Unknown button: %s from %s" % (btnStr, ip))
headers, content = self.__pageNotFound(f)
else:
headers = self.__createHeaders(200)
if not ip in self.__clients:
logging.info("No device found for %s - ignoring request" % ip)
headers = self.__createHeaders(200)
content = "{ \"success\": false, \"error\": \"Device not recognised, please refresh your browser\" }\n"
else:
btn = self.__jsMap[btnStr]
logging.debug("%s button press processed for %s" % (btnStr, ip))
                            self.__clients[ip].emit(btn.getCode(), int(btn.changeState()))
                            # record activity so the cleanup thread does not reap active clients
                            self.__clients[ip].updateContactTime()
content = "{ \"success\": true }\n"
else:
if f == '/':
f = '/index.html'
f = self.__webroot + f
logging.debug("Serving file: %s" % f)
try:
if requestMethod == 'GET':
handler = open(f, 'rb')
content = handler.read()
handler.close()
headers = self.__createHeaders(200)
except Exception, e:
logging.info("File %s not found" % f)
headers, content = self.__pageNotFound(f)
response = headers.encode()
if requestMethod == "GET":
response += content
conn.send(response)
logging.debug("Closing connection to client")
conn.close()
else:
logging.info("Unknown/unsupported request method: %s" % requestMethod)
def getClients(self):
return self.__clients
def __pageNotFound(self, f):
headers = self.__createHeaders(404)
content = b"<html><head><title>File not found</title><head><body>File %s not found on this server</body></html>" % f
return (headers, content)
def removeClient(self, ip):
if ip in self.__clients:
logging.info('Removing joystick device for client %s' % ip)
del self.__clients[ip]
def restart(self):
sys.stderr.write("restart operation is not supported by PESPad server. Please stop the server yourself and then try to restart. This is beause the port takes time to free\n")
def run(self):
if not self.__socket:
logging.critical("socket not created - did you call createSocket first?")
logging.shutdown()
sys.exit(1)
# start client cleanup thread
self.__clientCleanUpThread = ClientCleanUpThread(self)
self.__clientCleanUpThread.start()
self.__listen()
def shutdown(self):
try:
logging.debug('Stopping clean up thread...')
if self.__clientCleanUpThread:
self.__clientCleanUpThread.stop()
logging.info('Stopping the server...')
self.__socket.shutdown(socket.SHUT_RDWR)
logging.info('Success!')
except Exception, e:
logging.warning('Failed to shutdown the socket!')
def start(self):
if self.status():
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.__pidfile)
logging.shutdown()
sys.exit(1)
self.createSocket()
self.daemonize()
self.run()
class ClientCleanUpThread(threading.Thread):
def __init__(self, server):
threading.Thread.__init__(self)
self.__stop = False
self.__sleep = 10
self.__server = server
logging.debug('ClientCleanUpThread created')
def run(self):
logging.debug('ClientCleanUp thread started')
while True:
if self.__stop:
logging.debug('ClientCleanUp thread stopped')
return
now = time.time()
clients = self.__server.getClients()
logging.debug('Checking %d client(s) for recent activity' % len(clients))
clientsToDelete = [] # can't modify dictionary whilst iterating over it so use a list to store candidates
client = None
for client in clients.itervalues():
ip = client.getIp()
if now - client.getLastContact() > CLIENT_TIMEOUT:
clientsToDelete.append(ip)
else:
logging.debug('Client %s is still active' % ip)
            del client # remove reference to object so that it can be deleted later
if len(clientsToDelete) > 0:
for ip in clientsToDelete:
self.__server.removeClient(ip)
time.sleep(self.__sleep)
def stop(self):
self.__stop = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Launch the PESPad server', add_help=True)
parser.add_argument('-v', '--verbose', help='Turn on debug messages', dest='verbose', action='store_true')
parser.add_argument('-p', '--port', help='Listen on this port', type=int, dest='port', default=80)
parser.add_argument('-l', '--log', help='File to log messages to', type=str, dest='logfile')
parser.add_argument('-d', '--daemon', help='Run PESPad as a daemon', dest='daemon', choices=['start', 'stop', 'status'])
args = parser.parse_args()
if args.daemon and not args.logfile:
sys.stderr.write("Please specify a log file when running as a daemon\n")
sys.exit(1)
logLevel = logging.INFO
if args.verbose:
logLevel = logging.DEBUG
pidfile = '/tmp/pespad.pid'
server = PESPadServer(args.port, pidfile, logLevel, args.logfile)
if args.daemon:
if args.daemon == 'start':
#signal.signal(signal.SIGTERM, stopDaemon)
server.start()
elif args.daemon == 'stop':
server.stop()
elif args.daemon == 'status':
if server.status():
print "Server is running"
else:
print "Server is not running"
else:
signal.signal(signal.SIGTERM, shutdownServer)
signal.signal(signal.SIGINT, shutdownServer)
server.createSocket()
server.run()
logging.shutdown()
sys.exit(0)
| gpl-3.0 | -8,410,798,589,090,936,000 | 28.215986 | 328 | 0.656965 | false |
ianastewart/cwltc-admin | cardless/urls.py | 1 | 2140 | # GO CARDLESS URLs
from django.urls import path
from .views import (
RedirectFlowView,
MandateCreateView,
MandateSuccessView,
PayoutListView,
PayoutDetailView,
PaymentDetailView,
PaymentCreateView,
PaymentSuccessView,
PaymentFailureView,
PaymentProcessView,
CardlessImportView,
CustomerDetailView,
)
urlpatterns = [
path("redirectflow/", RedirectFlowView.as_view(), name="cardless_redirect_flow"),
path(
"mandate/create/i/<str:invoice_token>/",
MandateCreateView.as_view(),
name="cardless_mandate_create_i",
),
path(
"mandate/create/p/<str:person_token>/",
MandateCreateView.as_view(),
name="cardless_mandate_create_p",
),
path(
"mandate/create/pi/<str:person_token>/<str:invoice_token>/",
MandateCreateView.as_view(),
name="cardless_mandate_create_pi",
),
path(
"mandate/success/",
MandateSuccessView.as_view(),
name="cardless_mandate_success",
),
path(
"customer/detail/<str:person_id>/",
CustomerDetailView.as_view(),
name="cardless_customer_detail",
),
path("payout/list/", PayoutListView.as_view(), name="cardless_payout_list"),
path(
"payout/detail/<str:id>/",
PayoutDetailView.as_view(),
name="cardless_payout_detail",
),
path(
"payment/detail/<str:id>/",
PaymentDetailView.as_view(),
name="cardless_payment_detail",
),
path(
"payment/create/<str:invoice_token>/",
PaymentCreateView.as_view(),
name="cardless_payment_create",
),
path(
"payment/success/<str:invoice_token>/",
PaymentSuccessView.as_view(),
name="cardless_payment_success",
),
path(
"payment/failure/<str:invoice_token>/",
PaymentFailureView.as_view(),
name="cardless_payment_failure",
),
path(
"payment/process/",
PaymentProcessView.as_view(),
name="cardless_payment_process",
),
path("import/", CardlessImportView.as_view(), name="cardless_import"),
]
| mit | 280,400,045,023,137,820 | 26.792208 | 85 | 0.606542 | false |
Ardesco/selenium | py/test/selenium/webdriver/firefox/ff_profile_tests.py | 1 | 5326 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import os
import zipfile
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
try:
unicode
except NameError:
unicode = str
from selenium.webdriver import Firefox, FirefoxProfile
def test_that_we_can_accept_a_profile(capabilities, webserver):
profile1 = FirefoxProfile()
profile1.set_preference("browser.startup.homepage_override.mstone", "")
profile1.set_preference("startup.homepage_welcome_url", webserver.where_is('simpleTest.html'))
profile1.update_preferences()
profile2 = FirefoxProfile(profile1.path)
driver = Firefox(
capabilities=capabilities,
firefox_profile=profile2)
title = driver.title
driver.quit()
assert "Hello WebDriver" == title
def test_that_prefs_are_written_in_the_correct_format():
profile = FirefoxProfile()
profile.set_preference("sample.preference", "hi there")
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference"]
encoded = profile.encoded
decoded = base64.b64decode(encoded)
with BytesIO(decoded) as fp:
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith("user.js"):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference",'):
assert line.endswith(b'hi there");')
# there should be only one user.js
break
def test_that_unicode_prefs_are_written_in_the_correct_format():
profile = FirefoxProfile()
profile.set_preference('sample.preference.2', unicode('hi there'))
profile.update_preferences()
assert 'hi there' == profile.default_preferences["sample.preference.2"]
encoded = profile.encoded
decoded = base64.b64decode(encoded)
with BytesIO(decoded) as fp:
zip = zipfile.ZipFile(fp, "r")
for entry in zip.namelist():
if entry.endswith('user.js'):
user_js = zip.read(entry)
for line in user_js.splitlines():
if line.startswith(b'user_pref("sample.preference.2",'):
assert line.endswith(b'hi there");')
# there should be only one user.js
break
def test_that_integer_prefs_are_written_in_the_correct_format():
profile = FirefoxProfile()
profile.set_preference("sample.int.preference", 12345)
profile.update_preferences()
assert 12345 == profile.default_preferences["sample.int.preference"]
def test_that_boolean_prefs_are_written_in_the_correct_format():
profile = FirefoxProfile()
profile.set_preference("sample.bool.preference", True)
profile.update_preferences()
assert profile.default_preferences["sample.bool.preference"] is True
def test_profiles_do_not_share_preferences():
profile1 = FirefoxProfile()
profile1.accept_untrusted_certs = False
profile2 = FirefoxProfile()
# Default is true. Should remain so.
assert profile2.default_preferences["webdriver_accept_untrusted_certs"] is True
def test_add_extension_web_extension_without_id(capabilities, webserver):
current_directory = os.path.dirname(os.path.realpath(__file__))
root_directory = os.path.join(current_directory, '..', '..', '..', '..', '..')
extension_path = os.path.join(root_directory, 'third_party', 'firebug', 'mooltipass-1.1.87.xpi')
profile = FirefoxProfile()
profile.add_extension(extension_path)
driver = Firefox(capabilities=capabilities, firefox_profile=profile)
profile_path = driver.firefox_profile.path
extension_path_in_profile = os.path.join(profile_path, 'extensions', '[email protected]')
assert os.path.exists(extension_path_in_profile)
driver.quit()
def test_add_extension_legacy_extension(capabilities, webserver):
current_directory = os.path.dirname(os.path.realpath(__file__))
root_directory = os.path.join(current_directory, '..', '..', '..', '..', '..')
extension_path = os.path.join(root_directory, 'third_party', 'firebug', 'firebug-1.5.0-fx.xpi')
profile = FirefoxProfile()
profile.add_extension(extension_path)
driver = Firefox(capabilities=capabilities, firefox_profile=profile)
profile_path = driver.firefox_profile.path
extension_path_in_profile = os.path.join(profile_path, 'extensions', '[email protected]')
assert os.path.exists(extension_path_in_profile)
driver.quit()
| apache-2.0 | -8,314,822,997,736,182,000 | 36.77305 | 106 | 0.692264 | false |
liurenqiu520/AutobahnPython | examples/twisted/wamp/basic/rpc/slowsquare/backend.py | 3 | 1552 | ###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from twisted.internet.defer import Deferred, \
inlineCallbacks, \
returnValue
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.util import sleep
class Component(ApplicationSession):
"""
A math service application component.
"""
def __init__(self, realm = "realm1"):
ApplicationSession.__init__(self)
self._realm = realm
def onConnect(self):
self.join(self._realm)
def onJoin(self, details):
def square(x):
return x * x
self.register(square, 'com.math.square')
@inlineCallbacks
def slowsquare(x):
yield sleep(1)
returnValue(x * x)
self.register(slowsquare, 'com.math.slowsquare')
| apache-2.0 | -3,504,484,263,271,957,000 | 27.218182 | 79 | 0.583763 | false |
sorrison/python-alogger | alogger/parsers/slurm.py | 1 | 4969 | import datetime
import time
# Maybe there is something in datetime that takes an ISO std string but I cannot find it, DRB.
def DateTime_from_String(datetimeSt):
"""Gets a date time string like 2010-09-10T15:54:18 and retuns a datetime object
raises a ValueError if it all goes wrong """
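    # e.g. DateTime_from_String("2010-09-10T15:54:18")
    #      -> datetime.datetime(2010, 9, 10, 15, 54, 18)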
DayTime = datetimeSt.split('T')
if len(DayTime) != 2:
raise ValueError
Date = DayTime[0].split('-')
if len(Date) != 3:
raise ValueError
Time = DayTime[1].split(':')
if len(Time) != 3:
raise ValueError
dt = datetime.datetime(
year=int(Date[0]),
month=int(Date[1]),
day=int(Date[2]),
hour=int(Time[0]),
minute=int(Time[1]),
second=int(Time[2])
)
return dt
def SecondsFromSlurmTime(timeString):
"""This function could be merged into get_in_seconds above but its here to leave
clear break between the Slurm addition and original.
It deals with the fact that slurm may return est_wall_time as
00nnn, 00:00:00 or 0-00:00:00.
"""
if timeString.find(':') == -1: # straight second format
return int(timeString)
if timeString.find('-') == -1: # must be a (eg) 10:00:00 case
Seconds = ((int(timeString.split(':')[0]) * 3600) + ((int(timeString.split(':')[1]) * 60)) + int(timeString.split(':')[2]))
else:
DayRest = timeString.split('-')
Seconds = int(DayRest[0]) * 3600 * 24
Seconds = Seconds + (int(DayRest[1].split(':')[0]) * 3600)
Seconds = Seconds + ((int(DayRest[1].split(':')[1]) * 60))
Seconds = Seconds + int(DayRest[1].split(':')[2])
return Seconds
def slurm_to_dict(line):
"""Parses a Slurm log file into dictionary"""
raw_data = line.split(' ')
data = {}
formatted_data = {}
# break up line into a temp dictionary
for d in raw_data:
try:
key, value = d.split('=')
data[key] = value
except ValueError:
continue
# Note that the order these are done in is important !
formatted_data['jobid'] = data['JobId']
formatted_data['cores'] = int(data['ProcCnt'])
formatted_data['user'] = data['UserId'][:data['UserId'].find('(')] # 'mike(543)' - remove the uid in brackets.
formatted_data['project'] = data['Account']
# If SubmitTime is invalid and non-existant use StartTime instead.
try:
formatted_data['qtime'] = DateTime_from_String(data['SubmitTime']).isoformat(' ') # '2010-07-30T15:34:39'
formatted_data['ctime'] = DateTime_from_String(data['SubmitTime']).isoformat(' ') # for practical purposes, same as etime here.
except (ValueError, KeyError):
formatted_data['qtime'] = DateTime_from_String(data['StartTime']).isoformat(' ')
formatted_data['ctime'] = DateTime_from_String(data['StartTime']).isoformat(' ')
# old records don't have a submit time time.
# If data['StartTime'] or data['EndTime'] is bad or not given, the following statements will fail
formatted_data['start'] = DateTime_from_String(data['StartTime']).isoformat(' ')
# formatted_data['etime'] # don't care
formatted_data['act_wall_time'] = int(time.mktime(DateTime_from_String(data['EndTime']).timetuple())) - int(time.mktime(DateTime_from_String(data['StartTime']).timetuple()))
formatted_data['record_time'] = DateTime_from_String(data['StartTime']).isoformat(' ')
formatted_data['cpu_usage'] = formatted_data['act_wall_time'] * formatted_data['cores']
formatted_data['jobname'] = data['Name'] # Note that this is the name of the script, not --jobname
try:
formatted_data['est_wall_time'] = SecondsFromSlurmTime(data['TimeLimit']) # might be 5-00:00:00 or 18:00:00
except ValueError:
formatted_data['est_wall_time'] = -1 # Sometimes returns 'UNLIMITED' !
try:
formatted_data['exit_status'] = int(data['JobState']) # might be "COMPLETED", "CANCELLED", "TIMEOUT" and may have multiple entries per line !
except ValueError:
formatted_data['exit_status'] = 0 # Watch out, Sam says dbase expects an int !!!
formatted_data['queue'] = 'UNKNOWN'
formatted_data['mem'] = 0
formatted_data['vmem'] = 0
formatted_data['list_mem'] = 0
formatted_data['list_vmem'] = 0
formatted_data['list_pmem'] = 0
formatted_data['list_pvmem'] = 0
formatted_data['etime'] = formatted_data['qtime']
# Things we don't seem to have available, would like qtime and est_wall_time
# mem, qtime, list_pmem, list_pvmem, queue, vmem, list_vmem, jobname.
# Note that "out of the box" slurm does not report on Queue or Creation time.
return formatted_data
| gpl-3.0 | 8,470,292,470,819,105,000 | 47.242718 | 177 | 0.593882 | false |
adityacs/ansible | lib/ansible/modules/network/ios/ios_system.py | 12 | 12484 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'core',
'version': '1.0'
}
DOCUMENTATION = """
---
module: ios_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco IOS devices
description:
- This module provides declarative management of node system attributes
on Cisco IOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- The C(hostname) argument will configure the device hostname
parameter on Cisco IOS devices. The C(hostname) value is an
ASCII string value.
required: false
default: null
domain_name:
description:
      - The C(domain_name) argument will configure the IP domain name
on the remote device to the provided value. The C(domain_name)
argument should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name
required: false
default: null
domain_search:
description:
      - The C(domain_search) argument provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
required: false
default: null
lookup_source:
description:
- The C(lookup_source) argument provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
required: false
default: null
lookup_enabled:
description:
- The C(lookup_enabled) argument provides administrative control
for enabling or disabling DNS lookups. When this argument is
set to True, lookups are performed and when it is set to False,
lookups are not performed.
required: false
default: null
choices: ['true', 'false']
name_servers:
description:
      - The C(name_servers) argument accepts a list of DNS name servers by
        way of either FQDN or IP address to use to perform name resolution
        lookups. This argument accepts a list of DNS servers. See
        examples.
required: false
default: null
state:
description:
- The C(state) argument configures the state of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain name
ios_system:
hostname: ios01
domain_name: eng.ansible.com
    domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
ios_system:
state: absent
- name: configure DNS lookup sources
ios_system:
lookup_source: MgmtEth0/0/CPU0/0
lookup_enabled: yes
- name: configure name servers
ios_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname ios01
- ip domain name eng.ansible.com
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ios import get_config, load_config
from ansible.module_utils.ios import ios_argument_spec, check_args
from ansible.module_utils.network_common import ComplexList
_CONFIGURED_VRFS = None
def has_vrf(module, vrf):
global _CONFIGURED_VRFS
if _CONFIGURED_VRFS is not None:
return vrf in _CONFIGURED_VRFS
config = get_config(module)
_CONFIGURED_VRFS = re.findall('vrf definition (\S+)', config)
return vrf in _CONFIGURED_VRFS
def requires_vrf(module, vrf):
if not has_vrf(module, vrf):
module.fail_json(msg='vrf %s is not configured' % vrf)
def diff_list(want, have):
adds = [w for w in want if w not in have]
removes = [h for h in have if h not in want]
return (adds, removes)
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
if state == 'absent':
if have['hostname'] != 'Router':
commands.append('no hostname')
if have['lookup_source']:
commands.append('no ip domain lookup source-interface %s' % have['lookup_source'])
if have['lookup_enabled'] is False:
commands.append('ip domain lookup')
vrfs = set()
for item in have['domain_name']:
if item['vrf'] and item['vrf'] not in vrfs:
vrfs.add(item['vrf'])
commands.append('no ip domain name vrf %s' % item['vrf'])
elif None not in vrfs:
vrfs.add(None)
commands.append('no ip domain name')
vrfs = set()
for item in have['domain_search']:
if item['vrf'] and item['vrf'] not in vrfs:
vrfs.add(item['vrf'])
commands.append('no ip domain list vrf %s' % item['vrf'])
elif None not in vrfs:
vrfs.add(None)
commands.append('no ip domain list')
vrfs = set()
for item in have['name_servers']:
if item['vrf'] and item['vrf'] not in vrfs:
vrfs.add(item['vrf'])
commands.append('no ip name-server vrf %s' % item['vrf'])
elif None not in vrfs:
vrfs.add(None)
commands.append('no ip name-server')
elif state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('lookup_source'):
commands.append('ip domain lookup source-interface %s' % want['lookup_source'])
if needs_update('lookup_enabled'):
cmd = 'ip domain lookup'
if want['lookup_enabled'] is False:
cmd = 'no %s' % cmd
commands.append(cmd)
if want['domain_name']:
adds, removes = diff_list(want['domain_name'], have['domain_name'])
for item in removes:
if item['vrf']:
commands.append('no ip domain name vrf %s %s' % (item['vrf'], item['name']))
else:
commands.append('no ip domain name %s' % item['name'])
for item in adds:
if item['vrf']:
requires_vrf(module, item['vrf'])
commands.append('ip domain name vrf %s %s' % (item['vrf'], item['name']))
else:
commands.append('ip domain name %s' % item['name'])
if want['domain_search']:
adds, removes = diff_list(want['domain_search'], have['domain_search'])
for item in removes:
if item['vrf']:
commands.append('no ip domain list vrf %s %s' % (item['vrf'], item['name']))
else:
commands.append('no ip domain list %s' % item['name'])
for item in adds:
if item['vrf']:
requires_vrf(module, item['vrf'])
commands.append('ip domain list vrf %s %s' % (item['vrf'], item['name']))
else:
commands.append('ip domain list %s' % item['name'])
if want['name_servers']:
adds, removes = diff_list(want['name_servers'], have['name_servers'])
for item in removes:
if item['vrf']:
commands.append('no ip name-server vrf %s %s' % (item['vrf'], item['server']))
else:
commands.append('no ip name-server %s' % item['server'])
for item in adds:
if item['vrf']:
requires_vrf(module, item['vrf'])
commands.append('ip name-server vrf %s %s' % (item['vrf'], item['server']))
else:
commands.append('ip name-server %s' % item['server'])
return commands
def parse_hostname(config):
match = re.search('^hostname (\S+)', config, re.M)
return match.group(1)
def parse_domain_name(config):
match = re.findall('^ip domain name (?:vrf (\S+) )*(\S+)', config, re.M)
matches = list()
for vrf, name in match:
if not vrf:
vrf = None
matches.append({'name': name, 'vrf': vrf})
return matches
def parse_domain_search(config):
match = re.findall('^ip domain list (?:vrf (\S+) )*(\S+)', config, re.M)
matches = list()
for vrf, name in match:
if not vrf:
vrf = None
matches.append({'name': name, 'vrf': vrf})
return matches
def parse_name_servers(config):
match = re.findall('^ip name-server (?:vrf (\S+) )*(\S+)', config, re.M)
matches = list()
for vrf, server in match:
if not vrf:
vrf = None
matches.append({'server': server, 'vrf': vrf})
return matches
def parse_lookup_source(config):
match = re.search('ip domain lookup source-interface (\S+)', config, re.M)
if match:
return match.group(1)
def map_config_to_obj(module):
config = get_config(module)
return {
'hostname': parse_hostname(config),
'domain_name': parse_domain_name(config),
'domain_search': parse_domain_search(config),
'lookup_source': parse_lookup_source(config),
'lookup_enabled': 'no ip domain lookup' not in config,
'name_servers': parse_name_servers(config)
}
def map_params_to_obj(module):
obj = {
'hostname': module.params['hostname'],
'lookup_source': module.params['lookup_source'],
'lookup_enabled': module.params['lookup_enabled'],
}
domain_name = ComplexList(dict(
name=dict(key=True),
vrf=dict()
), module)
domain_search = ComplexList(dict(
name=dict(key=True),
vrf=dict()
), module)
name_servers = ComplexList(dict(
server=dict(key=True),
vrf=dict()
), module)
for arg, cast in [('domain_name', domain_name),
('domain_search', domain_search),
('name_servers', name_servers)]:
if module.params[arg]:
obj[arg] = cast(module.params[arg])
else:
obj[arg] = None
return obj
def main():
""" Main entry point for Ansible module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(type='list'),
domain_search=dict(type='list'),
name_servers=dict(type='list'),
lookup_source=dict(),
lookup_enabled=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present')
)
argument_spec.update(ios_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 | 8,775,157,290,306,269,000 | 32.026455 | 98 | 0.597244 | false |
abhishekmurthy/Calligra | sheets/plugins/scripting/scripts/csvexport.py | 7 | 4598 | #!/usr/bin/env kross
"""
Sheets python script to export an ISO OpenDocument spreadsheet file to
a comma-separated-value file.
(C)2007 Sebastian Sauer <[email protected]>
http://kross.dipe.org
http://www.calligra.org/sheets
Dual-licensed under LGPL v2+higher and the BSD license.
"""
import os, datetime, sys, traceback, csv
import Kross, KSpread
T = Kross.module("kdetranslation")
class CsvExporter:
def __init__(self, scriptaction):
self.scriptaction = scriptaction
self.currentpath = self.scriptaction.currentPath()
self.forms = Kross.module("forms")
self.dialog = self.forms.createDialog(T.i18n("CSV Export"))
self.dialog.setButtons("Ok|Cancel")
self.dialog.setFaceType("List") #Auto Plain List Tree Tabbed
savepage = self.dialog.addPage(T.i18nc("Options page name", "Save"),T.i18n("Export to CSV File"),"document-save")
self.savewidget = self.forms.createFileWidget(savepage, "kfiledialog:///kspreadcsvexportsave")
self.savewidget.setMode("Saving")
self.savewidget.setFilter("*.csv *.txt|%(1)s\n*|%(2)s" % { '1' : T.i18n("Comma-Separated-Value Files"), '2' : T.i18n("All Files") } )
datapage = self.dialog.addPage(T.i18nc("Options page name", "Export"),T.i18n("Export Sheets and ranges"),"document-export")
self.sheetslistview = KSpread.createSheetsListView(datapage)
self.sheetslistview.setSelectionType("MultiSelect")
self.sheetslistview.setEditorType("Range")
optionspage = self.dialog.addPage(T.i18n("Options"),T.i18n("Comma Separated Value Options"),"configure")
self.optionswidget = self.forms.createWidgetFromUIFile(optionspage, os.path.join(self.currentpath, "csvoptions.ui"))
if self.dialog.exec_loop():
try:
self.doExport()
except:
self.forms.showMessageBox("Error", T.i18n("Error"), "%s" % "".join( traceback.format_exception(sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2]) ))
def getCustomDialect(self):
class CustomDialect(csv.excel): pass
setattr(CustomDialect, 'delimiter', self.optionswidget["DelimiterComboBox"].currentText)
lineterm = self.optionswidget["LineTerminatorComboBox"].currentText.strip()
if lineterm == '\\n': setattr(CustomDialect, 'lineterminator', "\n")
elif lineterm == '\\t': setattr(CustomDialect, 'lineterminator', "\t")
elif lineterm == '\\r': setattr(CustomDialect, 'lineterminator', "\r")
elif lineterm == '\\n\\r': setattr(CustomDialect, 'lineterminator', "\n\r")
elif lineterm == '\\r\\n': setattr(CustomDialect, 'lineterminator', "\r\n")
else: setattr(CustomDialect, 'lineterminator', lineterm)
escapechar = self.optionswidget["EscapeCharComboBox"].currentText
if len(escapechar) != 1: escapechar = None
setattr(CustomDialect, 'escapechar', escapechar)
setattr(CustomDialect, 'doublequote', self.optionswidget["DoubleQuoteCheckBox"].checked)
quotechar = self.optionswidget["QuoteCharComboBox"].currentText
if len(quotechar) != 1: quotechar = '"'
setattr(CustomDialect, 'quotechar', quotechar)
setattr(CustomDialect, 'quoting', self.optionswidget["QuotingCheckBox"].checked)
setattr(CustomDialect, 'skipinitialspace', self.optionswidget["SkipInitialSpaceCheckBox"].checked)
setattr(CustomDialect, 'strict', self.optionswidget["StrictCheckBox"].checked)
return CustomDialect
def doExport(self):
reader = KSpread.reader()
reader.setSheets( self.sheetslistview.sheets() )
#if len(reader.sheetNames()) == 0:
#raise "No sheet to export selected"
csvfilename = self.savewidget.selectedFile()
if not csvfilename:
raise Exception, T.i18n("No CSV file chosen")
if os.path.splitext(csvfilename)[1] == '':
csvfilename += '.csv'
csv.register_dialect("custom", self.getCustomDialect())
csvfile = open(csvfilename,'w')
csvwriter = csv.writer(csvfile, dialect="custom")
def changedSheet(sheetname):
print "changedSheet sheetname=%s" % sheetname
#csvfile.write("# %s\n" % sheetname)
def changedRow(row):
values = reader.currentValues()
#print "changedRow row=%i values=%s" % (row,values)
csvwriter.writerow(values)
reader.connect("changedSheet(QString)",changedSheet)
reader.connect("changedRow(int)",changedRow)
reader.start()
csvfile.close()
CsvExporter( self )
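# For reference, the dialect built in getCustomDialect() boils down to a plain
# csv-module registration like the following (values are illustrative defaults,
# not the options chosen in the dialog):
#
#   import csv
#   csv.register_dialect("custom", delimiter=",", lineterminator="\n",
#                        quotechar='"', doublequote=True, escapechar=None)
#   with open("out.csv", "w") as f:
#       csv.writer(f, dialect="custom").writerow(["a", "b", "c"])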
| gpl-2.0 | -4,655,856,934,443,430,000 | 44.078431 | 168 | 0.66007 | false |
Missuor/StartWithPython | example/django/mysite/polls/urls.py | 1 | 1362 | ##1
# from django.conf.urls import patterns, include, url
# from django.contrib import admin
#
# urlpatterns = patterns('',
# url(r'^polls/', include('polls.urls')),
# url(r'^admin/', include(admin.site.urls)),
# )
##2
# from django.conf.urls import patterns, url
#
# from polls import views
#
# urlpatterns = patterns('',
# # ex: /polls/
# url(r'^$', views.index, name='index'),
# # ex: /polls/5/
# url(r'^(?P<question_id>\d+)/$', views.detail, name='detail'),
# # ex: /polls/5/results/
# url(r'^(?P<question_id>\d+)/results/$', views.results, name='results'),
# # ex: /polls/5/vote/
# url(r'^(?P<question_id>\d+)/vote/$', views.vote, name='vote'),
#
# # the 'name' value as called by the {% url %} template tag
# url(r'^(?P<question_id>\d+)/$', views.detail, name='detail'),
#
# # added the word 'specifics'
# url(r'^specifics/(?P<question_id>\d+)/$', views.detail, name='detail'),
#
# )
##3 --> views.py ##6
from django.conf.urls import patterns, url
from polls import views
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name='results'),
url(r'^(?P<question_id>\d+)/vote/$', views.vote, name='vote'),
)
| gpl-2.0 | -5,812,133,492,228,304,000 | 29.954545 | 80 | 0.577827 | false |
lscheinkman/nupic | tests/unit/nupic/algorithms/spatial_pooler_py_api_test.py | 9 | 9808 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from mock import Mock, patch, ANY, call
import numpy
import cPickle as pickle
import unittest2 as unittest
from nupic.bindings.math import GetNTAReal
from nupic.bindings.algorithms import SpatialPooler
realType = GetNTAReal()
uintType = "uint32"
class SpatialPoolerAPITest(unittest.TestCase):
"""Tests for SpatialPooler public API"""
def setUp(self):
self.sp = SpatialPooler(columnDimensions=[5], inputDimensions=[5])
def testCompute(self):
# Check that there are no errors in call to compute
inputVector = numpy.ones(5)
activeArray = numpy.zeros(5)
self.sp.compute(inputVector, True, activeArray)
def testGetUpdatePeriod(self):
inParam = 1234
self.sp.setUpdatePeriod(inParam)
outParam = self.sp.getUpdatePeriod()
self.assertEqual(inParam, outParam)
def testGetPotentialRadius(self):
inParam = 56
self.sp.setPotentialRadius(inParam)
outParam = self.sp.getPotentialRadius()
self.assertEqual(inParam, outParam)
def testGetPotentialPct(self):
inParam = 0.4
self.sp.setPotentialPct(inParam)
outParam = self.sp.getPotentialPct()
self.assertAlmostEqual(inParam, outParam)
def testGetGlobalInhibition(self):
inParam = True
self.sp.setGlobalInhibition(inParam)
outParam = self.sp.getGlobalInhibition()
self.assertEqual(inParam, outParam)
inParam = False
self.sp.setGlobalInhibition(inParam)
outParam = self.sp.getGlobalInhibition()
self.assertEqual(inParam, outParam)
def testGetNumActiveColumnsPerInhArea(self):
inParam = 7
self.sp.setNumActiveColumnsPerInhArea(inParam)
outParam = self.sp.getNumActiveColumnsPerInhArea()
self.assertEqual(inParam, outParam)
def testGetLocalAreaDensity(self):
inParam = 0.4
self.sp.setLocalAreaDensity(inParam)
outParam = self.sp.getLocalAreaDensity()
self.assertAlmostEqual(inParam, outParam)
def testGetStimulusThreshold(self):
inParam = 89
self.sp.setStimulusThreshold(inParam)
outParam = self.sp.getStimulusThreshold()
self.assertEqual(inParam, outParam)
def testGetInhibitionRadius(self):
inParam = 4
self.sp.setInhibitionRadius(inParam)
outParam = self.sp.getInhibitionRadius()
self.assertEqual(inParam, outParam)
def testGetDutyCyclePeriod(self):
inParam = 2020
self.sp.setDutyCyclePeriod(inParam)
outParam = self.sp.getDutyCyclePeriod()
self.assertEqual(inParam, outParam)
def testGetBoostStrength(self):
inParam = 78
self.sp.setBoostStrength(inParam)
outParam = self.sp.getBoostStrength()
self.assertEqual(inParam, outParam)
def testGetIterationNum(self):
inParam = 999
self.sp.setIterationNum(inParam)
outParam = self.sp.getIterationNum()
self.assertEqual(inParam, outParam)
def testGetIterationLearnNum(self):
inParam = 666
self.sp.setIterationLearnNum(inParam)
outParam = self.sp.getIterationLearnNum()
self.assertEqual(inParam, outParam)
def testGetSpVerbosity(self):
inParam = 2
self.sp.setSpVerbosity(inParam)
outParam = self.sp.getSpVerbosity()
self.assertEqual(inParam, outParam)
def testGetSynPermTrimThreshold(self):
inParam = 0.7
self.sp.setSynPermTrimThreshold(inParam)
outParam = self.sp.getSynPermTrimThreshold()
self.assertAlmostEqual(inParam, outParam)
def testGetSynPermActiveInc(self):
inParam = 0.567
self.sp.setSynPermActiveInc(inParam)
outParam = self.sp.getSynPermActiveInc()
self.assertAlmostEqual(inParam, outParam)
def testGetSynPermInactiveDec(self):
inParam = 0.123
self.sp.setSynPermInactiveDec(inParam)
outParam = self.sp.getSynPermInactiveDec()
self.assertAlmostEqual(inParam, outParam)
def testGetSynPermBelowStimulusInc(self):
inParam = 0.0898
self.sp.setSynPermBelowStimulusInc(inParam)
outParam = self.sp.getSynPermBelowStimulusInc()
self.assertAlmostEqual(inParam, outParam)
def testGetSynPermConnected(self):
inParam = 0.514
self.sp.setSynPermConnected(inParam)
outParam = self.sp.getSynPermConnected()
self.assertAlmostEqual(inParam, outParam)
def testGetMinPctOverlapDutyCycles(self):
inParam = 0.11122
self.sp.setMinPctOverlapDutyCycles(inParam)
outParam = self.sp.getMinPctOverlapDutyCycles()
self.assertAlmostEqual(inParam, outParam)
def testGetPermanence(self):
numInputs = 5
numColumns = 5
self.sp.initialize(columnDimensions=[numInputs],
inputDimensions=[numColumns],
potentialRadius=1,
potentialPct=1)
inParam = numpy.array(
[0.06, 0.07, 0.08, 0.12, 0.13]).astype(realType)
self.sp.setPermanence(0,inParam)
outParam = numpy.zeros(numInputs).astype(realType)
self.sp.getPermanence(0, outParam)
self.assertListEqual(list(inParam),list(outParam))
def testGetBoostFactors(self):
numInputs = 3
numColumns = 3
self.sp.initialize(columnDimensions=[numInputs],
inputDimensions=[numColumns])
inParam = numpy.array([1, 1.2, 1.3, ]).astype(realType)
self.sp.setBoostFactors(inParam)
outParam = numpy.zeros(numInputs).astype(realType)
self.sp.getBoostFactors(outParam)
self.assertListEqual(list(inParam),list(outParam))
def testGetOverlapDutyCycles(self):
numInputs = 3
numColumns = 3
self.sp.initialize(columnDimensions=[numInputs],
inputDimensions=[numColumns])
inParam = numpy.array([0.9, 0.3, 0.1]).astype(realType)
self.sp.setOverlapDutyCycles(inParam)
outParam = numpy.zeros(numInputs).astype(realType)
self.sp.getOverlapDutyCycles(outParam)
self.assertListEqual(list(inParam),list(outParam))
def testGetActiveDutyCycles(self):
numInputs = 3
numColumns = 3
self.sp.initialize(columnDimensions=[numInputs],
inputDimensions=[numColumns])
inParam = numpy.array([0.9, 0.99, 0.999, ]).astype(realType)
self.sp.setActiveDutyCycles(inParam)
outParam = numpy.zeros(numInputs).astype(realType)
self.sp.getActiveDutyCycles(outParam)
self.assertListEqual(list(inParam),list(outParam))
def testGetMinOverlapDutyCycles(self):
numInputs = 3
numColumns = 3
self.sp.initialize(columnDimensions=[numInputs],
inputDimensions=[numColumns])
inParam = numpy.array([0.01, 0.02, 0.035, ]).astype(realType)
self.sp.setMinOverlapDutyCycles(inParam)
outParam = numpy.zeros(numInputs).astype(realType)
self.sp.getMinOverlapDutyCycles(outParam)
self.assertListEqual(list(inParam),list(outParam))
def testGetPotential(self):
self.sp.initialize(columnDimensions=[3], inputDimensions=[3])
numInputs = 3
numColumns = 3
self.sp.initialize(columnDimensions=[numInputs],
inputDimensions=[numColumns])
inParam1 = numpy.array([1, 0, 1]).astype(uintType)
self.sp.setPotential(0, inParam1)
inParam2 = numpy.array([1, 1, 0]).astype(uintType)
self.sp.setPotential(1, inParam2)
outParam1 = numpy.zeros(numInputs).astype(uintType)
outParam2 = numpy.zeros(numInputs).astype(uintType)
self.sp.getPotential(0, outParam1)
self.sp.getPotential(1, outParam2)
self.assertListEqual(list(inParam1),list(outParam1))
self.assertListEqual(list(inParam2),list(outParam2))
def testGetConnectedSynapses(self):
numInputs = 5
numColumns = 5
self.sp.initialize(columnDimensions=[numInputs],
inputDimensions=[numColumns],
potentialRadius=1,
potentialPct=1)
inParam = numpy.array(
[0.06, 0.07, 0.08, 0.12, 0.13]).astype(realType)
trueConnected = numpy.array([0, 0, 0, 1, 1])
self.sp.setSynPermConnected(0.1)
self.sp.setPermanence(0,inParam)
outParam = numpy.zeros(numInputs).astype(uintType)
self.sp.getConnectedSynapses(0, outParam)
self.assertListEqual(list(trueConnected),list(outParam))
def testGetConnectedCounts(self):
numInputs = 5
numColumns = 5
self.sp.initialize(columnDimensions=[numInputs],
inputDimensions=[numColumns],
potentialRadius=1,
potentialPct=1)
inParam = numpy.array(
[0.06, 0.07, 0.08, 0.12, 0.11]).astype(realType)
trueConnectedCount = 2
self.sp.setSynPermConnected(0.1)
self.sp.setPermanence(0, inParam)
outParam = numpy.zeros(numInputs).astype(uintType)
self.sp.getConnectedCounts(outParam)
self.assertEqual(trueConnectedCount, outParam[0])
def assertListAlmostEqual(self, alist, blist):
self.assertEqual(len(alist), len(blist))
for (a,b) in zip(alist,blist):
diff = abs(a - b)
self.assertLess(diff,1e-5)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 3,628,852,379,972,284,400 | 30.235669 | 72 | 0.696676 | false |
google/grr | grr/server/grr_response_server/databases/mysql_utils_test.py | 1 | 2367 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from absl import app
from absl.testing import absltest
from grr_response_server.databases import mysql_utils
from grr.test_lib import test_lib
class DocTest(test_lib.DocTest):
module = mysql_utils
class PlaceholdersTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(mysql_utils.Placeholders(0), "()")
def testOne(self):
self.assertEqual(mysql_utils.Placeholders(1), "(%s)")
def testMany(self):
self.assertEqual(mysql_utils.Placeholders(4), "(%s, %s, %s, %s)")
def testZeroValues(self):
self.assertEqual(mysql_utils.Placeholders(3, 0), "")
def testManyValues(self):
self.assertEqual(
mysql_utils.Placeholders(3, 2), "(%s, %s, %s), (%s, %s, %s)")
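# For context (illustrative, not taken from the GRR codebase): the two-argument
# form is the shape a caller would splice into a bulk INSERT, e.g.
#   "INSERT INTO tbl (a, b, c) VALUES " + mysql_utils.Placeholders(3, 2)
#   -> "INSERT INTO tbl (a, b, c) VALUES (%s, %s, %s), (%s, %s, %s)"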
class NamedPlaceholdersTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(mysql_utils.NamedPlaceholders([]), "()")
def testOne(self):
self.assertEqual(mysql_utils.NamedPlaceholders(["foo"]), "(%(foo)s)")
def testMany(self):
self.assertEqual(
mysql_utils.NamedPlaceholders(["bar", "baz", "foo"]),
"(%(bar)s, %(baz)s, %(foo)s)")
def testDictUsesKeys(self):
self.assertIn(
mysql_utils.NamedPlaceholders({
"bar": 42,
"baz": 42,
"foo": 42
}), ["(%(bar)s, %(baz)s, %(foo)s)"])
def testSortsNames(self):
self.assertEqual(
mysql_utils.NamedPlaceholders(["bar", "foo", "baz"]),
"(%(bar)s, %(baz)s, %(foo)s)")
class ColumnsTest(absltest.TestCase):
def testEmpty(self):
self.assertEqual(mysql_utils.Columns([]), "()")
def testOne(self):
self.assertEqual(mysql_utils.Columns(["foo"]), "(`foo`)")
def testMany(self):
self.assertEqual(
mysql_utils.Columns(["bar", "baz", "foo"]), "(`bar`, `baz`, `foo`)")
def testDictUsesKeys(self):
self.assertIn(
mysql_utils.Columns({
"bar": 42,
"baz": 42,
"foo": 42
}), ["(`bar`, `baz`, `foo`)"])
def testSortsNames(self):
self.assertEqual(
mysql_utils.Columns(["bar", "foo", "baz"]), "(`bar`, `baz`, `foo`)")
def testSortsRawNamesWithoutEscape(self):
self.assertGreater("`", "_")
self.assertEqual(mysql_utils.Columns(["a", "a_hash"]), "(`a`, `a_hash`)")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 6,338,833,705,529,564,000 | 23.915789 | 77 | 0.598648 | false |
wujuguang/sentry | src/sentry/api/endpoints/organization_member_team_details.py | 22 | 6846 | from __future__ import absolute_import
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.bases.organization import (
OrganizationEndpoint, OrganizationPermission
)
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.models.team import TeamWithProjectsSerializer
from sentry.models import (
AuditLogEntryEvent, OrganizationAccessRequest,
OrganizationMember, OrganizationMemberTeam, Team
)
ERR_INSUFFICIENT_ROLE = 'You cannot modify a member other than yourself.'
class OrganizationMemberTeamSerializer(serializers.Serializer):
isActive = serializers.BooleanField()
class RelaxedOrganizationPermission(OrganizationPermission):
scope_map = {
'GET': ['org:read', 'org:write', 'org:delete'],
'POST': ['org:read', 'org:write', 'org:delete'],
'PUT': ['org:read', 'org:write', 'org:delete'],
# DELETE checks for role comparison as you can either remove a member
# with a lower access role, or yourself, without having the req. scope
'DELETE': ['org:read', 'org:write', 'org:delete'],
}
class OrganizationMemberTeamDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedOrganizationPermission]
def _can_access(self, request, member):
# TODO(dcramer): ideally org owners/admins could perform these actions
if request.user.is_superuser:
return True
if not request.user.is_authenticated():
return False
if request.user.id == member.user_id:
return True
return False
def _get_member(self, request, organization, member_id):
if member_id == 'me':
queryset = OrganizationMember.objects.filter(
organization=organization,
user__id=request.user.id,
)
else:
queryset = OrganizationMember.objects.filter(
organization=organization,
id=member_id,
)
return queryset.select_related('user').get()
def post(self, request, organization, member_id, team_slug):
"""
Join a team
Join or request access to a team.
If the user is already a member of the team, this will simply return
a 204.
If the user needs permission to join the team, an access request will
be generated and the returned status code will be 202.
"""
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not self._can_access(request, om):
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
try:
team = Team.objects.get(
organization=organization,
slug=team_slug,
)
except Team.DoesNotExist:
raise ResourceDoesNotExist
if not om.has_global_access:
try:
omt = OrganizationMemberTeam.objects.get(
team=team,
organizationmember=om,
)
except OrganizationMemberTeam.DoesNotExist:
# TODO(dcramer): this should create a pending request and
# return a 202
if not organization.flags.allow_joinleave:
omt, created = OrganizationAccessRequest.objects.get_or_create(
team=team,
member=om,
)
if created:
omt.send_request_email()
return Response(status=202)
omt = OrganizationMemberTeam(
team=team,
organizationmember=om,
is_active=False,
)
if omt.is_active:
return Response(status=204)
else:
try:
omt = OrganizationMemberTeam.objects.get(
team=team,
organizationmember=om,
)
except OrganizationMemberTeam.DoesNotExist:
# if the relationship doesn't exist, they're already a member
return Response(status=204)
omt.is_active = True
omt.save()
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_JOIN_TEAM,
data=omt.get_audit_log_data(),
)
return Response(serialize(
team, request.user, TeamWithProjectsSerializer()), status=201)
def delete(self, request, organization, member_id, team_slug):
"""
Leave a team
Leave a team.
"""
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not self._can_access(request, om):
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
try:
team = Team.objects.get(
organization=organization,
slug=team_slug,
)
except Team.DoesNotExist:
raise ResourceDoesNotExist
if not om.has_global_access:
try:
omt = OrganizationMemberTeam.objects.get(
team=team,
organizationmember=om,
)
except OrganizationMemberTeam.DoesNotExist:
# if the relationship doesn't exist, they're already a member
return Response(serialize(
team, request.user, TeamWithProjectsSerializer()), status=200)
else:
try:
omt = OrganizationMemberTeam.objects.get(
team=team,
organizationmember=om,
is_active=True,
)
except OrganizationMemberTeam.DoesNotExist:
omt = OrganizationMemberTeam(
team=team,
organizationmember=om,
is_active=True,
)
if omt.is_active:
omt.is_active = False
omt.save()
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_LEAVE_TEAM,
data=omt.get_audit_log_data(),
)
return Response(serialize(
team, request.user, TeamWithProjectsSerializer()), status=200)
| bsd-3-clause | 6,463,097,243,049,706,000 | 32.558824 | 83 | 0.567193 | false |