repo_name | path | copies | size | content | license | var_hash | doc_hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|---|
lipixun/pytest | rabbitmq/deadchannel/going2dead.py | 1 | 2112 | #!/usr/bin/env python
# encoding=utf8
# The dead channel application
import sys
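# Python 2 idiom: reload sys so setdefaultencoding is available again, then force UTF-8 as the default encoding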
reload(sys)
sys.setdefaultencoding('utf8')
from uuid import uuid4
from time import time, sleep
from haigha.connections.rabbit_connection import RabbitConnection
from haigha.message import Message
class Client(object):
"""The dead channel client
"""
def __init__(self, host, port, vhost, user, password):
"""Create a new Client
"""
self._conn = RabbitConnection(host = host, port = port, vhost = vhost, user = user, password = password)
self._channel = self._conn.channel()
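# Declare an anonymous queue whose dead-lettered messages are republished to the 'amq.topic' exchange with routing key 'test.dead_channel'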
result = self._channel.queue.declare(arguments = { 'x-dead-letter-exchange': 'amq.topic', 'x-dead-letter-routing-key': 'test.dead_channel' })
self._deadQueue = result[0]
# Send a message
self._channel.basic.publish(Message('OMG! I\'m dead!'), '', self._deadQueue)
def dead(self):
"""Normal dead
"""
self._channel.close()
if __name__ == '__main__':
from argparse import ArgumentParser
def getArguments():
"""Get arguments
"""
parser = ArgumentParser(description = 'RabbitMQ dead channel client')
parser.add_argument('--host', dest = 'host', required = True, help = 'The host')
parser.add_argument('--port', dest = 'port', default = 5672, type = int, help = 'The port')
parser.add_argument('--vhost', dest = 'vhost', default = '/test', help = 'The virtual host')
parser.add_argument('--user', dest = 'user', default = 'test', help = 'The user name')
parser.add_argument('--password', dest = 'password', default = 'test', help = 'The password')
# Done
return parser.parse_args()
def main():
"""The main entry
"""
args = getArguments()
# Create the server
client = Client(args.host, args.port, args.vhost, args.user, args.password)
# Go to dead
print 'Will go to dead in 10s, or you can use ctrl + c to cause an unexpected death'
sleep(10)
client.dead()
print 'Normal dead'
main()
| gpl-2.0 | -501,589,182,561,028,100 | -1,066,335,480,371,074,300 | 33.064516 | 149 | 0.606061 | false |
Gustry/inasafe | safe/gui/tools/multi_buffer_dialog.py | 3 | 11681 | # coding=utf-8
"""**Multi Buffer Tool Implementation.**"""
import logging
import os
from collections import OrderedDict
from operator import itemgetter
from qgis.core import QgsMapLayerRegistry
from qgis.gui import QgsMapLayerProxyModel
from PyQt4 import QtGui
from PyQt4.QtCore import pyqtSignature, pyqtSlot
from PyQt4.QtGui import QFileDialog, QIcon
from safe.common.utilities import unique_filename, temp_dir
from safe.datastore.folder import Folder
from safe.gis.vector.multi_buffering import multi_buffering
from safe.gui.tools.wizard.wizard_dialog import WizardDialog
from safe.gui.tools.help.multi_buffer_help import multi_buffer_help
from safe.messaging import styles
from safe.utilities.resources import (
get_ui_class,
resources_path,
html_footer,
html_header)
INFO_STYLE = styles.BLUE_LEVEL_4_STYLE
LOGGER = logging.getLogger('InaSAFE')
FORM_CLASS = get_ui_class('multi_buffer_dialog_base.ui')
class MultiBufferDialog(QtGui.QDialog, FORM_CLASS):
"""Dialog implementation class for the InaSAFE multi buffer tool."""
def __init__(self, parent=None, iface=None, dock_widget=None):
"""Constructor for the multi buffer dialog.
:param parent: Parent widget of this dialog.
:type parent: QWidget
"""
QtGui.QDialog.__init__(self, parent)
self.setupUi(self)
self.setWindowTitle(self.tr('InaSAFE Multi Buffer Tool'))
self.parent = parent
self.iface = iface
self.dock_widget = dock_widget
self.keyword_wizard = None
# output file properties initialisation
self.data_store = None
self.output_directory = None
self.output_filename = None
self.output_extension = None
self.output_layer = None
self.classification = []
# set icon
self.add_class_button.setIcon(
QIcon(resources_path('img', 'icons', 'add.svg')))
self.remove_class_button.setIcon(
QIcon(resources_path('img', 'icons', 'remove.svg')))
# prepare dialog initialisation
self.layer.setFilters(QgsMapLayerProxyModel.VectorLayer)
self.directory_button_status()
self.add_class_button_status()
self.ok_button_status()
self.output_form.setPlaceholderText(
self.tr('[Create a temporary layer]'))
self.keyword_wizard_checkbox.setChecked(True)
# set signal
self.layer.layerChanged.connect(self.directory_button_status)
self.layer.layerChanged.connect(self.ok_button_status)
self.output_form.textChanged.connect(self.ok_button_status)
self.directory_button.clicked.connect(
self.on_directory_button_tool_clicked)
self.radius_form.valueChanged.connect(self.add_class_button_status)
self.class_form.textChanged.connect(self.add_class_button_status)
self.add_class_button.clicked.connect(
self.populate_hazard_classification)
self.add_class_button.clicked.connect(self.ok_button_status)
self.remove_class_button.clicked.connect(
self.remove_selected_classification)
self.remove_class_button.clicked.connect(self.ok_button_status)
# Set up things for context help
self.help_button = self.button_box.button(QtGui.QDialogButtonBox.Help)
# Allow toggling the help button
self.help_button.setCheckable(True)
self.help_button.toggled.connect(self.help_toggled)
self.main_stacked_widget.setCurrentIndex(1)
# Fix for issue 1699 - cancel button does nothing
cancel_button = self.button_box.button(QtGui.QDialogButtonBox.Cancel)
cancel_button.clicked.connect(self.reject)
# Fix ends
ok_button = self.button_box.button(QtGui.QDialogButtonBox.Ok)
ok_button.clicked.connect(self.accept)
def accept(self):
"""Process the layer for multi buffering and generate a new layer.
.. note:: This is called on OK click.
"""
# set parameter from dialog
input_layer = self.layer.currentLayer()
output_path = self.output_form.text()
radius = self.get_classification()
# monkey patch keywords so the layer works with the multi buffering function
input_layer.keywords = {'inasafe_fields': {}}
# run multi buffering
self.output_layer = multi_buffering(input_layer, radius)
# save output layer to data store and check whether user
# provide the output path.
if output_path:
self.output_directory, self.output_filename = (
os.path.split(output_path))
self.output_filename, self.output_extension = (
os.path.splitext(self.output_filename))
# if the user does not provide an output path, create a temporary file.
else:
self.output_directory = temp_dir(sub_dir='work')
self.output_filename = (
unique_filename(
prefix='hazard_layer',
suffix='.geojson',
dir=self.output_directory))
self.output_filename = os.path.split(self.output_filename)[1]
self.output_filename, self.output_extension = (
os.path.splitext(self.output_filename))
self.data_store = Folder(self.output_directory)
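# choose the vector driver for the data store based on the output file extension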
if self.output_extension == '.shp':
self.data_store.default_vector_format = 'shp'
elif self.output_extension == '.geojson':
self.data_store.default_vector_format = 'geojson'
self.data_store.add_layer(self.output_layer, self.output_filename)
# add output layer to map canvas
self.output_layer = self.data_store.layer(self.output_filename)
QgsMapLayerRegistry.instance().addMapLayers(
[self.output_layer])
self.iface.setActiveLayer(self.output_layer)
self.iface.zoomToActiveLayer()
self.done(QtGui.QDialog.Accepted)
if self.keyword_wizard_checkbox.isChecked():
self.launch_keyword_wizard()
@pyqtSignature('') # prevents actions being handled twice
def on_directory_button_tool_clicked(self):
"""Autoconnect slot activated when directory button is clicked."""
# noinspection PyCallByClass,PyTypeChecker
# set up parameter from dialog
input_path = self.layer.currentLayer().source()
input_directory, self.output_filename = os.path.split(input_path)
file_extension = os.path.splitext(self.output_filename)[1]
self.output_filename = os.path.splitext(self.output_filename)[0]
# show Qt file directory dialog
output_path = QFileDialog.getSaveFileName(
self,
self.tr('Output file'),
'%s_multi_buffer%s' % (
os.path.join(input_directory, self.output_filename),
file_extension),
'GeoJSON (*.geojson);;Shapefile (*.shp)')
# set selected path to the dialog
self.output_form.setText(output_path)
def get_output_from_input(self):
"""Populate output form with default output path based on input layer.
"""
input_path = self.layer.currentLayer().source()
output_path = (
os.path.splitext(input_path)[0] + '_multi_buffer' +
os.path.splitext(input_path)[1])
self.output_form.setText(output_path)
def populate_hazard_classification(self):
"""Populate hazard classification on hazard class form."""
new_class = {
'value': self.radius_form.value(),
'name': self.class_form.text()}
self.classification.append(new_class)
self.classification = sorted(
self.classification, key=itemgetter('value'))
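# rebuild the hazard class list widget in ascending radius order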
self.hazard_class_form.clear()
for item in self.classification:
new_item = '{value} - {name}'.format(
value=item['value'], name=item['name'])
self.hazard_class_form.addItem(new_item)
self.radius_form.setValue(0)
self.class_form.clear()
self.ok_button_status()
def remove_selected_classification(self):
"""Remove selected item on hazard class form."""
removed_classes = self.hazard_class_form.selectedItems()
current_item = self.hazard_class_form.currentItem()
removed_index = self.hazard_class_form.indexFromItem(current_item)
del self.classification[removed_index.row()]
for item in removed_classes:
self.hazard_class_form.takeItem(
self.hazard_class_form.row(item))
def get_classification(self):
"""Get all hazard class created by user.
:return: Hazard class definition created by user.
:rtype: OrderedDict
"""
classification_dictionary = {}
for item in self.classification:
classification_dictionary[item['value']] = item['name']
classification_dictionary = OrderedDict(
sorted(classification_dictionary.items()))
return classification_dictionary
def directory_button_status(self):
"""Function to enable or disable directory button."""
if self.layer.currentLayer():
self.directory_button.setEnabled(True)
else:
self.directory_button.setEnabled(False)
def add_class_button_status(self):
"""Function to enable or disable add class button."""
if self.class_form.text() and self.radius_form.value() >= 0:
self.add_class_button.setEnabled(True)
else:
self.add_class_button.setEnabled(False)
def ok_button_status(self):
"""Function to enable or disable OK button."""
if not self.layer.currentLayer():
self.button_box.button(QtGui.QDialogButtonBox.Ok).setEnabled(False)
elif (self.hazard_class_form.count() > 0 and
self.layer.currentLayer().name() and
len(self.output_form.text()) >= 0):
self.button_box.button(QtGui.QDialogButtonBox.Ok).setEnabled(True)
else:
self.button_box.button(QtGui.QDialogButtonBox.Ok).setEnabled(False)
@pyqtSlot()
@pyqtSignature('bool') # prevents actions being handled twice
def help_toggled(self, flag):
"""Show or hide the help tab in the stacked widget.
:param flag: Flag indicating whether help should be shown or hidden.
:type flag: bool
"""
if flag:
self.help_button.setText(self.tr('Hide Help'))
self.show_help()
else:
self.help_button.setText(self.tr('Show Help'))
self.hide_help()
def hide_help(self):
"""Hide the usage info from the user."""
self.main_stacked_widget.setCurrentIndex(1)
def show_help(self):
"""Show usage info to the user."""
# Read the header and footer html snippets
self.main_stacked_widget.setCurrentIndex(0)
header = html_header()
footer = html_footer()
string = header
message = multi_buffer_help()
string += message.to_html()
string += footer
self.help_web_view.setHtml(string)
def launch_keyword_wizard(self):
"""Launch keyword creation wizard."""
# make sure selected layer is the output layer
if self.iface.activeLayer() != self.output_layer:
return
# launch wizard dialog
self.keyword_wizard = WizardDialog(
self.iface.mainWindow(),
self.iface,
self.dock_widget)
self.keyword_wizard.set_keywords_creation_mode(self.output_layer)
self.keyword_wizard.exec_() # modal
| gpl-3.0 | -7,443,346,947,412,166,000 | -3,547,316,125,425,169,000 | 38.06689 | 79 | 0.637873 | false |
gf712/AbPyTools | abpytools/core/fab_collection.py | 1 | 14123 | from .chain_collection import ChainCollection
import numpy as np
import pandas as pd
from .chain import calculate_charge
from abpytools.utils import DataLoader
from operator import itemgetter
from .fab import Fab
from .helper_functions import germline_identity_pd, to_numbering_table
from .base import CollectionBase
import os
import json
from .utils import (json_FabCollection_formatter, pb2_FabCollection_formatter, pb2_FabCollection_parser,
json_FabCollection_parser)
from .flags import *
if BACKEND_FLAGS.HAS_PROTO:
from abpytools.core.formats import FabCollectionProto
class FabCollection(CollectionBase):
def __init__(self, fab=None, heavy_chains=None, light_chains=None, names=None):
"""
Fab object container that handles combinations of light/heavy Chain pairs.
Args:
fab (list):
heavy_chains (ChainCollection):
light_chains (ChainCollection):
names (list):
"""
# check that at least one input was provided
if heavy_chains is None and light_chains is None and fab is None:
raise ValueError('Provide a list of Chain objects or a ChainCollection object')
# check if fab object is a list and if all objects are abpytools.Fab objects
if isinstance(fab, list) and all(isinstance(fab_i, Fab) for fab_i in fab):
self._fab = fab
self._light_chains = ChainCollection([x[0] for x in self._fab])
self._heavy_chains = ChainCollection([x[1] for x in self._fab])
if fab is None and (heavy_chains is not None and light_chains is not None):
if isinstance(heavy_chains, list):
self._heavy_chains = ChainCollection(antibody_objects=heavy_chains)
elif isinstance(heavy_chains, ChainCollection):
self._heavy_chains = heavy_chains
else:
raise ValueError('Provide a list of Chain objects or a ChainCollection object')
if isinstance(light_chains, list):
self._light_chains = ChainCollection(antibody_objects=light_chains)
elif isinstance(light_chains, ChainCollection):
self._light_chains = light_chains
else:
raise ValueError('Provide a list of Chain objects or a ChainCollection object')
if len(self._light_chains.loading_status()) == 0:
self._light_chains.load()
if len(self._heavy_chains.loading_status()) == 0:
self._heavy_chains.load()
if self._light_chains.n_ab != self._heavy_chains.n_ab:
raise ValueError('Number of heavy chains must be the same as the number of light chains')
if isinstance(names, list) and all(isinstance(name, str) for name in names):
if len(names) == self._heavy_chains.n_ab:
self._names = names
else:
raise ValueError(
'Length of name list must be the same as length of heavy_chains/light chains lists')
elif names is None:
self._names = ['{} - {}'.format(heavy, light) for heavy, light in zip(self._heavy_chains.names,
self._light_chains.names)]
else:
raise ValueError("Names expected a list of strings, instead got {}".format(type(names)))
self._n_ab = self._light_chains.n_ab
self._pair_sequences = [heavy + light for heavy, light in zip(self._heavy_chains.sequences,
self._light_chains.sequences)]
# keep the name of the heavy and light chains internally to keep everything in the right order
self._internal_heavy_name = self._heavy_chains.names
self._internal_light_name = self._light_chains.names
# even though it makes more sense to draw all these values from the base Fab objects this is much slower
# whenever self._n_ab > 1 it makes more sense to use the self._heavy_chain and self._light_chain containers
# in all the methods
# in essence the abpytools.Fab object is just a representative building block that could in future just
# cache data and would then represent a speed up in the calculations
def molecular_weights(self, monoisotopic=False):
return [heavy + light for heavy, light in zip(self._heavy_chains.molecular_weights(monoisotopic=monoisotopic),
self._light_chains.molecular_weights(monoisotopic=monoisotopic))]
def extinction_coefficients(self, extinction_coefficient_database='Standard', reduced=False, normalise=False,
**kwargs):
heavy_ec = self._heavy_chains.extinction_coefficients(
extinction_coefficient_database=extinction_coefficient_database,
reduced=reduced)
light_ec = self._light_chains.extinction_coefficients(
extinction_coefficient_database=extinction_coefficient_database,
reduced=reduced)
if normalise:
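# scale the combined extinction coefficient by the Fab molecular weight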
return [(heavy + light) / mw for heavy, light, mw in
zip(heavy_ec, light_ec, self.molecular_weights(**kwargs))]
else:
return [heavy + light for heavy, light in zip(heavy_ec, light_ec)]
def hydrophobicity_matrix(self):
return np.column_stack((self._heavy_chains.hydrophobicity_matrix(), self._light_chains.hydrophobicity_matrix()))
def charge(self):
return np.column_stack((self._heavy_chains.charge, self._light_chains.charge))
def total_charge(self, ph=7.4, pka_database='Wikipedia'):
available_pi_databases = ["EMBOSS", "DTASetect", "Solomon", "Sillero", "Rodwell", "Wikipedia", "Lehninger",
"Grimsley"]
assert pka_database in available_pi_databases, \
"Selected pI database {} not available. Available databases: {}".format(pka_database,
' ,'.join(available_pi_databases))
data_loader = DataLoader(data_type='AminoAcidProperties', data=['pI', pka_database])
pka_data = data_loader.get_data()
return [calculate_charge(sequence=seq, ph=ph, pka_values=pka_data) for seq in self.sequences]
def igblast_local_query(self, file_path, chain):
if chain.lower() == 'light':
self._light_chains.igblast_local_query(file_path=file_path)
elif chain.lower() == 'heavy':
self._heavy_chains.igblast_local_query(file_path=file_path)
else:
raise ValueError('Specify if the data being loaded is for the heavy or light chain')
def igblast_server_query(self, **kwargs):
self._light_chains.igblast_server_query(**kwargs)
self._heavy_chains.igblast_server_query(**kwargs)
def numbering_table(self, as_array=False, region='all', chain='both', **kwargs):
return to_numbering_table(as_array=as_array, region=region, chain=chain,
heavy_chains_numbering_table=self._heavy_chains.numbering_table,
light_chains_numbering_table=self._light_chains.numbering_table,
names=self.names, **kwargs)
def _germline_pd(self):
# empty dictionaries return false, so this condition checks if any of the values are False
if all([x for x in self._light_chains.germline_identity.values()]) is False:
# this means there is no information about the germline,
# by default it will run a web query
self._light_chains.igblast_server_query()
if all([x for x in self._heavy_chains.germline_identity.values()]) is False:
self._heavy_chains.igblast_server_query()
heavy_chain_germlines = self._heavy_chains.germline
light_chain_germlines = self._light_chains.germline
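# assemble a four-column table (heavy assignment, heavy score, light assignment, light score), one row per antibody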
data = np.array([[heavy_chain_germlines[x][0] for x in self._internal_heavy_name],
[heavy_chain_germlines[x][1] for x in self._internal_heavy_name],
[light_chain_germlines[x][0] for x in self._internal_light_name],
[light_chain_germlines[x][1] for x in self._internal_light_name]]).T
df = pd.DataFrame(data=data,
columns=pd.MultiIndex.from_tuples([('Heavy', 'Assignment'),
('Heavy', 'Score'),
('Light', 'Assignment'),
('Light', 'Score')]),
index=self.names)
df.loc[:, (slice(None), 'Score')] = df.loc[:, (slice(None), 'Score')].apply(pd.to_numeric)
return df
def save_to_json(self, path, update=True):
with open(os.path.join(path + '.json'), 'w') as f:
fab_data = json_FabCollection_formatter(self)
json.dump(fab_data, f, indent=2)
def save_to_pb2(self, path, update=True):
proto_parser = FabCollectionProto()
try:
with open(os.path.join(path + '.pb2'), 'rb') as f:
proto_parser.ParseFromString(f.read())
except IOError:
# Creating new file
pass
pb2_FabCollection_formatter(self, proto_parser)
with open(os.path.join(path + '.pb2'), 'wb') as f:
f.write(proto_parser.SerializeToString())
def save_to_fasta(self, path, update=True):
raise NotImplementedError
@classmethod
def load_from_json(cls, path, n_threads=20, verbose=True, show_progressbar=True):
with open(path, 'r') as f:
data = json.load(f)
fab_objects = json_FabCollection_parser(data)
fab_collection = cls(fab=fab_objects)
return fab_collection
@classmethod
def load_from_pb2(cls, path, n_threads=20, verbose=True, show_progressbar=True):
with open(path, 'rb') as f:
proto_parser = FabCollectionProto()
proto_parser.ParseFromString(f.read())
fab_objects = pb2_FabCollection_parser(proto_parser)
fab_collection = cls(fab=fab_objects)
return fab_collection
@classmethod
def load_from_fasta(cls, path, numbering_scheme=NUMBERING_FLAGS.CHOTHIA, n_threads=20,
verbose=True, show_progressbar=True):
raise NotImplementedError
def _get_names_iter(self, chain='both'):
if chain == 'both':
for light_chain, heavy_chain in zip(self._light_chains, self._heavy_chains):
yield f"{light_chain.name}-{heavy_chain.name}"
elif chain == 'light':
for light_chain in self._light_chains:
yield light_chain.name
elif chain == 'heavy':
for heavy_chain in self._heavy_chains:
yield heavy_chain.name
else:
raise ValueError(f"Unknown chain type ({chain}), available options are:"
f"both, light or heavy.")
@property
def regions(self):
heavy_regions = self._heavy_chains.ab_region_index()
light_regions = self._light_chains.ab_region_index()
return {name: {CHAIN_FLAGS.HEAVY_CHAIN: heavy_regions[heavy],
CHAIN_FLAGS.LIGHT_CHAIN: light_regions[light]} for name, heavy, light in
zip(self.names, self._internal_heavy_name, self._internal_light_name)}
@property
def names(self):
return self._names
@property
def sequences(self):
return self._pair_sequences
@property
def aligned_sequences(self):
return [heavy + light for heavy, light in
zip(self._heavy_chains.aligned_sequences,
self._light_chains.aligned_sequences)]
@property
def n_ab(self):
return self._n_ab
@property
def germline_identity(self):
return self._germline_identity()
@property
def germline(self):
return self._germline_pd()
def _string_summary_basic(self):
return "abpytools.FabCollection Number of sequences: {}".format(self._n_ab)
def __len__(self):
return self._n_ab
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def __getitem__(self, indices):
if isinstance(indices, int):
return Fab(heavy_chain=self._heavy_chains[indices],
light_chain=self._light_chains[indices],
name=self.names[indices], load=False)
else:
return FabCollection(heavy_chains=list(itemgetter(*indices)(self._heavy_chains)),
light_chains=list(itemgetter(*indices)(self._light_chains)),
names=list(itemgetter(*indices)(self._names)))
def _germline_identity(self):
# empty dictionaries return false, so this condition checks if any of the values are False
if all([x for x in self._light_chains.germline_identity.values()]) is False:
# this means there is no information about the germline,
# by default it will run a web query
self._light_chains.igblast_server_query()
if all([x for x in self._heavy_chains.germline_identity.values()]) is False:
self._heavy_chains.igblast_server_query()
return germline_identity_pd(self._heavy_chains.germline_identity,
self._light_chains.germline_identity,
self._internal_heavy_name,
self._internal_light_name,
self._names)
def get_object(self, name):
"""
:param name: str
:return:
"""
if name in self.names:
index = self.names.index(name)
return self[index]
else:
raise ValueError('Could not find sequence with specified name')
| mit | -1,408,376,348,849,004,800 | 7,538,993,061,892,582,000 | 41.158209 | 120 | 0.593075 | false |
tartavull/google-cloud-python | bigtable/tests/unit/test_client.py | 2 | 24528 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
def _make_credentials():
import google.auth.credentials
class _CredentialsWithScopes(
google.auth.credentials.Credentials,
google.auth.credentials.Scoped):
pass
return mock.Mock(spec=_CredentialsWithScopes)
class Test__make_data_stub(unittest.TestCase):
def _call_fut(self, client):
from google.cloud.bigtable.client import _make_data_stub
return _make_data_stub(client)
@mock.patch('google.cloud.bigtable.client.make_secure_stub',
return_value=mock.sentinel.stub)
def test_without_emulator(self, make_stub):
from google.cloud.bigtable import client as MUT
credentials = _make_credentials()
user_agent = 'you-sir-age-int'
client = _Client(credentials, user_agent)
result = self._call_fut(client)
self.assertIs(result, mock.sentinel.stub)
make_stub.assert_called_once_with(
client.credentials,
client.user_agent,
MUT.bigtable_pb2.BigtableStub,
MUT.DATA_API_HOST,
extra_options=MUT._GRPC_MAX_LENGTH_OPTIONS,
)
def test_with_emulator(self):
from google.cloud._testing import _Monkey
from google.cloud.bigtable import client as MUT
emulator_host = object()
client = _Client(None, None, emulator_host=emulator_host)
fake_stub = object()
make_insecure_stub_args = []
def mock_make_insecure_stub(*args):
make_insecure_stub_args.append(args)
return fake_stub
with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub):
result = self._call_fut(client)
self.assertIs(result, fake_stub)
self.assertEqual(make_insecure_stub_args, [
(
MUT.bigtable_pb2.BigtableStub,
emulator_host,
),
])
class Test__make_instance_stub(unittest.TestCase):
def _call_fut(self, client):
from google.cloud.bigtable.client import _make_instance_stub
return _make_instance_stub(client)
@mock.patch('google.cloud.bigtable.client.make_secure_stub',
return_value=mock.sentinel.stub)
def test_without_emulator(self, make_stub):
from google.cloud.bigtable import client as MUT
credentials = _make_credentials()
user_agent = 'you-sir-age-int'
client = _Client(credentials, user_agent)
result = self._call_fut(client)
self.assertIs(result, mock.sentinel.stub)
make_stub.assert_called_once_with(
client.credentials,
client.user_agent,
MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub,
MUT.INSTANCE_ADMIN_HOST,
extra_options=MUT._GRPC_EXTRA_OPTIONS,
)
def test_with_emulator(self):
from google.cloud._testing import _Monkey
from google.cloud.bigtable import client as MUT
emulator_host = object()
client = _Client(None, None, emulator_host=emulator_host)
fake_stub = object()
make_insecure_stub_args = []
def mock_make_insecure_stub(*args):
make_insecure_stub_args.append(args)
return fake_stub
with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub):
result = self._call_fut(client)
self.assertIs(result, fake_stub)
self.assertEqual(make_insecure_stub_args, [
(
MUT.bigtable_instance_admin_pb2.BigtableInstanceAdminStub,
emulator_host,
),
])
class Test__make_operations_stub(unittest.TestCase):
def _call_fut(self, client):
from google.cloud.bigtable.client import _make_operations_stub
return _make_operations_stub(client)
@mock.patch('google.cloud.bigtable.client.make_secure_stub',
return_value=mock.sentinel.stub)
def test_without_emulator(self, make_stub):
from google.longrunning import operations_grpc
from google.cloud.bigtable import client as MUT
credentials = _make_credentials()
user_agent = 'you-sir-age-int'
client = _Client(credentials, user_agent)
result = self._call_fut(client)
self.assertIs(result, mock.sentinel.stub)
make_stub.assert_called_once_with(
client.credentials,
client.user_agent,
operations_grpc.OperationsStub,
MUT.OPERATIONS_API_HOST,
extra_options=MUT._GRPC_EXTRA_OPTIONS,
)
def test_with_emulator(self):
from google.longrunning import operations_grpc
from google.cloud._testing import _Monkey
from google.cloud.bigtable import client as MUT
emulator_host = object()
client = _Client(None, None, emulator_host=emulator_host)
fake_stub = object()
make_insecure_stub_args = []
def mock_make_insecure_stub(*args):
make_insecure_stub_args.append(args)
return fake_stub
with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub):
result = self._call_fut(client)
self.assertIs(result, fake_stub)
self.assertEqual(make_insecure_stub_args, [
(
operations_grpc.OperationsStub,
emulator_host,
),
])
class Test__make_table_stub(unittest.TestCase):
def _call_fut(self, client):
from google.cloud.bigtable.client import _make_table_stub
return _make_table_stub(client)
@mock.patch('google.cloud.bigtable.client.make_secure_stub',
return_value=mock.sentinel.stub)
def test_without_emulator(self, make_stub):
from google.cloud.bigtable import client as MUT
credentials = _make_credentials()
user_agent = 'you-sir-age-int'
client = _Client(credentials, user_agent)
result = self._call_fut(client)
self.assertIs(result, mock.sentinel.stub)
make_stub.assert_called_once_with(
client.credentials,
client.user_agent,
MUT.bigtable_table_admin_pb2.BigtableTableAdminStub,
MUT.TABLE_ADMIN_HOST,
extra_options=MUT._GRPC_EXTRA_OPTIONS,
)
def test_with_emulator(self):
from google.cloud._testing import _Monkey
from google.cloud.bigtable import client as MUT
emulator_host = object()
client = _Client(None, None, emulator_host=emulator_host)
fake_stub = object()
make_insecure_stub_args = []
def mock_make_insecure_stub(*args):
make_insecure_stub_args.append(args)
return fake_stub
with _Monkey(MUT, make_insecure_stub=mock_make_insecure_stub):
result = self._call_fut(client)
self.assertIs(result, fake_stub)
self.assertEqual(make_insecure_stub_args, [
(
MUT.bigtable_table_admin_pb2.BigtableTableAdminStub,
emulator_host,
),
])
class TestClient(unittest.TestCase):
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
USER_AGENT = 'you-sir-age-int'
@staticmethod
def _get_target_class():
from google.cloud.bigtable.client import Client
return Client
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
@mock.patch('google.cloud.bigtable.client._make_table_stub')
@mock.patch('google.cloud.bigtable.client._make_operations_stub')
@mock.patch('google.cloud.bigtable.client._make_instance_stub')
@mock.patch('google.cloud.bigtable.client._make_data_stub')
def _make_one_with_mocks(
self, _make_data_stub, _make_instance_stub,
_make_operations_stub, _make_table_stub,
*args, **kwargs):
return self._make_one(*args, **kwargs)
@mock.patch('google.cloud.bigtable.client._make_table_stub')
@mock.patch('google.cloud.bigtable.client._make_operations_stub')
@mock.patch('google.cloud.bigtable.client._make_instance_stub')
@mock.patch('google.cloud.bigtable.client._make_data_stub')
def test_constructor_default_scopes(
self, _make_data_stub, _make_instance_stub,
_make_operations_stub, _make_table_stub):
from google.cloud.bigtable.client import DATA_SCOPE
expected_scopes = (DATA_SCOPE,)
credentials = _make_credentials()
custom_user_agent = 'custom-application'
client = self._make_one(
project=self.PROJECT, credentials=credentials,
user_agent=custom_user_agent)
self.assertEqual(client.project, self.PROJECT)
self.assertIs(
client._credentials, credentials.with_scopes.return_value)
self.assertIsNone(client._http_internal)
self.assertFalse(client._read_only)
self.assertFalse(client._admin)
self.assertEqual(client.SCOPE, expected_scopes)
self.assertEqual(client.user_agent, custom_user_agent)
self.assertIsNone(client.emulator_host)
self.assertIs(client._data_stub, _make_data_stub.return_value)
self.assertIsNone(client._instance_stub_internal)
self.assertIsNone(client._operations_stub_internal)
self.assertIsNone(client._table_stub_internal)
# Check mocks.
credentials.with_scopes.assert_called_once_with(expected_scopes)
_make_data_stub.assert_called_once_with(client)
_make_instance_stub.assert_not_called()
_make_operations_stub.assert_not_called()
_make_table_stub.assert_not_called()
@mock.patch('google.cloud.bigtable.client._make_table_stub')
@mock.patch('google.cloud.bigtable.client._make_operations_stub')
@mock.patch('google.cloud.bigtable.client._make_instance_stub')
@mock.patch('google.cloud.bigtable.client._make_data_stub')
def test_constructor_with_admin(
self, _make_data_stub, _make_instance_stub,
_make_operations_stub, _make_table_stub):
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.bigtable.client import ADMIN_SCOPE
from google.cloud.bigtable.client import DATA_SCOPE
expected_scopes = (DATA_SCOPE, ADMIN_SCOPE)
credentials = _make_credentials()
client = self._make_one(
project=self.PROJECT, credentials=credentials, admin=True)
self.assertEqual(client.project, self.PROJECT)
self.assertIs(
client._credentials, credentials.with_scopes.return_value)
self.assertIsNone(client._http_internal)
self.assertFalse(client._read_only)
self.assertTrue(client._admin)
self.assertEqual(client.SCOPE, expected_scopes)
self.assertEqual(client.user_agent, DEFAULT_USER_AGENT)
self.assertIsNone(client.emulator_host)
self.assertIs(client._data_stub, _make_data_stub.return_value)
self.assertIs(
client._instance_stub_internal, _make_instance_stub.return_value)
self.assertIs(
client._operations_stub_internal,
_make_operations_stub.return_value)
self.assertIs(
client._table_stub_internal, _make_table_stub.return_value)
# Check mocks.
credentials.with_scopes.assert_called_once_with(expected_scopes)
_make_data_stub.assert_called_once_with(client)
_make_instance_stub.assert_called_once_with(client)
_make_operations_stub.assert_called_once_with(client)
_make_table_stub.assert_called_once_with(client)
def test_constructor_both_admin_and_read_only(self):
credentials = _make_credentials()
with self.assertRaises(ValueError):
self._make_one(
project=self.PROJECT, credentials=credentials,
admin=True, read_only=True)
def test__get_scopes_default(self):
from google.cloud.bigtable.client import DATA_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials())
self.assertEqual(client._get_scopes(), (DATA_SCOPE,))
def test__get_scopes_admin(self):
from google.cloud.bigtable.client import ADMIN_SCOPE
from google.cloud.bigtable.client import DATA_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(),
admin=True)
expected_scopes = (DATA_SCOPE, ADMIN_SCOPE)
self.assertEqual(client._get_scopes(), expected_scopes)
def test__get_scopes_read_only(self):
from google.cloud.bigtable.client import READ_ONLY_SCOPE
client = self._make_one(
project=self.PROJECT, credentials=_make_credentials(),
read_only=True)
self.assertEqual(client._get_scopes(), (READ_ONLY_SCOPE,))
def _copy_helper_check_stubs(self, client, new_client):
if client._admin:
# Check the instance stub.
self.assertIs(
client._instance_stub_internal, mock.sentinel.inst_stub1)
self.assertIs(
new_client._instance_stub_internal, mock.sentinel.inst_stub2)
self.assertIsNot(
new_client._instance_stub_internal,
client._instance_stub_internal)
# Check the operations stub.
self.assertIs(
client._operations_stub_internal, mock.sentinel.ops_stub1)
self.assertIs(
new_client._operations_stub_internal, mock.sentinel.ops_stub2)
self.assertIsNot(
new_client._operations_stub_internal,
client._operations_stub_internal)
# Check the table stub.
self.assertIs(
client._table_stub_internal, mock.sentinel.table_stub1)
self.assertIs(
new_client._table_stub_internal, mock.sentinel.table_stub2)
self.assertIsNot(
new_client._table_stub_internal, client._table_stub_internal)
else:
# Check the instance stub.
self.assertIsNone(client._instance_stub_internal)
self.assertIsNone(new_client._instance_stub_internal)
# Check the operations stub.
self.assertIsNone(client._operations_stub_internal)
self.assertIsNone(new_client._operations_stub_internal)
# Check the table stub.
self.assertIsNone(client._table_stub_internal)
self.assertIsNone(new_client._table_stub_internal)
@mock.patch(
'google.cloud.bigtable.client._make_table_stub',
side_effect=[mock.sentinel.table_stub1, mock.sentinel.table_stub2],
)
@mock.patch(
'google.cloud.bigtable.client._make_operations_stub',
side_effect=[mock.sentinel.ops_stub1, mock.sentinel.ops_stub2],
)
@mock.patch(
'google.cloud.bigtable.client._make_instance_stub',
side_effect=[mock.sentinel.inst_stub1, mock.sentinel.inst_stub2],
)
@mock.patch(
'google.cloud.bigtable.client._make_data_stub',
side_effect=[mock.sentinel.data_stub1, mock.sentinel.data_stub2],
)
def _copy_test_helper(
self, _make_data_stub, _make_instance_stub,
_make_operations_stub, _make_table_stub, **kwargs):
credentials = _make_credentials()
# Make sure it "already" is scoped.
credentials.requires_scopes = False
client = self._make_one(
project=self.PROJECT, credentials=credentials, **kwargs)
self.assertIs(client._credentials, credentials)
new_client = client.copy()
self.assertEqual(new_client._admin, client._admin)
self.assertEqual(new_client._credentials, client._credentials)
self.assertEqual(new_client.project, client.project)
self.assertEqual(new_client.user_agent, client.user_agent)
# Make sure stubs are not preserved.
self.assertIs(client._data_stub, mock.sentinel.data_stub1)
self.assertIs(new_client._data_stub, mock.sentinel.data_stub2)
self.assertIsNot(new_client._data_stub, client._data_stub)
self._copy_helper_check_stubs(client, new_client)
# Check mocks.
credentials.with_scopes.assert_not_called()
stub_calls = [
mock.call(client),
mock.call(new_client),
]
self.assertEqual(_make_data_stub.mock_calls, stub_calls)
if client._admin:
self.assertEqual(_make_instance_stub.mock_calls, stub_calls)
self.assertEqual(_make_operations_stub.mock_calls, stub_calls)
self.assertEqual(_make_table_stub.mock_calls, stub_calls)
else:
_make_instance_stub.assert_not_called()
_make_operations_stub.assert_not_called()
_make_table_stub.assert_not_called()
def test_copy(self):
self._copy_test_helper()
def test_copy_admin(self):
self._copy_test_helper(admin=True)
def test_copy_read_only(self):
self._copy_test_helper(read_only=True)
def test_credentials_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials)
self.assertIs(client.credentials, credentials.with_scopes.return_value)
def test_project_name_property(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials)
project_name = 'projects/' + project
self.assertEqual(client.project_name, project_name)
def test_instance_stub_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=True)
self.assertIs(client._instance_stub, client._instance_stub_internal)
def test_instance_stub_non_admin_failure(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=False)
with self.assertRaises(ValueError):
getattr(client, '_instance_stub')
def test_operations_stub_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=True)
self.assertIs(client._operations_stub,
client._operations_stub_internal)
def test_operations_stub_non_admin_failure(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=False)
with self.assertRaises(ValueError):
getattr(client, '_operations_stub')
def test_table_stub_getter(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=True)
self.assertIs(client._table_stub, client._table_stub_internal)
def test_table_stub_non_admin_failure(self):
credentials = _make_credentials()
project = 'PROJECT'
client = self._make_one_with_mocks(
project=project, credentials=credentials, admin=False)
with self.assertRaises(ValueError):
getattr(client, '_table_stub')
def test_instance_factory_defaults(self):
from google.cloud.bigtable.cluster import DEFAULT_SERVE_NODES
from google.cloud.bigtable.instance import Instance
from google.cloud.bigtable.instance import (
_EXISTING_INSTANCE_LOCATION_ID)
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
credentials = _make_credentials()
client = self._make_one_with_mocks(
project=PROJECT, credentials=credentials)
instance = client.instance(INSTANCE_ID, display_name=DISPLAY_NAME)
self.assertIsInstance(instance, Instance)
self.assertEqual(instance.instance_id, INSTANCE_ID)
self.assertEqual(instance.display_name, DISPLAY_NAME)
self.assertEqual(instance._cluster_location_id,
_EXISTING_INSTANCE_LOCATION_ID)
self.assertEqual(instance._cluster_serve_nodes, DEFAULT_SERVE_NODES)
self.assertIs(instance._client, client)
def test_instance_factory_w_explicit_serve_nodes(self):
from google.cloud.bigtable.instance import Instance
PROJECT = 'PROJECT'
INSTANCE_ID = 'instance-id'
DISPLAY_NAME = 'display-name'
LOCATION_ID = 'locname'
SERVE_NODES = 5
credentials = _make_credentials()
client = self._make_one_with_mocks(
project=PROJECT, credentials=credentials)
instance = client.instance(
INSTANCE_ID, display_name=DISPLAY_NAME,
location=LOCATION_ID, serve_nodes=SERVE_NODES)
self.assertIsInstance(instance, Instance)
self.assertEqual(instance.instance_id, INSTANCE_ID)
self.assertEqual(instance.display_name, DISPLAY_NAME)
self.assertEqual(instance._cluster_location_id, LOCATION_ID)
self.assertEqual(instance._cluster_serve_nodes, SERVE_NODES)
self.assertIs(instance._client, client)
def test_list_instances(self):
from google.cloud.bigtable._generated import (
instance_pb2 as data_v2_pb2)
from google.cloud.bigtable._generated import (
bigtable_instance_admin_pb2 as messages_v2_pb2)
from tests.unit._testing import _FakeStub
LOCATION = 'projects/' + self.PROJECT + '/locations/locname'
FAILED_LOCATION = 'FAILED'
INSTANCE_ID1 = 'instance-id1'
INSTANCE_ID2 = 'instance-id2'
INSTANCE_NAME1 = (
'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID1)
INSTANCE_NAME2 = (
'projects/' + self.PROJECT + '/instances/' + INSTANCE_ID2)
credentials = _make_credentials()
client = self._make_one_with_mocks(
project=self.PROJECT,
credentials=credentials,
admin=True,
)
# Create request_pb
request_pb = messages_v2_pb2.ListInstancesRequest(
parent='projects/' + self.PROJECT,
)
# Create response_pb
response_pb = messages_v2_pb2.ListInstancesResponse(
failed_locations=[
FAILED_LOCATION,
],
instances=[
data_v2_pb2.Instance(
name=INSTANCE_NAME1,
display_name=INSTANCE_NAME1,
),
data_v2_pb2.Instance(
name=INSTANCE_NAME2,
display_name=INSTANCE_NAME2,
),
],
)
# Patch the stub used by the API method.
client._instance_stub_internal = stub = _FakeStub(response_pb)
# Create expected_result.
failed_locations = [FAILED_LOCATION]
instances = [
client.instance(INSTANCE_ID1, LOCATION),
client.instance(INSTANCE_ID2, LOCATION),
]
expected_result = (instances, failed_locations)
# Perform the method and check the result.
result = client.list_instances()
self.assertEqual(result, expected_result)
self.assertEqual(stub.method_calls, [(
'ListInstances',
(request_pb,),
{},
)])
class _Client(object):
def __init__(self, credentials, user_agent, emulator_host=None):
self.credentials = credentials
self.user_agent = user_agent
self.emulator_host = emulator_host
| apache-2.0 | 2,382,320,339,180,747,300 | 1,597,783,087,815,335,000 | 36.390244 | 79 | 0.629362 | false |
jwill89/clifford-discord-bot | source/retired/main.py | 1 | 31345 | import discord
from discord.ext import commands
import random
import MySQLdb
# ********************************************** #
# DEFINITIONS ********************************** #
# ********************************************** #
# Bot Description
description = '''Official Zealot Gaming Discord bot!'''
# Define Bot
bot = commands.Bot(command_prefix='!', description='Official Zealot Gaming Discord Bot')
# Define MySQL DB and Cursor Object
db = MySQLdb.connect(host="localhost",
user="discord_secure",
passwd="password-here",
db="discord")
# ********************************************** #
# FUNCTIONS ************************************ #
# ********************************************** #
# Check for Game Abbreviations
def is_game_abv(game_abv: str):
try:
sql = "SELECT 1 FROM games WHERE `abv` = %s LIMIT 1"
cur = db.cursor()
result = cur.execute(sql, (game_abv,))
cur.close()
except Exception as e:
print('Exception: ' + str(e))
result = 0
# If we got a result, true, else false
return result == 1
# Check for Game Names
def is_game_name(game_name: str):
try:
sql = "SELECT 1 FROM games WHERE `name` = %s LIMIT 1"
cur = db.cursor()
result = cur.execute(sql, (game_name,))
cur.close()
except Exception as e:
print('Exception: ' + str(e))
result = 0
# If we got a result, true, else false
return result == 1
# Check for Staff Member Status
def is_staff(member: discord.Member):
# Return True or False if User is a Staff Member
return 'Staff' in [r.name for r in member.roles]
# ********************************************** #
# BOT EVENTS *********************************** #
# ********************************************** #
# Bot Start Event
@bot.event
async def on_ready():
print('Logged in as')
print(bot.user.name)
print(bot.user.id)
print('------')
await bot.change_presence(game=discord.Game(name='Zealot Gaming'))
# Welcome Message
@bot.event
async def on_member_join(member):
channel = bot.get_channel('108369515502411776')
fmt = "Everyone welcome {0.mention} to Zealot Gaming! Have a great time here! :wink: " \
"http://puu.sh/nG6Qe.wav".format(member)
await bot.send_message(channel, fmt)
# Goodbye Message
@bot.event
async def on_member_remove(member):
channel = bot.get_channel('108369515502411776')
fmt = ":wave: Goodbye {0}, we're sad to see you go!".format(member.name)
await bot.send_message(channel, fmt)
# ********************************************** #
# UN-GROUPED BOT COMMANDS ********************** #
# ********************************************** #
# COMMAND: !hello
@bot.command(pass_context=True)
async def hello(ctx):
# we do not want the bot to reply to itself
if ctx.message.author == bot.user:
return
else:
msg = 'Hello {0.message.author.mention}'.format(ctx)
await bot.send_message(ctx.message.channel, msg)
# COMMAND: !carlito
@bot.command()
async def carlito():
"""The legendary message of Carlito, maz00's personal cabana boy."""
await bot.say("wew men :ok_hand::skin-tone-1: that's some good shit:100: some good shit :100: that's some good shit"
" right there :100: :ok_hand::skin-tone-1: right there :ok_hand::skin-tone-1: :100: sign me the FUCK "
"up:100: :100: :ok_hand::skin-tone-1: :eggplant:")
# COMMAND: !eightball
@bot.command(pass_context=True)
async def eightball(ctx, question: str):
"""Rolls a magic 8-ball to answer any question you have."""
if question is None:
await bot.say('{0.message.author.mention}, you did not ask a question.'.format(ctx))
return
# Answers List (Classic 8-Ball, 20 Answers)
answers = ['It is certain.',
'It is decidedly so',
'Without a doubt.',
'Yes, definitely.',
'You may rely on it.',
'As I see it, yes.',
'Most likely.',
'Outlook good.',
'Yes.',
'Signs point to yes.',
'Reply hazy; try again.',
'Ask again later.',
'Better not tell you now.',
'Cannot predict now.',
'Concentrate, then ask again.',
'Do not count on it.',
'My reply is no.',
'My sources say no.',
'Outlook not so good.',
'Very doubtful.']
# Send the Answer
await bot.say('{0.message.author.mention}, '.format(ctx) + random.choice(answers))
# COMMAND: !roll
@bot.command()
async def roll(dice: str):
"""Rolls a dice in NdN format."""
try:
rolls, limit = map(int, dice.split('d'))
except Exception:
await bot.say('Format has to be in NdN!')
return
result = ', '.join(str(random.randint(1, limit)) for r in range(rolls))
await bot.say(result)
# COMMAND: !choose
@bot.command()
async def choose(*choices: str):
"""Chooses between multiple choices."""
await bot.say(random.choice(choices))
# COMMAND: !joined
@bot.command()
async def joined(member: discord.Member):
"""Says when a member joined."""
await bot.say('{0.name} joined in {0.joined_at}'.format(member))
# COMMAND: !get_roles
@bot.command()
async def get_roles(member: discord.Member):
"""Lists a User's Roles"""
total = 0
role_list = ''
for role in member.roles:
if total > 0:
role_list += ', '
role_list += str(role)
total += 1
await bot.say('{0.name} is a member of these roles: '.format(member) + role_list)
# COMMAND: !get_channel_id
@bot.command(pass_context=True)
async def get_channel_id(ctx):
"""Lists the ID of the channel the message is sent in."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
await bot.say('Channel ID is {0.id}'.format(ctx.message.channel))
# COMMAND: !join
@bot.command(pass_context=True)
async def join(ctx, *, role_name: str):
"""Allows a user to join a public group."""
# List of Allowed Public Roles
allowed_roles = ['Europe',
'North America',
'Oceania',
'Overwatch',
'League of Legends',
'Co-op',
'Minna-chan']
if role_name not in allowed_roles:
await bot.say('{0.mention}, you may only join allowed public groups.'.format(ctx.message.author))
return
# Define role, then add role to member.
try:
role = discord.utils.get(ctx.message.server.roles, name=role_name)
await bot.add_roles(ctx.message.author, role)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error adding you to the group. "
"I'm sorry! : ".format(ctx.message.author) + str(e))
return
# Success Message
await bot.say('{0.mention}, you have successfully been added to the group **{1}**.'
.format(ctx.message.author, role_name))
# ********************************************** #
# GROUPED COMMANDS : EVENTS ******************** #
# ********************************************** #
# COMMAND: !events
@bot.group(pass_context=True)
async def events(ctx):
"""Manage events and attendance!"""
if ctx.invoked_subcommand is None:
await bot.say('Invalid command passed. Must be *add*, *description*, *edit*, *register*, or *remove*.')
# COMMAND: !events add
@events.command(name='add', pass_context=True)
async def events_add(ctx, date: str, time: str, *, title: str):
"""Add an event to the Events List!
Date **must** be in YYYY/MM/DD format. Time **must** be in UTC."""
# Set #events Channel
event_channel = bot.get_channel('296694692135829504')
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Make sure we have a date.
if date is None:
await bot.say('Error: You must enter a date in YYYY/MM/DD format.')
return
# Make sure we have a time.
if time is None:
await bot.say('Error: You must enter a time in HH:MM format in UTC timezone.')
return
# Make sure we have a title.
if title is None:
await bot.say('Error: You must enter a title for the event.')
return
# Add Event to Database
try:
sql = "INSERT INTO events (`date`,`time`,`title`) VALUES (%s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (date, time, title))
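# lastrowid is the auto-increment event_id that MySQL assigned to the new row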
event_id = cur.lastrowid
msg_text = "**Title**: {0} \n**Event ID**: {1} \n**Date & Time**: {2} at {3} (UTC)"
# Add Message to Events Channel and Save Message ID
message = await bot.send_message(event_channel, msg_text.format(title, event_id, date, time))
cur.execute('UPDATE events SET `message_id` = %s WHERE `event_id` = %s', (message.id, event_id))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding the event to the list. '.format(ctx.message.author)
+ str(e))
return
# Success Message
await bot.say('{0.mention}, your event was successfully added. The event ID is: {1}.'
.format(ctx.message.author, event_id))
# COMMAND: !events description
@events.command(name='description', pass_context=True)
async def events_description(ctx, event_id: int, *, desc: str):
"""Adds a Description to an Event Given an Event ID."""
# EVENT CHANNEL ID: 296694692135829504
event_channel = bot.get_channel('296694692135829504')
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Make sure we have an event ID.
if event_id is None:
await bot.say('Error: You must enter an event ID. Check the #events channel.')
return
# Make sure we have a description.
if desc is None:
await bot.say('Error: You must enter a description.')
return
try:
sql = "UPDATE events SET `description` = %s WHERE `event_id` = %s"
cur = db.cursor()
cur.execute(sql, (desc, event_id))
cur.execute("SELECT `message_id` FROM events WHERE `event_id` = %s", (event_id,))
msg_id = cur.fetchone()
message = await bot.get_message(event_channel, msg_id[0])
msg_text = message.content + " \n**Description**: {0}".format(desc)
# Update Message in Events Channel with Description
await bot.edit_message(message, msg_text)
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding a description to the event. '.format(ctx.message.author)
+ str(e))
return
# Success Message
await bot.say('{0.mention}, the event was successfully updated with a description.'.format(ctx.message.author))
# ********************************************** #
# GROUPED COMMANDS : GAMES ********************* #
# ********************************************** #
# COMMAND: !games
@bot.group(pass_context=True)
async def games(ctx):
"""Manages games for the roster."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid command passed. Must be *add*, *edit*, *list*, or *remove*.')
# COMMAND: !games add
@games.command(name='add', pass_context=True)
async def games_add(ctx, game_abv: str, *, game_name: str):
"""Adds a game to the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Does Game Abbreviation Exist?
if is_game_abv(game_abv):
await bot.say('{0.mention}, this abbreviation is already in use.'.format(ctx.message.author))
return
# Does Game Name Exist?
if is_game_name(game_name):
await bot.say('{0.mention}, this game is already in the list.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO games (`abv`,`name`) VALUES (%s, %s)"
cur = db.cursor()
cur.execute(sql, (game_abv, game_name))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error adding the game to the games list. '.format(ctx.message.author)
+ str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully added to the games list!'.format(ctx.message.author))
# COMMAND: !games edit
@games.command(name='edit', pass_context=True)
async def games_edit(ctx, game_abv: str, *, game_name: str):
"""Updates a game in the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Is there anything to update?
if not (is_game_abv(game_abv) or is_game_name(game_name)):
await bot.say('{0.mention}, either the abbreviation or the game name must exist to update.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE games SET `abv` = %s, `name` = %s WHERE `abv` = %s OR `name` = %s"
cur = db.cursor()
cur.execute(sql, (game_abv, game_name, game_abv, game_name))
db.commit()
cur.close()
except Exception as e:
await bot.say('{0.mention}, there was an error updating the game in the games list. '.format(ctx.message.author)
+ str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully updated in the games list!'.format(ctx.message.author))
# COMMAND: !games remove
@games.command(name='remove', pass_context=True)
async def games_remove(ctx, *, game_or_abv: str):
"""Removes a game from the list of games available in the roster."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Is there anything to remove?
if not (is_game_abv(game_or_abv) or is_game_name(game_or_abv)):
await bot.say('{0.mention}, either the abbreviation or the game name must exist to remove.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM games WHERE `abv` = %s OR `name` = %s"
cur = db.cursor()
cur.execute(sql, (game_or_abv, game_or_abv))
db.commit()
cur.close()
except Exception as e:
await bot.say("{0.mention}, there was an error deleting the game from the games list."
" ".format(ctx.message.author) + str(e))
return
# Display Success Message
await bot.say('{0.mention}, the game was successfully deleted from the games list!'.format(ctx.message.author))
# COMMAND: !games list
@games.command(name='list', pass_context=True)
async def games_list(ctx):
"""Sends a message to the user with the current games and abbreviations for use in the roster."""
# Handle Database
try:
sql = "SELECT `abv`, `name` FROM games ORDER BY `name`"
cur = db.cursor()
cur.execute(sql)
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the list of games for you."
" I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
abvs = ''
names = ''
for row in result:
abvs += (row[0] + '\n')
names += (row[1] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="Abbreviation", value=abvs, inline=True)
embed.add_field(name="Game Name", value=names, inline=True)
# Send Table to User Privately
await bot.send_message(ctx.message.channel, embed=embed)
# ********************************************** #
# GROUPED COMMANDS : ROSTER ******************** #
# ********************************************** #
# COMMAND: !roster
@bot.group(pass_context=True)
async def roster(ctx):
"""Handles Roster Management."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid roster command passed. Must be *add*, *edit*, *list*, or *remove*.')
# COMMAND: !roster add
@roster.command(name='add', pass_context=True)
async def roster_add(ctx, game_abv: str, *, ign: str):
"""Adds username to roster.
    Use a game abbreviation from the games list. Only one entry per game. Include all in-game names if necessary."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
        await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO roster (`discord_account`,`game_abv`,`game_account`) VALUES (%s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (username, game_abv, ign))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error adding your information to the roster.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your information was successfully added to the roster!'.format(ctx))
# COMMAND: !roster edit
@roster.command(name='edit', pass_context=True)
async def roster_edit(ctx, game_abv: str, *, ign: str):
"""Updates a roster entry for a specific game.
    If either the game name or your in-game name has spaces, put it in quotes."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
        await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game'
' abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE roster SET `game_account` = %s WHERE `discord_account` = %s AND `game_abv` = %s"
cur = db.cursor()
cur.execute(sql, (ign, username, game_abv))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error updating your roster information.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your roster information was successfully updated!'.format(ctx))
# COMMAND: !roster remove
@roster.command(name='remove', pass_context=True)
async def roster_remove(ctx, game_abv: str, *, ign: str):
"""Removes a user's entries in the roster for the specified game."""
username = str(ctx.message.author)
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
        await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable '
'game abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM roster WHERE `discord_account` = %s AND `game_abv` = %s AND `game_account` = %s"
cur = db.cursor()
cur.execute(sql, (username, game_abv, ign))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error deleting your roster information.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your roster information was successfully deleted!'.format(ctx))
# COMMAND: !roster list
@roster.command(name='list', pass_context=True)
async def roster_list(ctx, game_abv: str):
"""Sends a message to the user with the current roster for the specified game."""
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
        await bot.say('{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "SELECT `discord_account`, `game_account` FROM roster WHERE `game_abv` = %s ORDER BY `discord_account`"
cur = db.cursor()
cur.execute(sql, (game_abv,))
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the roster for you. "
"I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
accounts = ''
names = ''
for row in result:
accounts += (row[0] + '\n')
names += (row[1] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="Discord Account", value=accounts, inline=True)
embed.add_field(name="In-Game Name", value=names, inline=True)
# Send Table to Channel
await bot.send_message(ctx.message.channel, embed=embed)
# ********************************************** #
# GROUPED COMMANDS : RECRUIT ******************* #
# ********************************************** #
# COMMAND: !recruit
@bot.group(pass_context=True)
async def recruit(ctx):
"""Handles Recruitment Post and Invites Management."""
if ctx.invoked_subcommand is None:
await bot.say('Invalid recruitment command passed. Must be *add*, *edit*, *invite*, *list*, or *remove*.')
# COMMAND: !recruit add
@recruit.command(name='add', pass_context=True)
async def recruit_add(ctx, game_abv: str, *, link: str):
"""Adds recruitment post link to the recruitment list. Use a game abbreviation from the games list."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Does Game Abbreviation Exist?
if not is_game_abv(game_abv):
await bot.say(
            '{0.mention}, this abbreviation does not exist. Use !games list for a list of acceptable game '
'abbreviations.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "INSERT INTO recruitment (`game`,`link`) VALUES (%s, %s)"
cur = db.cursor()
cur.execute(sql, (game_abv, link))
db.commit()
cur.close()
except Exception:
await bot.say(
'{0.message.author.mention}, there was an error adding your recruitment link to the list.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, your information was successfully added to the recruitment '
'posts list!'.format(ctx))
# COMMAND: !recruit edit
@recruit.command(name='edit', pass_context=True)
async def recruit_edit(ctx, entry_id: int, *, link: str):
"""Updates a recruitment post entry with the specified entry ID."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "UPDATE recruitment SET `link` = %s WHERE `entry_id` = %s"
cur = db.cursor()
cur.execute(sql, (link, entry_id))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error updating the specified '
'recruitment entry.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, the recruitment entry was successfully updated!'.format(ctx))
# COMMAND: !recruit remove
@recruit.command(name='remove', pass_context=True)
async def recruit_remove(ctx, entry_id: int):
"""Removes an entry for the recruitment posts list with the specified entry ID."""
# Is the user allowed? (Must be staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Handle Database
try:
sql = "DELETE FROM recruitment WHERE `entry_id` = %s"
cur = db.cursor()
cur.execute(sql, (entry_id,))
db.commit()
cur.close()
except Exception:
await bot.say('{0.message.author.mention}, there was an error deleting the specified '
'recruitment entry.'.format(ctx))
return
# Display Success Message
await bot.say('{0.message.author.mention}, the recruitment entry was successfully deleted!'.format(ctx))
# COMMAND: !recruit list
@recruit.command(name='list', pass_context=True)
async def recruit_list(ctx):
"""Lists all recruitment post entries in the system."""
# Handle Database
try:
sql = "SELECT * FROM recruitment ORDER BY `game`"
cur = db.cursor()
cur.execute(sql)
result = cur.fetchall()
cur.close()
except Exception:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error getting the recruitment list "
"for you. I'm sorry!".format(ctx.message.author))
return
# Create Variables for Embed Table
entries = ''
game_abvs = ''
links = ''
for row in result:
        entries += (str(row[0]) + '\n')
game_abvs += (row[1] + '\n')
links += (row[2] + '\n')
# Create Embed Table
embed = discord.Embed()
embed.add_field(name="ID", value=entries, inline=True)
embed.add_field(name="Game", value=game_abvs, inline=True)
embed.add_field(name="Link", value=links, inline=True)
# Send Table to Channel
await bot.send_message(ctx.message.channel, embed=embed)
# COMMAND: !recruit invite
@recruit.command(name='invite')
async def recruit_invite(duration: int = None):
"""Provides an invite link to the Discord server. Set duration to 0 for permanent invite."""
# Default Duration 30 Minutes, Else Convert to Minutes
if duration is None:
duration = 1800
else:
duration *= 60
# WELCOME CHANNEL ID: 141622052133142529
welcome_channel = bot.get_channel('141622052133142529')
# Create the Invite
new_invite = await bot.create_invite(welcome_channel, max_age=duration)
# Send Message with Invite Link
await bot.say('Your newly generated invite link is: {0.url}'.format(new_invite))
# ********************************************** #
# MODERATOR COMMANDS *************************** #
# ********************************************** #
# COMMAND: !give_role
@bot.command(pass_context=True)
async def give_role(ctx, username: str, *, role_name: str):
"""Assigns a role to a user."""
# List of Roles Staff Can Add To.
allowed_roles = ['Europe',
'North America',
'Oceania',
'Overwatch',
'League of Legends',
'Co-op',
'Minna-chan',
'Squire',
'Knight',
'Zealot']
# Is the user allowed? (Must be Staff)
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
if role_name not in allowed_roles:
await bot.say('{0.mention}, you may only assign users to public roles, Guest, or Registered Member'
.format(ctx.message.author))
return
# Define role, then add role to member.
try:
role = discord.utils.get(ctx.message.server.roles, name=role_name)
user = discord.utils.get(ctx.message.server.members, name=username)
await bot.add_roles(user, role)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an granting the role to the user."
" ".format(ctx.message.author) + str(e))
return
# Success Message
await bot.say('{0.mention}, you have successfully added **{1}** to the group **{2}**'
'.'.format(ctx.message.author, username, role_name))
# COMMAND: !kick
@bot.command(name='kick', pass_context=True)
async def mod_kick(ctx, username: str, *, reason: str):
"""Kicks a user from the server."""
# User must be a staff member
if not is_staff(ctx.message.author):
await bot.say('{0.mention}, you must be a staff member to use this command.'.format(ctx.message.author))
return
# Add to DB and Post Message
try:
# Variables Needed
member = discord.utils.get(ctx.message.server.members, name=username)
staffer = ctx.message.author
# Handle Database
sql = "INSERT INTO mod_log (`action`,`user`, `user_id`, `staff`, `staff_id`, reason) " \
"VALUES ('kick', %s, %s, %s, %s, %s)"
cur = db.cursor()
cur.execute(sql, (str(member), member.id, str(staffer), staffer.id, reason))
# Save Last Row ID
case_id = cur.lastrowid
# Insert Message
log_channel = bot.get_channel('303262467205890051')
msg_text = "**Case #{0}** | Kick :boot: \n**User**: {1} ({2}) " \
"\n**Moderator**: {3} ({4}) \n**Reason**: {5}"
# Add Message to Events Channel and Save Message ID
case_message = await bot.send_message(log_channel, msg_text.format(case_id, str(member), member.id, str(staffer), staffer.id, reason))
cur.execute("UPDATE mod_log SET `message_id` = %s WHERE `case_id` = %s", (case_message.id, case_id))
# Finish Database Stuff and Commit
db.commit()
cur.close()
# Kick the Member
await bot.kick(member)
except Exception as e:
await bot.send_message(ctx.message.channel, "{0.mention}, there was an error when kicking the user."
" ".format(ctx.message.author) + str(e))
await bot.say("{0.mention}, the user was successfully kicked. A log entry has been added.".format(ctx.message.author))
# ********************************************** #
# START THE BOT ******************************** #
# ********************************************** #
# Run the Bot
bot.run('token-here')
| gpl-3.0 | 471,848,882,085,952,500 | -8,774,262,674,599,069,000 | 33.866518 | 142 | 0.58759 | false |
AlbertoPeon/invenio | modules/bibsword/lib/bibsword_client_templates.py | 37 | 41746 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
'''
BibSWORD Client Templates
'''
from invenio.config import CFG_SITE_URL, CFG_SITE_NAME, CFG_SITE_RECORD
class BibSwordTemplate:
'''
    This class contains attributes and methods that allow displaying all the
    information used by the BibSword web user interface. This information
    includes forms, validation and error messages
'''
def __init__(self):
''' No init necessary for this class '''
#---------------------------------------------------------------------------
# BibSword WebSubmit Interface
#---------------------------------------------------------------------------
def tmpl_display_submit_ack(self, remote_id, link):
'''
        This method generates the html code that displays the acknowledgement
        message after the submission of a record.
@param remote_id: id of the record given by arXiv
@param link: links to modify or consult submission
@return: string containing the html code
'''
html = ''
html += '''<h1>Success !</h1>'''
        html += '''<p>The record has been successfully pushed to arXiv! <br />''' \
                '''You will receive an email once it has been accepted by the ''' \
                '''arXiv moderators.</p>'''
html += '''<p>The arXiv id of the submission is: <b>%s</b></p>''' % \
remote_id
html += '''<p><a href="www.arxiv.org/user">Manage your submission</a></p>'''
return html
#---------------------------------------------------------------------------
# BibSword Administrator Interface
#---------------------------------------------------------------------------
def tmpl_display_admin_page(self, submissions, first_row, last_row,
total_rows, is_prev, is_last, offset,
error_messages=None):
'''
        format the html code that displays the submission table
@param submissions: list of all submissions and their status
@return: html code to be displayed
'''
if error_messages == None:
error_messages = []
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
%(error_message)s
<input type="hidden" name="status" value="display_submission"/>
<input type="hidden" name="first_row" value="%(first_row)s"/>
<input type="hidden" name="last_row" value="%(last_row)s"/>
<input type="hidden" name="total_rows" value="%(total_rows)s" />
<input type="submit" name="submit" value="New submission"/><br/>
<br />
<input type="submit" name="submit" value="Refresh all"/><br/>
<br />
Display
<select name="offset">
<option value="5" %(selected_1)s>5</option>
<option value="10" %(selected_2)s>10</option>
<option value="25" %(selected_3)s>25</option>
<option value="50" %(selected_4)s>50</option>
<option value=%(total_rows)s %(selected_5)s>all</option>
</select>
rows per page <input type="submit" name="submit" value="Select" /><br />
<br />
<input type="submit" name="submit" value="First" %(is_prev)s/>
<input type="submit" name="submit" value="Prev" %(is_prev)s/>
Pages %(first_row)s - %(last_row)s / %(total_rows)s
<input type="submit" name="submit" value="Next" %(is_last)s/>
<input type="submit" name="submit" value="Last" %(is_last)s/><br/>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="7" bgcolor="#e6e6fa">
<h2>Submission state</h2>
</td>
</tr>
<tr>
<td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Record number</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
</tr>
%(submissions)s
</table>
</form>''' % {
'error_message': \
self.display_error_message_row(error_messages),
'table_width' : '100%',
'first_row' : first_row,
'last_row' : last_row,
'total_rows' : total_rows,
'is_prev' : is_prev,
'is_last' : is_last,
'selected_1' : offset[0],
'selected_2' : offset[1],
'selected_3' : offset[2],
'selected_4' : offset[3],
'selected_5' : offset[4],
'submissions' : self.fill_submission_table(submissions)
}
return body
def tmpl_display_remote_server_info(self, server_info):
'''
        Display a table containing all server information
@param server_info: tuple containing all server infos
@return: html code for the table containing infos
'''
body = '''<table width="%(table_width)s">\n''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">ID</td>\n''' \
''' <td>%(server_id)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Name</td>\n''' \
''' <td>%(server_name)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Host</td>\n''' \
''' <td>%(server_host)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Username</td>\n''' \
''' <td>%(username)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Password</td>\n''' \
''' <td>%(password)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Email</td>\n''' \
''' <td>%(email)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Realm</td>\n''' \
''' <td>%(realm)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">Record URL</td>\n''' \
''' <td>%(url_base_record)s</td>\n''' \
''' </tr>\n ''' \
''' <tr>\n''' \
''' <td bgcolor="#e6e6fa">URL Servicedocument</td>\n'''\
''' <td>%(url_servicedocument)s</td>\n''' \
''' </tr>\n ''' \
'''</table>''' % {
'table_width' : '50%',
'server_id' : server_info['server_id'],
'server_name' : server_info['server_name'],
'server_host' : server_info['server_host'],
'username' : server_info['username'],
'password' : server_info['password'],
'email' : server_info['email'],
'realm' : server_info['realm'],
'url_base_record' : server_info['url_base_record'],
'url_servicedocument': server_info['url_servicedocument']
}
return body
def tmpl_display_remote_servers(self, remote_servers, id_record,
error_messages):
'''
        format the html code that displays a dropdown list containing the
servers
@param self: reference to the current instance of the class
@param remote_servers: list of tuple containing server's infos
@return: string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="select_server"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Forward a record</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Enter the number of the report to submit: </p>
</td>
<td align="left" width="%(row_width)s">
<input type="text" name="id_record" size="20"
value="%(id_record)s"/>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Select a remote server: </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_remote_server" size="1">
<option value="0">-- select a remote server --</option>
%(remote_server)s
</select>
</td>
</tr>
<tr>
<td colspan="2" align="center">
<input type="submit" value="Select" name="submit"/>
</td>
</tr>
</table>
</form>''' % {
'error_message': \
self.display_error_message_row(error_messages),
'table_width' : '100%',
'row_width' : '50%',
'id_record' : id_record,
'remote_server': \
self.fill_dropdown_remote_servers(remote_servers)
}
return body
def tmpl_display_collections(self, selected_server, server_infos,
collections, id_record, recid, error_messages):
'''
        format the html code that displays the selected server, the information
about the selected server and a dropdown list containing the server's
collections
@param self: reference to the current instance of the class
@param selected_server: tuple containing selected server name and id
@param server_infos: tuple containing infos about selected server
@param collections: list contianing server's collections
@return: string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="select_collection"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Remote server</h2></td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
SWORD version: %(server_version)s
</td>
</tr>
<tr>
<td align="left">
Max upload size [Kb]: %(server_maxUpload)s
</td>
</tr>
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify server" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa"><h2>Collection</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">Select a collection: </td>
<td align="left" width="%(row_width)s">
<select name="id_collection" size="1">
<option value="0">-- select a collection --</option>
%(collection)s
</select>
</td>
</tr>
<tr>
<td align="center" colspan="2">
<input type="submit" value="Select" name="submit"/>
</td>
</tr>
</table>
</form>''' % {
'table_width' : '100%',
'row_width' : '50%',
'error_message' : \
self.display_error_message_row(error_messages),
'id_server' : selected_server['id'],
'server_name' : selected_server['name'],
'server_version' : server_infos['version'],
'server_maxUpload': server_infos['maxUploadSize'],
'collection' : \
self.fill_dropdown_collections(collections),
'id_record' : id_record,
'recid' : recid
}
return body
def tmpl_display_categories(self, selected_server, server_infos,
selected_collection, collection_infos,
primary_categories, secondary_categories,
id_record, recid, error_messages):
'''
        format the html code that displays the selected server, the information
        about the selected server, the selected collections, the information
about the collection and a dropdown list containing the server's
primary and secondary categories
@param self: reference to the current instance of the class
@param selected_server: tuple containing selected server name and id
@param server_infos: tuple containing infos about selected server
@param selected_collection: selected collection
@param collection_infos: tuple containing infos about selected col
@param primary_categories: list of mandated categories for the col
@return: string containing html code
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="select_primary_category"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_collection" value="%(id_collection)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Remote server</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
SWORD version: %(server_version)s
</td>
</tr>
<tr>
<td align="left">
Max upload size [Kb]: %(server_maxUpload)s
</td>
</tr>
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify server" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Collection</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="2" valign="center">
<h2>%(collection_name)s</h2>
</td>
<td align="left">
URL: %(collection_url)s
</td>
</tr>
<tr>
<td align="left">
Accepted media types:
<ul>%(collection_accept)s</ul>
</td>
</tr>
<tr>
<td align="left" colspan=2>
<input type="submit" value="Modify collection" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Mandatory category</h2>
</td>
</tr>
<tr>
<td align="right" width="%(row_width)s">
<p>Select a mandated category: </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_primary" size="1">
<option value="0">-- select a category --</option>
%(primary_categories)s
</select>
</td>
</tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Optional categories</h2>
</td>
</tr>
<td align="right" width="%(row_width)s">
<p>Select optional categories: </p>
</td>
<td align="left" width="%(row_width)s">
<select name="id_categories" size="10" multiple>
%(secondary_categories)s
</select>
</td>
</tr>
</table>
<p> </p>
<center>
<input type="submit" value="Select" name="submit"/>
</center>
</form>''' % {
'table_width' : '100%',
'row_width' : '50%',
'error_message' : self.display_error_message_row(
error_messages),
# hidden input
'id_server' : selected_server['id'],
'id_collection' : selected_collection['id'],
'id_record' : id_record,
'recid' : recid,
# variables values
'server_name' : selected_server['name'],
'server_version' : server_infos['version'],
'server_maxUpload' : server_infos['maxUploadSize'],
'collection_name' : selected_collection['label'],
'collection_accept': ''.join([
'''<li>%(name)s </li>''' % {
'name': accept
} for accept in collection_infos['accept'] ]),
'collection_url' : selected_collection['url'],
'primary_categories' : self.fill_dropdown_primary(
primary_categories),
'secondary_categories': self.fill_dropdown_secondary(
secondary_categories)
}
return body
def tmpl_display_metadata(self, user, server, collection, primary,
categories, medias, metadata, id_record, recid,
error_messages):
'''
        format a string containing all the information before a submission
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<input type="hidden" name="status" value="check_submission"/>
<input type="hidden" name="id_remote_server" value="%(id_server)s"/>
<input type="hidden" name="id_collection" value="%(id_collection)s"/>
<input type="hidden" name="id_primary" value="%(id_primary)s"/>
<input type="hidden" name="id_categories" value="%(id_categories)s"/>
<input type="hidden" name="id_record" value="%(id_record)s"/>
<input type="hidden" name="recid" value="%(recid)s"/>
%(error_message)s
<input type="submit" name="submit" value="Cancel" />
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="2" bgcolor="#e6e6fa">
<h2>Destination</h2>
</td>
</tr>
<tr>
<td align="center" rowspan="3" valign="center">
<h2>%(server_name)s</h2>
</td>
<td align="left">
Collection: %(collection_name)s ( %(collection_url)s )
</td>
</tr>
<tr>
<td align="left">
Primary category: %(primary_name)s ( %(primary_url)s )
</td>
</tr>
%(categories)s
<tr>
<td align="left" colspan="2">
<input type="submit" value="Modify destination" name="submit"/>
</td>
</tr>
</table>
<p> </p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="4" bgcolor="#e6e6fa">
<h2>Submitter</h2>
</td>
</tr>
<tr>
<td width="%(row_width)s">Name:</td>
<td><input type="text" name="author_name" size="100"
value="%(user_name)s"/></td>
</tr>
<tr>
<td>Email:</td>
<td><input type="text" name="author_email" size="100"
value="%(user_email)s"/></td>
</tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="4" bgcolor="#e6e6fa"><h2>Media</h2></td>
</tr>
<tr><td colspan="4">%(medias)s%(media_help)s</td></tr>
</table>
<p></p>
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="3" bgcolor="#e6e6fa"><h2>Metadata</h2> <font color="red"><b>Warning:</b> modification(s) will not be saved on the %(CFG_SITE_NAME)s</font>
</td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Report Number<span style="color:#f00">*</span>:</p></td>
<td><input type="text" name="id" size="100" value="%(id)s"/></td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Title<span style="color:#f00">*</span>:</p></td>
<td><input type="text" name="title" size="100" value="%(title)s"/>
</td>
</tr>
<tr>
<td align="left" width="%(row_width)s"><p>Summary<span style="color:#f00">*</span>:</p></td>
<td>
<textarea name="summary" rows="4" cols="100">%(summary)s
</textarea>
</td>
</tr>
%(contributors)s
%(journal_refs)s
%(report_nos)s
</table>
<p><font color="red">The fields having a * are mandatory</font></p>
<center>
<input type="submit" value="Submit" name="submit"/>
</center>
<form>''' % {
'table_width' : '100%',
'row_width' : '25%',
'error_message' : \
self.display_error_message_row(error_messages),
'CFG_SITE_NAME': CFG_SITE_NAME,
# hidden input
'id_server' : server['id'],
'id_collection' : collection['id'],
'id_primary' : primary['id'],
'id_categories' : self.get_list_id_categories(categories),
'id_record' : id_record,
'recid' : recid,
# variables values
'server_name' : server['name'],
'collection_name' : collection['label'],
'collection_url' : collection['url'],
'primary_name' : primary['label'],
'primary_url' : primary['url'],
'categories' : self.fill_optional_category_list(categories),
#user
'user_name' : user['nickname'],
'user_email' : user['email'],
# media
'medias' : self.fill_media_list(medias, server['id']),
'media_help' : self.fill_arxiv_help_message(),
# metadata
'id' : metadata['id'],
'title' : metadata['title'],
'summary' : metadata['summary'],
'contributors' : self.fill_contributors_list(
metadata['contributors']),
'journal_refs' : self.fill_journal_refs_list(
metadata['journal_refs']),
'report_nos' : self.fill_report_nos_list(
metadata['report_nos'])
}
return body
def tmpl_display_list_submission(self, submissions):
'''
        Display the data of submitted records
'''
body = '''
<form method="post" enctype="multipart/form-data" accept-charset="UTF-8" action="/bibsword">
<table border="1" valign="top" width="%(table_width)s">
<tr>
<td align="left" colspan="7" bgcolor="#e6e6fa">
<h2>Document successfully submitted !</h2>
</td>
</tr>
<tr>
<td align="center" bgcolor="#e6e6fa"><b>Remote server</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Submitter</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Record id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Remote id</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Status</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Dates</b></td>
<td align="center" bgcolor="#e6e6fa"><b>Links</b></td>
</tr>
%(submissions)s
</table>
<a href=%(CFG_SITE_URL)s/bibsword>Return</a>
</form>''' % {
'table_width' : '100%',
'submissions' : self.fill_submission_table(submissions),
'CFG_SITE_URL' : CFG_SITE_URL
}
return body
#***************************************************************************
# Private functions
#***************************************************************************
def display_error_message_row(self, error_messages):
'''
        return a list of error messages in the form of a bullet list
        @param error_messages: list of error_messages to display
        @return: html code that displays the list of errors
'''
# if no errors, return nothing
if len(error_messages) == 0:
return ''
if len(error_messages) == 1:
# display a generic header message
body = '''
<tr>
<td align="left" colspan=2>
<font color='red'>
<p> The following error was found: </p>
<ul>
'''
else:
# display a generic header message
body = '''
<tr>
<td align="left" colspan=2>
<font color='red'>
<p> Following errors were found: </p>
<ul>
'''
# insert each error lines
for error_message in error_messages:
body = body + '''
<li>%(error)s</li>''' % {
'error': error_message
}
body = body + '''
</ul>
</font>
</td>
</tr>'''
return body
def fill_submission_table(self, submissions):
'''
        This method returns the body of the submission state table. Each
        submission given in parameters has one row
@param submissions: submission status list
@return: html table body
'''
return ''.join([
''' <tr>
<td>%(id_server)s: <a href="%(server_infos)s">
%(server_name)s</a></td>
                <td>%(user_name)s <br/> %(user_email)s</td>
<td>%(id_bibrec)s: <a href="%(cfg_site_url)s/%(CFG_SITE_RECORD)s/%(id_bibrec)s"
target="_blank">%(no_bibrec)s</a></td>
<td><a href="%(url_base_remote)s/%(id_remote)s" target="_blank">
%(id_remote)s</a></td>
<td>%(status)s</td>
<td><b>submission: </b> %(submission_date)s <br/>
<b>publication: </b> %(publication_date)s <br/>
<b>removal: </b> %(removal_date)s </td>
<td><b>media: </b> <a href="%(media_link)s" target="_blank">
%(media_link)s</a> <br/>
<b>metadata: </b> <a href="%(metadata_link)s" target="_blank">
%(metadata_link)s</a> <br />
<b>status: </b> <a href="%(status_link)s" target="_blank">
%(status_link)s</a></td>
</tr>''' % {
'id_server' : str(submission['id_server']),
'server_infos' : "%s/bibsword/remoteserverinfos?id=%s" % \
(CFG_SITE_URL, submission['id_server']),
'server_name' : str(submission['server_name']),
'user_name' : str(submission['user_name']),
'user_email' : str(submission['user_email']),
'id_bibrec' : str(submission['id_record']),
'no_bibrec' : str(submission['report_no']),
'id_remote' : str(submission['id_remote']),
'status' : str(submission['status']),
'submission_date' : str(submission['submission_date']),
'publication_date' : str(submission['publication_date']),
'removal_date' : str(submission['removal_date']),
'media_link' : str(submission['link_medias']),
'metadata_link' : str(submission['link_metadata']),
'status_link' : str(submission['link_status']),
'url_base_remote' : str(submission['url_base_remote']),
'cfg_site_url' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD
} for submission in submissions])
def fill_dropdown_remote_servers(self, remote_servers):
'''
        This method fills a dropdown list of remote servers.
@return: html code to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s - %(host)s</option>''' % {
'id': str(remote_server['id']),
'name': remote_server['name'],
'host': remote_server['host']
} for remote_server in remote_servers])
def fill_dropdown_collections(self, collections):
'''
        This method fills a dropdown list of collections.
@param collections: list of all collections with name - url
@return: html code to display
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': str(collection['id']),
'name': collection['label']
} for collection in collections])
def fill_dropdown_primary(self, primary_categories):
'''
        This method fills the primary dropdown list with the data given as
        parameter
@param primary_categories: list of 'url' 'name' tuples
@return: html code generated to display the list
'''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': primary_categorie['id'],
'name': primary_categorie['label']
} for primary_categorie in primary_categories])
def fill_dropdown_secondary(self, categories):
'''
        This method fills a category list. This list allows the multi-selection
        of items. To select more than one category in a browser, use
        ctrl + click
@param categories: list of all categories in the format name - url
@return: the html code that display each dropdown list
'''
        if len(categories) == 0:
            return ''
return ''.join([
'''<option value="%(id)s">%(name)s</option>''' % {
'id': category['id'],
'name': category['label']
} for category in categories])
def fill_optional_category_list(self, categories):
'''
        This method fills a table row that contains name and url of the selected
optional categories
@param self: reference to the current instance of the class
@param categories: list of tuples containing selected categories
@return: html code generated to display the list
'''
if len(categories) == 0:
return ''
else:
body = '<tr><td>'
body = body + ''.join([
'''<p>Category: %(category_name)s ( %(category_url)s )</p>'''%{
'category_name' : category['label'],
'category_url' : category['url']
} for category in categories
])
body = body + '</td></tr>'
return body
def fill_media_list(self, medias, id_server, from_websubmit=False):
'''
Concatenate the string that contains all informations about the medias
'''
text = ''
if id_server == 1:
media_type = self.format_media_list_by_type(medias)
text = '''<h2>Please select files you would like to push to arXiv:</h2>'''
for mtype in media_type:
text += '''<h3><b>%s: </b></h3>''' % mtype['media_type']
text += '''<blockquote>'''
for media in mtype['media_list']:
text += '''<input type='checkbox' name="media" value="%s" %s>%s</input><br />''' % (media['path'], media['selected'], media['name'])
text += "</blockquote>"
text += '''<h3>Upload</h3>'''
text += '''<blockquote>'''
text += '''<p>In addition, you can submit a new file (that will be added to the record as well):</p>'''
if from_websubmit == False:
text += '''<input type="file" name="new_media" size="60"/>'''
return text
def fill_arxiv_help_message(self):
text = '''</blockquote><h3>Help</h3>'''
text += '''<blockquote><p>For more help on which formats are supported by arXiv, please see:'''\
'''<ul>'''\
'''<li><a href="http://arxiv.org/help/submit" target="_blank">'''\
'''arXiv submission process</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_tex" target="_blank">'''\
'''arXiv TeX submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_docx" target="_blank">'''\
'''arXiv Docx submission</a></li>'''\
'''<li><a href="http://arxiv.org/help/submit_pdf" target="_blank">'''\
'''arXiv PDF submission</a></li>'''\
'''</ul></blockquote>'''
return text
def fill_contributors_list(self, contributors):
'''
        This method displays each contributor in the format of an editable input
        text. This allows the user to modify it.
@param contributors: The list of all contributors of the document
@return: the html code that display each dropdown list
'''
output = ''
is_author = True
for author in contributors:
nb_rows = 2
author_name = \
'''<LABEL for="name">Name: </LABEL><input type = "text" ''' \
'''name = "contributor_name" size = "100" value = "%s" ''' \
'''id="name"/>''' % author['name']
author_email = \
'''<LABEL for = "email">Email: </LABEL>''' \
'''<input type = "text" name = "contributor_email" ''' \
'''size = "100" value = "%s" id = "email"/>''' % author['email']
author_affiliations = []
for affiliation in author['affiliation']:
affiliation_row = \
'''<LABEL for = "affiliation">Affiliation: </LABEL> ''' \
'''<input type="text" name = "contributor_affiliation" ''' \
'''size = "100" value = "%s" id = "affiliation"/>''' % \
affiliation
author_affiliations.append(affiliation_row)
nb_rows = nb_rows + 1
affiliation_row = \
'''<LABEL for = "affiliation">Affiliation: </LABEL>''' \
'''<input type = "text" name = "contributor_affiliation" ''' \
'''size = "100" id = "affiliation"/>'''
author_affiliations.append(affiliation_row)
nb_rows = nb_rows + 1
if is_author:
output += '''<tr><td rowspan = "%s">Author: </td>''' % nb_rows
is_author = False
else:
output += '''<tr><td rowspan = "%s">Contributor: </td>''' % \
nb_rows
output += '''<td>%s</td></tr>''' % author_name
if author_email != '':
output += '''<tr><td>%s</td></tr>''' % author_email
for affiliation in author_affiliations:
output += '''<tr><td>%s</td></tr>''' % affiliation
output += \
'''<input type = "hidden" name = "contributor_affiliation" ''' \
'''value = "next"/>'''
return output
def fill_journal_refs_list(self, journal_refs):
'''
        This method displays each journal reference in the format of an editable
        input text. This allows the user to modify it.
@param journal_refs: The list of all journal references of the document
@return: the html code that display each dropdown list
'''
html = ''
if len(journal_refs) > 0:
html += '''
<tr>
<td align="left"><p>Journal references: </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="journal_refs" size="100" ''' \
'''value="%(journal_ref)s"/></p>
''' % {
'journal_ref': journal_ref
} for journal_ref in journal_refs
])
html = html + '''
</td>
</tr>
'''
return html
def fill_report_nos_list(self, report_nos):
'''
        Concatenate a string containing the report number html table rows
'''
html = ''
if len(report_nos) > 0:
html = '''
<tr>
<td align="left"><p>Report numbers: </p></td><td>
'''
html = html + ''.join([
'''
<p><input type="text" name="report_nos" size="100" ''' \
'''value="%(report_no)s"/></p>''' % {
'report_no': report_no
} for report_no in report_nos
])
html = html + '''
</td>
</tr>
'''
return html
def get_list_id_categories(self, categories):
'''
        gives the ids of the given categories
'''
id_categories = []
for category in categories:
id_categories.append(category['id'])
return id_categories
def format_media_list_by_type(self, medias):
'''
        This function formats the media by type (Main, Uploaded, ...)
'''
#format media list by type of document
media_type = []
for media in medias:
# if it is the first media of this type, create a new type
is_type_in_media_type = False
for type in media_type:
if media['collection'] == type['media_type']:
is_type_in_media_type = True
if is_type_in_media_type == False:
type = {}
type['media_type'] = media['collection']
type['media_list'] = []
media_type.append(type)
# insert the media in the good media_type element
for type in media_type:
if type['media_type'] == media['collection']:
type['media_list'].append(media)
return media_type
| gpl-2.0 | -8,219,154,245,460,730,000 | -2,442,074,999,988,352,500 | 36.745027 | 178 | 0.470033 | false |
jcai19/smm_gem5 | src/dev/x86/I82094AA.py | 69 | 2201 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice
from X86IntPin import X86IntSinkPin
class I82094AA(BasicPioDevice):
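    # Python-side gem5 SimObject parameter definition for the Intel 82094AA I/O APIC device model.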
type = 'I82094AA'
cxx_class = 'X86ISA::I82094AA'
cxx_header = "dev/x86/i82094aa.hh"
apic_id = Param.Int(1, 'APIC id for this IO APIC')
int_master = MasterPort("Port for sending interrupt messages")
int_latency = Param.Latency('1ns', \
"Latency for an interrupt to propagate through this device.")
external_int_pic = Param.I8259(NULL, "External PIC, if any")
def pin(self, line):
return X86IntSinkPin(device=self, number=line)
| bsd-3-clause | 4,672,868,235,308,226,000 | 5,144,134,946,577,102,000 | 47.911111 | 73 | 0.76647 | false |
MichaelNedzelsky/intellij-community | python/helpers/py3only/docutils/parsers/rst/languages/zh_cn.py | 52 | 3936 | # -*- coding: utf-8 -*-
# $Id: zh_cn.py 7119 2011-09-02 13:00:23Z milde $
# Author: Panjunyong <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Simplified Chinese language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
'注意': 'attention',
'小心': 'caution',
'code (translation required)': 'code',
'危险': 'danger',
'错误': 'error',
'提示': 'hint',
'重要': 'important',
'注解': 'note',
'技巧': 'tip',
'警告': 'warning',
'忠告': 'admonition',
'侧框': 'sidebar',
'主题': 'topic',
'line-block (translation required)': 'line-block',
'parsed-literal (translation required)': 'parsed-literal',
'醒目': 'rubric',
'铭文': 'epigraph',
'要点': 'highlights',
'pull-quote (translation required)': 'pull-quote',
'复合': 'compound',
'容器': 'container',
#u'questions (translation required)': 'questions',
'表格': 'table',
'csv表格': 'csv-table',
'列表表格': 'list-table',
#u'qa (translation required)': 'questions',
#u'faq (translation required)': 'questions',
'元数据': 'meta',
'math (translation required)': 'math',
#u'imagemap (translation required)': 'imagemap',
'图片': 'image',
'图例': 'figure',
'包含': 'include',
'原文': 'raw',
'代替': 'replace',
'统一码': 'unicode',
'日期': 'date',
'类型': 'class',
'角色': 'role',
'默认角色': 'default-role',
'标题': 'title',
'目录': 'contents',
'章节序号': 'sectnum',
'题头': 'header',
'页脚': 'footer',
#u'footnotes (translation required)': 'footnotes',
#u'citations (translation required)': 'citations',
'target-notes (translation required)': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Simplified Chinese name to registered (in directives/__init__.py)
directive name mapping."""
roles = {
# language-dependent: fixed
'缩写': 'abbreviation',
'简称': 'acronym',
'code (translation required)': 'code',
'index (translation required)': 'index',
'i (translation required)': 'index',
'下标': 'subscript',
'上标': 'superscript',
'title-reference (translation required)': 'title-reference',
'title (translation required)': 'title-reference',
't (translation required)': 'title-reference',
'pep-reference (translation required)': 'pep-reference',
'pep (translation required)': 'pep-reference',
'rfc-reference (translation required)': 'rfc-reference',
'rfc (translation required)': 'rfc-reference',
'强调': 'emphasis',
'加粗': 'strong',
'字面': 'literal',
'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
'citation-reference (translation required)': 'citation-reference',
'substitution-reference (translation required)': 'substitution-reference',
'target (translation required)': 'target',
'uri-reference (translation required)': 'uri-reference',
'uri (translation required)': 'uri-reference',
'url (translation required)': 'uri-reference',
'raw (translation required)': 'raw',}
"""Mapping of Simplified Chinese role names to canonical role names
for interpreted text."""
| apache-2.0 | -5,296,435,303,859,877,000 | -2,684,509,705,544,192,500 | 35.038462 | 78 | 0.619264 | false |
nicain/dipde_dev | dipde/interfaces/zmq/__init__.py | 1 | 4371 | import time
import zmq
import threading
context = zmq.Context()
class PublishCallback(object):
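    """Callable that publishes the result of message_callback(obj), prefixed with the topic, on a ZMQ PUB socket."""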
def __init__(self, port, topic, message_callback):
self.port = port
self.topic = topic
self.message_callback = message_callback
self.socket = context.socket(zmq.PUB)
def __call__(self, obj):
message_to_send = list(self.message_callback(obj))
message_to_send.insert(0,"%s" % self.topic)
self.socket.send_multipart(map(str, message_to_send))
class PublishCallbackConnect(PublishCallback):
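    """PublishCallback variant that connects its PUB socket to localhost on the given port."""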
def __init__(self, port, topic, message_callback):
super(self.__class__, self).__init__(port, topic, message_callback)
self.socket.connect("tcp://localhost:%s" % self.port)
class CallbackSubscriber(object):
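    """SUB socket bound to a port (random if none given) that subscribes to the 'test' topic and passes each message body to receive_callback."""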
def __init__(self, port=None, receive_callback=None):
self.socket = context.socket(zmq.SUB)
if port is None:
self.port = self.socket.bind_to_random_port('tcp://*', min_port=6001, max_port=6004, max_tries=100)
else:
self.socket.bind("tcp://*:%s" % port)
self.port = port
self.socket.setsockopt(zmq.SUBSCRIBE, 'test')
if receive_callback is None:
def receive_callback(received_message):
print received_message
self.receive_callback = receive_callback
def run(self):
while True:
received_message_multipart = self.socket.recv_multipart()
topic = received_message_multipart[0]
received_message = received_message_multipart[1:]
self.receive_callback(received_message)
class CallbackSubscriberThread(threading.Thread):
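    """Daemon thread that runs a CallbackSubscriber in the background."""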
def __init__(self, port=None):
super(self.__class__, self).__init__()
self.subscriber = CallbackSubscriber(port)
self.daemon = True
def run(self, port=None):
self.subscriber.run()
@property
def port(self):
return self.subscriber.port
class RequestConnection(object):
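    """REQ client connected to localhost:port; calling it sends the arguments and returns the first frame of the reply as a float."""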
def __init__(self, port):
self.port = port
self.socket = context.socket(zmq.REQ)
self.socket.connect("tcp://localhost:%s" % port)
def __call__(self, *args):
if len(args) == 0:
self.socket.send(b'')
else:
self.socket.send_multipart(map(str,args))
message = self.socket.recv_multipart()
return float(message[0])
def shutdown(self):
self.socket.close()
assert self.socket.closed
class ReplyServerBind(object):
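    """REP server bound to a port that answers each request with reply_function; a 'SHUTDOWN' message ends the loop."""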
def __init__(self, reply_function, port=None):
self.socket = context.socket(zmq.REP)
if port is None:
self.port = self.socket.bind_to_random_port('tcp://*', min_port=6001, max_port=6004, max_tries=100)
else:
self.socket.bind("tcp://*:%s" % port)
self.port = port
self.reply_function = reply_function
def run(self):
while True:
message = self.socket.recv()
# print 'message:', message, type(message)
if message == 'SHUTDOWN':
break
# print 'message'
if message == '':
requested_args = tuple()
else:
requested_args = tuple([float(message)])
self.socket.send_multipart([b"%s" % self.reply_function(*requested_args)])
self.socket.send('DOWN')
self.socket.close()
class ReplyServerThread(threading.Thread):
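    """Daemon thread running a ReplyServerBind; shutdown() stops the server by sending 'SHUTDOWN' over a REQ socket."""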
def __init__(self, reply_function, port=None):
super(ReplyServerThread, self).__init__()
self._stop = threading.Event()
self.daemon = True
self.reply_function = reply_function
self.server = ReplyServerBind(self.reply_function, port=port)
def run(self, port=None):
self.server.run()
def shutdown(self):
shutdown_socket = context.socket(zmq.REQ)
shutdown_socket.connect("tcp://localhost:%s" % self.port)
shutdown_socket.send('SHUTDOWN')
message = shutdown_socket.recv()
assert message == 'DOWN'
self.stop()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
@property
def port(self):
return self.server.port
| gpl-3.0 | -4,613,229,068,779,222,000 | 7,997,007,816,909,817,000 | 27.94702 | 111 | 0.577442 | false |
ZhaoCJ/django | django/db/backends/utils.py | 2 | 5407 | from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
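    """Wrapper around a DB-API cursor that marks the connection dirty on data-changing calls and converts low-level errors via wrap_database_errors."""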
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
SET_DIRTY_ATTRS = frozenset(['execute', 'executemany', 'callproc'])
WRAP_ERROR_ATTRS = frozenset([
'callproc', 'close', 'execute', 'executemany',
'fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
if attr in CursorWrapper.SET_DIRTY_ATTRS:
self.db.set_dirty()
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
class CursorDebugWrapper(CursorWrapper):
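    """CursorWrapper that also times each query, appends it to db.queries and logs it through the django.db.backends logger."""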
def execute(self, sql, params=None):
self.db.set_dirty()
start = time()
try:
with self.db.wrap_database_errors:
if params is None:
# params default might be backend specific
return self.cursor.execute(sql)
return self.cursor.execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
self.db.set_dirty()
start = time()
try:
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.' + microseconds) * 1000000))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
    if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo)
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
return '%s%s' % (name[:length - hash_len], hsh)
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
context.prec = max_digits
return "{0:f}".format(value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
else:
return "%.*f" % (decimal_places, value)
| bsd-3-clause | -825,858,004,658,473,000 | -1,845,481,204,777,218,300 | 30.619883 | 105 | 0.554281 | false |
infinitespace/deepdive | ddlib/without_ddlib.py | 15 | 1750 | #! /usr/bin/env python
# File: udf/ext_has_spouse_features.py
import sys, json
# For each input tuple
# TODO: Sample Data and the input schema.
# sample json
for row in sys.stdin:
obj = json.loads(row)
# Library/DSL??? This is a span, it should be an object.
p1_start = obj["p1.start_position"]
p1_length = obj["p1.length"]
p1_end = p1_start + p1_length
p2_start = obj["p2.start_position"]
p2_length = obj["p2.length"]
p2_end = p2_start + p2_length
  p1_text = obj["words"][p1_start:p1_end]
  p2_text = obj["words"][p2_start:p2_end]
left_idx = min(p1_end, p2_end)
right_idx = max(p1_start, p2_start)
# Features for this pair come in here
features = set()
# Feature 1: Find out if a lemma of marry occurs.
# A better feature would ensure this is on the dependency path between the two.
lemma_between = obj["lemma"][left_idx:right_idx]
married_words = ['marry', 'widow']
for mw in married_words:
if mw in lemma_between:
features.add("important_word=%s" % mw)
# Feature 2: The number of words between the two phrases.
# Intuition: if they are close by, the link may be stronger.
words_between = obj["words"][left_idx:right_idx]
l = len(words_between)
if l < 5: features.add("num_words_between=%s" % l)
else: features.add("many_words_between")
# Feature 3: Check if the last name matches heuristically.
last_word_left = obj["words"][p1_end - 1]
last_word_right = obj["words"][p2_end - 1]
if (last_word_left == last_word_right):
features.add("potential_last_name_match")
# TODO: Add more features, look at dependency paths, etc
for feature in features:
print json.dumps({
"relation_id": obj["relation_id"],
"feature": feature
})
| apache-2.0 | -6,046,975,721,191,156,000 | 3,871,582,294,627,286,000 | 30.818182 | 81 | 0.658286 | false |
yantrabuddhi/nativeclient | buildbot/buildbot_lib.py | 1 | 21952 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os.path
import shutil
import subprocess
import stat
import sys
import time
import traceback
ARCH_MAP = {
'32': {
'gyp_arch': 'ia32',
'scons_platform': 'x86-32',
},
'64': {
'gyp_arch': 'x64',
'scons_platform': 'x86-64',
},
'arm': {
'gyp_arch': 'arm',
'scons_platform': 'arm',
},
'mips32': {
'gyp_arch': 'mips32',
'scons_platform': 'mips32',
},
}
def RunningOnBuildbot():
return os.environ.get('BUILDBOT_SLAVE_TYPE') is not None
def GetHostPlatform():
sys_platform = sys.platform.lower()
if sys_platform.startswith('linux'):
return 'linux'
elif sys_platform in ('win', 'win32', 'windows', 'cygwin'):
return 'win'
elif sys_platform in ('darwin', 'mac'):
return 'mac'
else:
raise Exception('Can not determine the platform!')
def SetDefaultContextAttributes(context):
"""
Set default values for the attributes needed by the SCons function, so that
SCons can be run without needing ParseStandardCommandLine
"""
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = 'opt'
context['default_scons_mode'] = ['opt-host', 'nacl']
context['default_scons_platform'] = ('x86-64' if platform == 'win'
else 'x86-32')
context['android'] = False
context['clang'] = False
context['asan'] = False
context['pnacl'] = False
context['use_glibc'] = False
context['use_breakpad_tools'] = False
context['max_jobs'] = 8
context['scons_args'] = []
# Windows-specific environment manipulation
def SetupWindowsEnvironment(context):
# Poke around looking for MSVC. We should do something more principled in
# the future.
# The name of Program Files can differ, depending on the bittage of Windows.
program_files = r'c:\Program Files (x86)'
if not os.path.exists(program_files):
program_files = r'c:\Program Files'
if not os.path.exists(program_files):
raise Exception('Cannot find the Program Files directory!')
# The location of MSVC can differ depending on the version.
msvc_locs = [
('Microsoft Visual Studio 12.0', 'VS120COMNTOOLS', '2013'),
('Microsoft Visual Studio 10.0', 'VS100COMNTOOLS', '2010'),
('Microsoft Visual Studio 9.0', 'VS90COMNTOOLS', '2008'),
('Microsoft Visual Studio 8.0', 'VS80COMNTOOLS', '2005'),
]
for dirname, comntools_var, gyp_msvs_version in msvc_locs:
msvc = os.path.join(program_files, dirname)
context.SetEnv('GYP_MSVS_VERSION', gyp_msvs_version)
if os.path.exists(msvc):
break
else:
# The break statement did not execute.
raise Exception('Cannot find MSVC!')
# Put MSVC in the path.
vc = os.path.join(msvc, 'VC')
comntools = os.path.join(msvc, 'Common7', 'Tools')
perf = os.path.join(msvc, 'Team Tools', 'Performance Tools')
context.SetEnv('PATH', os.pathsep.join([
context.GetEnv('PATH'),
vc,
comntools,
perf]))
# SCons needs this variable to find vsvars.bat.
# The end slash is needed because the batch files expect it.
context.SetEnv(comntools_var, comntools + '\\')
# This environment variable will SCons to print debug info while it searches
# for MSVC.
context.SetEnv('SCONS_MSCOMMON_DEBUG', '-')
# Needed for finding devenv.
context['msvc'] = msvc
SetupGyp(context, [])
def SetupGyp(context, extra_vars=[]):
if RunningOnBuildbot():
goma_opts = [
'use_goma=1',
'gomadir=/b/build/goma',
]
else:
goma_opts = []
context.SetEnv('GYP_DEFINES', ' '.join(
context['gyp_vars'] + goma_opts + extra_vars))
def SetupLinuxEnvironment(context):
if context['arch'] == 'mips32':
# Ensure the trusted mips toolchain is installed.
cmd = ['build/package_version/package_version.py', '--packages',
'linux_x86/mips_trusted', 'sync', '-x']
Command(context, cmd)
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupMacEnvironment(context):
SetupGyp(context, ['target_arch='+context['gyp_arch']])
def SetupAndroidEnvironment(context):
SetupGyp(context, ['OS=android', 'target_arch='+context['gyp_arch']])
context.SetEnv('GYP_CROSSCOMPILE', '1')
def ParseStandardCommandLine(context):
"""
The standard buildbot scripts require 3 arguments to run. The first
argument (dbg/opt) controls if the build is a debug or a release build. The
second argument (32/64) controls the machine architecture being targeted.
The third argument (newlib/glibc) controls which c library we're using for
the nexes. Different buildbots may have different sets of arguments.
"""
parser = optparse.OptionParser()
parser.add_option('-n', '--dry-run', dest='dry_run', default=False,
action='store_true', help='Do not execute any commands.')
parser.add_option('--inside-toolchain', dest='inside_toolchain',
default=bool(os.environ.get('INSIDE_TOOLCHAIN')),
action='store_true', help='Inside toolchain build.')
parser.add_option('--android', dest='android', default=False,
action='store_true', help='Build for Android.')
parser.add_option('--clang', dest='clang', default=False,
action='store_true', help='Build trusted code with Clang.')
parser.add_option('--coverage', dest='coverage', default=False,
action='store_true',
help='Build and test for code coverage.')
parser.add_option('--validator', dest='validator', default=False,
action='store_true',
help='Only run validator regression test')
parser.add_option('--asan', dest='asan', default=False,
action='store_true', help='Build trusted code with ASan.')
parser.add_option('--scons-args', dest='scons_args', default =[],
action='append', help='Extra scons arguments.')
parser.add_option('--step-suffix', metavar='SUFFIX', default='',
help='Append SUFFIX to buildbot step names.')
parser.add_option('--no-gyp', dest='no_gyp', default=False,
action='store_true', help='Do not run the gyp build')
parser.add_option('--no-goma', dest='no_goma', default=False,
action='store_true', help='Do not run with goma')
parser.add_option('--use-breakpad-tools', dest='use_breakpad_tools',
default=False, action='store_true',
help='Use breakpad tools for testing')
parser.add_option('--skip-build', dest='skip_build', default=False,
action='store_true',
help='Skip building steps in buildbot_pnacl')
parser.add_option('--skip-run', dest='skip_run', default=False,
action='store_true',
help='Skip test-running steps in buildbot_pnacl')
options, args = parser.parse_args()
if len(args) != 3:
parser.error('Expected 3 arguments: mode arch toolchain')
# script + 3 args == 4
mode, arch, toolchain = args
if mode not in ('dbg', 'opt', 'coverage'):
parser.error('Invalid mode %r' % mode)
if arch not in ARCH_MAP:
parser.error('Invalid arch %r' % arch)
if toolchain not in ('newlib', 'glibc', 'pnacl', 'nacl_clang'):
parser.error('Invalid toolchain %r' % toolchain)
# TODO(ncbray) allow a command-line override
platform = GetHostPlatform()
context['platform'] = platform
context['mode'] = mode
context['arch'] = arch
context['android'] = options.android
# ASan is Clang, so set the flag to simplify other checks.
context['clang'] = options.clang or options.asan
context['validator'] = options.validator
context['asan'] = options.asan
# TODO(ncbray) turn derived values into methods.
context['gyp_mode'] = {
'opt': 'Release',
'dbg': 'Debug',
'coverage': 'Debug'}[mode]
context['gn_is_debug'] = {
'opt': 'false',
'dbg': 'true',
'coverage': 'true'}[mode]
context['gyp_arch'] = ARCH_MAP[arch]['gyp_arch']
context['gyp_vars'] = []
if context['clang']:
context['gyp_vars'].append('clang=1')
if context['asan']:
context['gyp_vars'].append('asan=1')
context['default_scons_platform'] = ARCH_MAP[arch]['scons_platform']
context['default_scons_mode'] = ['nacl']
# Only Linux can build trusted code on ARM.
# TODO(mcgrathr): clean this up somehow
if arch != 'arm' or platform == 'linux':
context['default_scons_mode'] += [mode + '-host']
context['use_glibc'] = toolchain == 'glibc'
context['pnacl'] = toolchain == 'pnacl'
context['nacl_clang'] = toolchain == 'nacl_clang'
context['max_jobs'] = 8
context['dry_run'] = options.dry_run
context['inside_toolchain'] = options.inside_toolchain
context['step_suffix'] = options.step_suffix
context['no_gyp'] = options.no_gyp
context['no_goma'] = options.no_goma
context['coverage'] = options.coverage
context['use_breakpad_tools'] = options.use_breakpad_tools
context['scons_args'] = options.scons_args
context['skip_build'] = options.skip_build
context['skip_run'] = options.skip_run
# Don't run gyp on coverage builds.
if context['coverage']:
context['no_gyp'] = True
for key, value in sorted(context.config.items()):
print '%s=%s' % (key, value)
def EnsureDirectoryExists(path):
"""
Create a directory if it does not already exist.
Does not mask failures, but there really shouldn't be any.
"""
if not os.path.exists(path):
os.makedirs(path)
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
try:
RemovePath(path)
except Exception:
print 'Failed to remove %s' % path
else:
print 'Skipping %s' % path
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
  # directories fail. We currently do not have a complete understanding of why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
  # Virus checkers can also accidentally prevent files from being deleted, but
# that shouldn't be a problem on the bots.
if GetHostPlatform() == 'win':
count = 0
while True:
try:
op(*args)
break
except Exception:
print "FAILED: %s %s" % (op.__name__, repr(args))
count += 1
if count < 5:
print "RETRY: %s %s" % (op.__name__, repr(args))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
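# Illustrative sketch (not part of the original script): wrapping a flaky
# operation with Retry. The simulated failure below is an assumption for
# demonstration and is only raised on Windows, where Retry retries with
# exponential backoff; on other platforms Retry calls the operation once.
def _DemoRetry():
  attempts = {'count': 0}
  def flaky_remove(path):
    attempts['count'] += 1
    if GetHostPlatform() == 'win' and attempts['count'] < 3:
      raise OSError('simulated transient failure removing %s' % path)
  Retry(flaky_remove, 'scons-out')
  print 'flaky_remove was attempted %d time(s)' % attempts['count']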
def PermissionsFixOnError(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def _RemoveDirectory(path):
print 'Removing %s' % path
if os.path.exists(path):
shutil.rmtree(path, onerror=PermissionsFixOnError)
print ' Succeeded.'
else:
print ' Path does not exist, nothing to do.'
def RemoveDirectory(path):
"""
Remove a directory if it exists.
Does not mask failures, although it does retry a few times on Windows.
"""
Retry(_RemoveDirectory, path)
def RemovePath(path):
"""Remove a path, file or directory."""
if os.path.isdir(path):
RemoveDirectory(path)
else:
if os.path.isfile(path) and not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
os.remove(path)
# This is a sanity check so Command can print out better error information.
def FileCanBeFound(name, paths):
# CWD
if os.path.exists(name):
return True
# Paths with directories are not resolved using the PATH variable.
if os.path.dirname(name):
return False
# In path
for path in paths.split(os.pathsep):
full = os.path.join(path, name)
if os.path.exists(full):
return True
return False
def RemoveGypBuildDirectories():
# Remove all directories on all platforms. Overkill, but it allows for
# straight-line code.
# Windows
RemoveDirectory('build/Debug')
RemoveDirectory('build/Release')
RemoveDirectory('build/Debug-Win32')
RemoveDirectory('build/Release-Win32')
RemoveDirectory('build/Debug-x64')
RemoveDirectory('build/Release-x64')
# Linux and Mac
RemoveDirectory('../xcodebuild')
RemoveDirectory('../out')
RemoveDirectory('src/third_party/nacl_sdk/arm-newlib')
def RemoveSconsBuildDirectories():
RemoveDirectory('scons-out')
RemoveDirectory('breakpad-out')
# Execute a command using Python's subprocess module.
def Command(context, cmd, cwd=None):
print 'Running command: %s' % ' '.join(cmd)
# Python's subprocess has a quirk. A subprocess can execute with an
# arbitrary, user-defined environment. The first argument of the command,
# however, is located using the PATH variable of the Python script that is
# launching the subprocess. Modifying the PATH in the environment passed to
# the subprocess does not affect Python's search for the first argument of
  # the command (the executable file). This is a little counterintuitive,
# so we're forcing the search to use the same PATH variable as is seen by
# the subprocess.
env = context.MakeCommandEnv()
script_path = os.environ['PATH']
os.environ['PATH'] = env['PATH']
try:
if FileCanBeFound(cmd[0], env['PATH']) or context['dry_run']:
# Make sure that print statements before the subprocess call have been
# flushed, otherwise the output of the subprocess call may appear before
# the print statements.
sys.stdout.flush()
if context['dry_run']:
retcode = 0
else:
retcode = subprocess.call(cmd, cwd=cwd, env=env)
else:
# Provide a nicer failure message.
# If subprocess cannot find the executable, it will throw a cryptic
# exception.
print 'Executable %r cannot be found.' % cmd[0]
retcode = 1
finally:
os.environ['PATH'] = script_path
print 'Command return code: %d' % retcode
if retcode != 0:
raise StepFailed()
return retcode
# A specialized version of CommandStep.
def SCons(context, mode=None, platform=None, parallel=False, browser_test=False,
args=(), cwd=None):
python = sys.executable
if mode is None: mode = context['default_scons_mode']
if platform is None: platform = context['default_scons_platform']
if parallel:
jobs = context['max_jobs']
else:
jobs = 1
cmd = []
if browser_test and context.Linux():
# Although we could use the "browser_headless=1" Scons option, it runs
# xvfb-run once per Chromium invocation. This is good for isolating
# the tests, but xvfb-run has a stupid fixed-period sleep, which would
# slow down the tests unnecessarily.
cmd.extend(['xvfb-run', '--auto-servernum'])
cmd.extend([
python, 'scons.py',
'--verbose',
'-k',
'-j%d' % jobs,
'--mode='+','.join(mode),
'platform='+platform,
])
cmd.extend(context['scons_args'])
if context['clang']: cmd.append('--clang')
if context['asan']: cmd.append('--asan')
if context['use_glibc']: cmd.append('--nacl_glibc')
if context['pnacl']: cmd.append('bitcode=1')
if context['nacl_clang']: cmd.append('nacl_clang=1')
if context['use_breakpad_tools']:
cmd.append('breakpad_tools_dir=breakpad-out')
if context['android']:
cmd.append('android=1')
# Append used-specified arguments.
cmd.extend(args)
Command(context, cmd, cwd)
class StepFailed(Exception):
"""
Thrown when the step has failed.
"""
class StopBuild(Exception):
"""
Thrown when the entire build should stop. This does not indicate a failure,
in of itself.
"""
class Step(object):
"""
This class is used in conjunction with a Python "with" statement to ensure
  that the preamble and postamble of each build step get printed and failures
get logged. This class also ensures that exceptions thrown inside a "with"
statement don't take down the entire build.
"""
def __init__(self, name, status, halt_on_fail=True):
self.status = status
if 'step_suffix' in status.context:
suffix = status.context['step_suffix']
else:
suffix = ''
self.name = name + suffix
self.halt_on_fail = halt_on_fail
self.step_failed = False
# Called on entry to a 'with' block.
def __enter__(self):
sys.stdout.flush()
print
print '@@@BUILD_STEP %s@@@' % self.name
self.status.ReportBegin(self.name)
# The method is called on exit from a 'with' block - even for non-local
# control flow, i.e. exceptions, breaks, continues, returns, etc.
# If an exception is thrown inside a block wrapped with a 'with' statement,
# the __exit__ handler can suppress the exception by returning True. This is
# used to isolate each step in the build - if an exception occurs in a given
# step, the step is treated as a failure. This allows the postamble for each
  # step to be printed and also allows the build to continue if the failure of
# a given step doesn't halt the build.
def __exit__(self, type, exception, trace):
sys.stdout.flush()
if exception is None:
# If exception is None, no exception occurred.
step_failed = False
elif isinstance(exception, StepFailed):
step_failed = True
print
print 'Halting build step because of failure.'
print
else:
step_failed = True
print
print 'The build step threw an exception...'
print
traceback.print_exception(type, exception, trace, file=sys.stdout)
print
if step_failed:
self.status.ReportFail(self.name)
print '@@@STEP_FAILURE@@@'
if self.halt_on_fail:
print
print 'Entire build halted because %s failed.' % self.name
sys.stdout.flush()
raise StopBuild()
else:
self.status.ReportPass(self.name)
sys.stdout.flush()
# Suppress any exception that occurred.
return True
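# Illustrative sketch (not part of the original script): shows how Step and
# BuildStatus cooperate. The step names are assumptions for demonstration; a
# StepFailed raised with halt_on_fail=False is recorded without stopping the
# build. BuildStatus and BuildContext are defined further down in this file.
def _DemoSteps():
  status = BuildStatus(BuildContext())
  with Step('demo gyp', status, halt_on_fail=False):
    print 'pretending to run gyp'
  with Step('demo scons', status, halt_on_fail=False):
    raise StepFailed()
  status.DisplayBuildStatus()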
# Adds an arbitrary link inside the build stage on the waterfall.
def StepLink(text, link):
print '@@@STEP_LINK@%s@%s@@@' % (text, link)
# Adds arbitrary text inside the build stage on the waterfall.
def StepText(text):
print '@@@STEP_TEXT@%s@@@' % (text)
class BuildStatus(object):
"""
Keeps track of the overall status of the build.
"""
def __init__(self, context):
self.context = context
self.ever_failed = False
self.steps = []
def ReportBegin(self, name):
pass
def ReportPass(self, name):
self.steps.append((name, 'passed'))
def ReportFail(self, name):
self.steps.append((name, 'failed'))
self.ever_failed = True
# Handy info when this script is run outside of the buildbot.
def DisplayBuildStatus(self):
print
for step, status in self.steps:
print '%-40s[%s]' % (step, status)
print
if self.ever_failed:
print 'Build failed.'
else:
print 'Build succeeded.'
def ReturnValue(self):
return int(self.ever_failed)
class BuildContext(object):
"""
Encapsulates the information needed for running a build command. This
includes environment variables and default arguments for SCons invocations.
"""
# Only allow these attributes on objects of this type.
__slots__ = ['status', 'global_env', 'config']
def __init__(self):
# The contents of global_env override os.environ for any commands run via
# self.Command(...)
self.global_env = {}
# PATH is a special case. See: Command.
self.global_env['PATH'] = os.environ.get('PATH', '')
self.config = {}
self['dry_run'] = False
# Emulate dictionary subscripting.
def __getitem__(self, key):
return self.config[key]
# Emulate dictionary subscripting.
def __setitem__(self, key, value):
self.config[key] = value
# Emulate dictionary membership test
def __contains__(self, key):
return key in self.config
def Windows(self):
return self.config['platform'] == 'win'
def Linux(self):
return self.config['platform'] == 'linux'
def Mac(self):
return self.config['platform'] == 'mac'
def GetEnv(self, name, default=None):
return self.global_env.get(name, default)
def SetEnv(self, name, value):
self.global_env[name] = str(value)
def MakeCommandEnv(self):
# The external environment is not sanitized.
e = dict(os.environ)
# Arbitrary variables can be overridden.
e.update(self.global_env)
return e
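# Illustrative sketch (not part of the original script): a BuildContext carries
# config values (dictionary-style) and environment overrides used by Command
# and SCons. The values below are assumptions for demonstration only.
def _DemoBuildContext():
  context = BuildContext()
  SetDefaultContextAttributes(context)
  context['max_jobs'] = 4
  context.SetEnv('GYP_GENERATORS', 'ninja')
  assert 'max_jobs' in context
  env = context.MakeCommandEnv()
  print 'platform=%s jobs=%d generators=%s' % (
      context['platform'], context['max_jobs'], env['GYP_GENERATORS'])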
def RunBuild(script, status):
try:
script(status, status.context)
except StopBuild:
pass
# Emit a summary step for three reasons:
# - The annotator will attribute non-zero exit status to the last build step.
# This can misattribute failures to the last build step.
# - runtest.py wraps the builds to scrape perf data. It emits an annotator
# tag on exit which misattributes perf results to the last build step.
# - Provide a label step in which to show summary result.
# Otherwise these go back to the preamble.
with Step('summary', status):
if status.ever_failed:
print 'There were failed stages.'
else:
print 'Success.'
# Display a summary of the build.
status.DisplayBuildStatus()
sys.exit(status.ReturnValue())
| bsd-3-clause | -1,810,363,383,319,395,800 | -3,272,165,514,509,889,000 | 30.722543 | 80 | 0.654246 | false |
tjcsl/director | web3/apps/sites/migrations/0001_initial.py | 1 | 1297 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-05 23:20
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0002_auto_20161105_2046'),
]
operations = [
migrations.CreateModel(
name='Website',
fields=[
('id', models.PositiveIntegerField(primary_key=True, serialize=False, validators=[django.core.validators.MinValueValidator(1000)])),
('name', models.CharField(max_length=32, unique=True)),
('category', models.CharField(choices=[('legacy', 'legacy'), ('static', 'static'), ('php', 'php'), ('dynamic', 'dynamic')], max_length=16)),
('purpose', models.CharField(choices=[('user', 'user'), ('activity', 'activity')], max_length=16)),
('domain', models.TextField()),
('description', models.TextField()),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.User')),
],
),
]
| mit | -8,400,209,235,807,530,000 | -27,105,051,276,568,724 | 39.53125 | 156 | 0.596762 | false |
scroggo/skia | tools/skp/recreate_skps.py | 10 | 2822 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run the webpages_playback automation script."""
import os
import subprocess
import sys
sys.path.insert(0, os.getcwd())
from common.py.utils import gs_utils
from common.py.utils import shell_utils
SKP_VERSION_FILE = 'SKP_VERSION'
def _get_skp_version():
"""Find an unused SKP version."""
current_skp_version = None
with open(SKP_VERSION_FILE) as f:
current_skp_version = int(f.read().rstrip())
# Find the first SKP version which has no uploaded SKPs.
new_version = current_skp_version + 1
while True:
gs_path = 'playback_%d/skps' % new_version
if not gs_utils.GSUtils().does_storage_object_exist('chromium-skia-gm',
gs_path):
return new_version
new_version += 1
def main(chrome_src_path, browser_executable):
browser_executable = os.path.realpath(browser_executable)
skp_version = _get_skp_version()
print 'SKP_VERSION=%d' % skp_version
if os.environ.get('CHROME_HEADLESS'):
# Start Xvfb if running on a bot.
try:
shell_utils.run('sudo Xvfb :0 -screen 0 1280x1024x24 &', shell=True)
except Exception:
# It is ok if the above command fails, it just means that DISPLAY=:0
# is already up.
pass
upload_dir = 'playback_%d' % skp_version
webpages_playback_cmd = [
'python', os.path.join(os.path.dirname(os.path.realpath(__file__)),
'webpages_playback.py'),
'--page_sets', 'all',
'--browser_executable', browser_executable,
'--non-interactive',
'--upload_to_gs',
'--alternate_upload_dir', upload_dir,
'--chrome_src_path', chrome_src_path,
]
try:
shell_utils.run(webpages_playback_cmd)
finally:
# Clean up any leftover browser instances. This can happen if there are
    # telemetry crashes; processes are not always cleaned up appropriately by
# the webpagereplay and telemetry frameworks.
procs = subprocess.check_output(['ps', 'ax'])
for line in procs.splitlines():
if browser_executable in line:
pid = line.strip().split(' ')[0]
if pid != str(os.getpid()) and not 'python' in line:
try:
shell_utils.run(['kill', '-9', pid])
except shell_utils.CommandFailedException as e:
print e
else:
print 'Refusing to kill self.'
print 'writing %s: %s' % (SKP_VERSION_FILE, skp_version)
with open(SKP_VERSION_FILE, 'w') as f:
f.write(str(skp_version))
if '__main__' == __name__:
if len(sys.argv) != 3:
    print >> sys.stderr, 'USAGE: %s <chrome src path> <browser executable>' % sys.argv[0]
sys.exit(1)
main(*sys.argv[1:])
| bsd-3-clause | -5,915,398,379,008,067,000 | 859,175,135,465,049,000 | 29.673913 | 77 | 0.634656 | false |
g-k/servo | tests/wpt/harness/wptrunner/testloader.py | 34 | 22201 | import json
import os
import sys
import urlparse
from abc import ABCMeta, abstractmethod
from Queue import Empty
from collections import defaultdict, OrderedDict, deque
from multiprocessing import Queue
import manifestinclude
import manifestexpected
import wpttest
from mozlog import structured
manifest = None
manifest_update = None
def do_delayed_imports():
# This relies on an already loaded module having set the sys.path correctly :(
global manifest, manifest_update
from manifest import manifest
from manifest import update as manifest_update
class TestChunker(object):
def __init__(self, total_chunks, chunk_number):
self.total_chunks = total_chunks
self.chunk_number = chunk_number
assert self.chunk_number <= self.total_chunks
self.logger = structured.get_default_logger()
def __call__(self, manifest):
raise NotImplementedError
class Unchunked(TestChunker):
def __init__(self, *args, **kwargs):
TestChunker.__init__(self, *args, **kwargs)
assert self.total_chunks == 1
def __call__(self, manifest):
for item in manifest:
yield item
class HashChunker(TestChunker):
    def __call__(self, manifest):
chunk_index = self.chunk_number - 1
for test_path, tests in manifest:
if hash(test_path) % self.total_chunks == chunk_index:
yield test_path, tests
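# Illustrative sketch (not part of the original module): how a chunker splits
# (test_path, tests) pairs between chunks. The toy manifest items below are
# assumptions for demonstration; real callers pass items produced by
# TestLoader.iter_tests.
def _demo_chunkers():
    toy_items = [("/a/one.html", ["t1"]), ("/b/two.html", ["t2"]),
                 ("/c/three.html", ["t3"]), ("/d/four.html", ["t4"])]
    # Unchunked passes every item through unchanged.
    assert list(Unchunked(1, 1)(toy_items)) == toy_items
    # HashChunker keeps only the paths whose hash lands in this chunk, so the
    # two chunks together cover the manifest exactly once.
    first = list(HashChunker(2, 1)(toy_items))
    second = list(HashChunker(2, 2)(toy_items))
    assert sorted(first + second) == sorted(toy_items)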
class EqualTimeChunker(TestChunker):
def _group_by_directory(self, manifest_items):
"""Split the list of manifest items into a ordered dict that groups tests in
so that anything in the same subdirectory beyond a depth of 3 is in the same
group. So all tests in a/b/c, a/b/c/d and a/b/c/e will be grouped together
and separate to tests in a/b/f
Returns: tuple (ordered dict of {test_dir: PathData}, total estimated runtime)
"""
class PathData(object):
def __init__(self, path):
self.path = path
self.time = 0
self.tests = []
by_dir = OrderedDict()
total_time = 0
for i, (test_path, tests) in enumerate(manifest_items):
test_dir = tuple(os.path.split(test_path)[0].split(os.path.sep)[:3])
if not test_dir in by_dir:
by_dir[test_dir] = PathData(test_dir)
data = by_dir[test_dir]
time = sum(wpttest.DEFAULT_TIMEOUT if test.timeout !=
"long" else wpttest.LONG_TIMEOUT for test in tests)
data.time += time
total_time += time
data.tests.append((test_path, tests))
return by_dir, total_time
def _maybe_remove(self, chunks, i, direction):
"""Trial removing a chunk from one chunk to an adjacent one.
:param chunks: - the list of all chunks
:param i: - the chunk index in the list of chunks to try removing from
:param direction: either "next" if we are going to move from the end to
the subsequent chunk, or "prev" if we are going to move
from the start into the previous chunk.
:returns bool: Did a chunk get moved?"""
source_chunk = chunks[i]
if direction == "next":
target_chunk = chunks[i+1]
path_index = -1
move_func = lambda: target_chunk.appendleft(source_chunk.pop())
elif direction == "prev":
target_chunk = chunks[i-1]
path_index = 0
move_func = lambda: target_chunk.append(source_chunk.popleft())
else:
raise ValueError("Unexpected move direction %s" % direction)
return self._maybe_move(source_chunk, target_chunk, path_index, move_func)
def _maybe_add(self, chunks, i, direction):
"""Trial adding a chunk from one chunk to an adjacent one.
:param chunks: - the list of all chunks
:param i: - the chunk index in the list of chunks to try adding to
:param direction: either "next" if we are going to remove from the
                           subsequent chunk, or "prev" if we are going to remove
                           from the previous chunk.
:returns bool: Did a chunk get moved?"""
target_chunk = chunks[i]
if direction == "next":
source_chunk = chunks[i+1]
path_index = 0
move_func = lambda: target_chunk.append(source_chunk.popleft())
elif direction == "prev":
source_chunk = chunks[i-1]
path_index = -1
move_func = lambda: target_chunk.appendleft(source_chunk.pop())
else:
raise ValueError("Unexpected move direction %s" % direction)
return self._maybe_move(source_chunk, target_chunk, path_index, move_func)
def _maybe_move(self, source_chunk, target_chunk, path_index, move_func):
"""Move from one chunk to another, assess the change in badness,
and keep the move iff it decreases the badness score.
:param source_chunk: chunk to move from
:param target_chunk: chunk to move to
:param path_index: 0 if we are moving from the start or -1 if we are moving from the
end
:param move_func: Function that actually moves between chunks"""
if len(source_chunk.paths) <= 1:
return False
move_time = source_chunk.paths[path_index].time
new_source_badness = self._badness(source_chunk.time - move_time)
new_target_badness = self._badness(target_chunk.time + move_time)
delta_badness = ((new_source_badness + new_target_badness) -
(source_chunk.badness + target_chunk.badness))
if delta_badness < 0:
move_func()
return True
return False
def _badness(self, time):
"""Metric of badness for a specific chunk
:param time: the time for a specific chunk"""
return (time - self.expected_time)**2
def _get_chunk(self, manifest_items):
by_dir, total_time = self._group_by_directory(manifest_items)
if len(by_dir) < self.total_chunks:
raise ValueError("Tried to split into %i chunks, but only %i subdirectories included" % (
self.total_chunks, len(by_dir)))
self.expected_time = float(total_time) / self.total_chunks
chunks = self._create_initial_chunks(by_dir)
while True:
# Move a test from one chunk to the next until doing so no longer
# reduces the badness
got_improvement = self._update_chunks(chunks)
if not got_improvement:
break
self.logger.debug(self.expected_time)
for i, chunk in chunks.iteritems():
self.logger.debug("%i: %i, %i" % (i + 1, chunk.time, chunk.badness))
assert self._all_tests(by_dir) == self._chunked_tests(chunks)
return self._get_tests(chunks)
@staticmethod
def _all_tests(by_dir):
"""Return a set of all tests in the manifest from a grouping by directory"""
return set(x[0] for item in by_dir.itervalues()
for x in item.tests)
@staticmethod
def _chunked_tests(chunks):
"""Return a set of all tests in the manifest from the chunk list"""
return set(x[0] for chunk in chunks.itervalues()
for path in chunk.paths
for x in path.tests)
def _create_initial_chunks(self, by_dir):
"""Create an initial unbalanced list of chunks.
:param by_dir: All tests in the manifest grouped by subdirectory
:returns list: A list of Chunk objects"""
class Chunk(object):
def __init__(self, paths, index):
"""List of PathData objects that together form a single chunk of
tests"""
self.paths = deque(paths)
self.time = sum(item.time for item in paths)
self.index = index
def appendleft(self, path):
"""Add a PathData object to the start of the chunk"""
self.paths.appendleft(path)
self.time += path.time
def append(self, path):
"""Add a PathData object to the end of the chunk"""
self.paths.append(path)
self.time += path.time
def pop(self):
"""Remove PathData object from the end of the chunk"""
assert len(self.paths) > 1
self.time -= self.paths[-1].time
return self.paths.pop()
def popleft(self):
"""Remove PathData object from the start of the chunk"""
assert len(self.paths) > 1
self.time -= self.paths[0].time
return self.paths.popleft()
@property
def badness(self_):
"""Badness metric for this chunk"""
return self._badness(self_.time)
initial_size = len(by_dir) / self.total_chunks
chunk_boundaries = [initial_size * i
for i in xrange(self.total_chunks)] + [len(by_dir)]
chunks = OrderedDict()
for i, lower in enumerate(chunk_boundaries[:-1]):
upper = chunk_boundaries[i + 1]
paths = by_dir.values()[lower:upper]
chunks[i] = Chunk(paths, i)
assert self._all_tests(by_dir) == self._chunked_tests(chunks)
return chunks
def _update_chunks(self, chunks):
"""Run a single iteration of the chunk update algorithm.
:param chunks: - List of chunks
"""
#TODO: consider replacing this with a heap
sorted_chunks = sorted(chunks.values(), key=lambda x:-x.badness)
got_improvement = False
for chunk in sorted_chunks:
if chunk.time < self.expected_time:
f = self._maybe_add
else:
f = self._maybe_remove
if chunk.index == 0:
order = ["next"]
elif chunk.index == self.total_chunks - 1:
order = ["prev"]
else:
if chunk.time < self.expected_time:
# First try to add a test from the neighboring chunk with the
# greatest total time
if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:
order = ["next", "prev"]
else:
order = ["prev", "next"]
else:
# First try to remove a test and add to the neighboring chunk with the
# lowest total time
if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:
order = ["prev", "next"]
else:
order = ["next", "prev"]
for direction in order:
if f(chunks, chunk.index, direction):
got_improvement = True
break
if got_improvement:
break
return got_improvement
def _get_tests(self, chunks):
"""Return the list of tests corresponding to the chunk number we are running.
:param chunks: List of chunks"""
tests = []
for path in chunks[self.chunk_number - 1].paths:
tests.extend(path.tests)
return tests
def __call__(self, manifest_iter):
manifest = list(manifest_iter)
tests = self._get_chunk(manifest)
for item in tests:
yield item
class TestFilter(object):
def __init__(self, test_manifests, include=None, exclude=None, manifest_path=None):
if manifest_path is not None and include is None:
self.manifest = manifestinclude.get_manifest(manifest_path)
else:
self.manifest = manifestinclude.IncludeManifest.create()
if include:
self.manifest.set("skip", "true")
for item in include:
self.manifest.add_include(test_manifests, item)
if exclude:
for item in exclude:
self.manifest.add_exclude(test_manifests, item)
def __call__(self, manifest_iter):
for test_path, tests in manifest_iter:
include_tests = set()
for test in tests:
if self.manifest.include(test):
include_tests.add(test)
if include_tests:
yield test_path, include_tests
class TagFilter(object):
def __init__(self, tags):
self.tags = set(tags)
def __call__(self, test_iter):
for test in test_iter:
if test.tags & self.tags:
yield test
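# Illustrative sketch (not part of the original module): TagFilter keeps only
# tests whose tags intersect the requested set. The _FakeTest class below is
# an assumption standing in for real wpttest objects.
def _demo_tag_filter():
    class _FakeTest(object):
        def __init__(self, name, tags):
            self.name = name
            self.tags = set(tags)
    tests = [_FakeTest("a", ["slow"]), _FakeTest("b", ["fast"])]
    kept = list(TagFilter(["slow"])(tests))
    assert [test.name for test in kept] == ["a"]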
class ManifestLoader(object):
def __init__(self, test_paths, force_manifest_update=False):
do_delayed_imports()
self.test_paths = test_paths
self.force_manifest_update = force_manifest_update
self.logger = structured.get_default_logger()
if self.logger is None:
self.logger = structured.structuredlog.StructuredLogger("ManifestLoader")
def load(self):
rv = {}
for url_base, paths in self.test_paths.iteritems():
manifest_file = self.load_manifest(url_base=url_base,
**paths)
path_data = {"url_base": url_base}
path_data.update(paths)
rv[manifest_file] = path_data
return rv
def create_manifest(self, manifest_path, tests_path, url_base="/"):
self.update_manifest(manifest_path, tests_path, url_base, recreate=True)
def update_manifest(self, manifest_path, tests_path, url_base="/",
recreate=False):
self.logger.info("Updating test manifest %s" % manifest_path)
json_data = None
if not recreate:
try:
with open(manifest_path) as f:
json_data = json.load(f)
except IOError:
#If the existing file doesn't exist just create one from scratch
pass
if not json_data:
manifest_file = manifest.Manifest(None, url_base)
else:
try:
manifest_file = manifest.Manifest.from_json(tests_path, json_data)
except manifest.ManifestVersionMismatch:
manifest_file = manifest.Manifest(None, url_base)
manifest_update.update(tests_path, url_base, manifest_file)
manifest.write(manifest_file, manifest_path)
def load_manifest(self, tests_path, metadata_path, url_base="/"):
manifest_path = os.path.join(metadata_path, "MANIFEST.json")
if (not os.path.exists(manifest_path) or
self.force_manifest_update):
self.update_manifest(manifest_path, tests_path, url_base)
manifest_file = manifest.load(tests_path, manifest_path)
if manifest_file.url_base != url_base:
self.logger.info("Updating url_base in manifest from %s to %s" % (manifest_file.url_base,
url_base))
manifest_file.url_base = url_base
manifest.write(manifest_file, manifest_path)
return manifest_file
def iterfilter(filters, iter):
for f in filters:
iter = f(iter)
for item in iter:
yield item
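# Illustrative sketch (not part of the original module): iterfilter threads one
# iterable through a list of generator-style filters, which is how manifest and
# metadata filters are applied in TestLoader. The toy filters below are
# assumptions for demonstration.
def _demo_iterfilter():
    evens = lambda items: (x for x in items if x % 2 == 0)
    doubled = lambda items: (2 * x for x in items)
    assert list(iterfilter([evens, doubled], range(5))) == [0, 4, 8]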
class TestLoader(object):
def __init__(self,
test_manifests,
test_types,
run_info,
manifest_filters=None,
meta_filters=None,
chunk_type="none",
total_chunks=1,
chunk_number=1,
include_https=True):
self.test_types = test_types
self.run_info = run_info
self.manifest_filters = manifest_filters if manifest_filters is not None else []
self.meta_filters = meta_filters if meta_filters is not None else []
self.manifests = test_manifests
self.tests = None
self.disabled_tests = None
self.include_https = include_https
self.chunk_type = chunk_type
self.total_chunks = total_chunks
self.chunk_number = chunk_number
self.chunker = {"none": Unchunked,
"hash": HashChunker,
"equal_time": EqualTimeChunker}[chunk_type](total_chunks,
chunk_number)
self._test_ids = None
self.directory_manifests = {}
self._load_tests()
@property
def test_ids(self):
if self._test_ids is None:
self._test_ids = []
for test_dict in [self.disabled_tests, self.tests]:
for test_type in self.test_types:
self._test_ids += [item.id for item in test_dict[test_type]]
return self._test_ids
def get_test(self, manifest_test, inherit_metadata, test_metadata):
if test_metadata is not None:
inherit_metadata.append(test_metadata)
test_metadata = test_metadata.get_test(manifest_test.id)
return wpttest.from_manifest(manifest_test, inherit_metadata, test_metadata)
def load_dir_metadata(self, test_manifest, metadata_path, test_path):
rv = []
path_parts = os.path.dirname(test_path).split(os.path.sep)
for i in xrange(1,len(path_parts) + 1):
path = os.path.join(os.path.sep.join(path_parts[:i]), "__dir__.ini")
if path not in self.directory_manifests:
self.directory_manifests[path] = manifestexpected.get_dir_manifest(
metadata_path, path, self.run_info)
manifest = self.directory_manifests[path]
if manifest is not None:
rv.append(manifest)
return rv
def load_metadata(self, test_manifest, metadata_path, test_path):
inherit_metadata = self.load_dir_metadata(test_manifest, metadata_path, test_path)
test_metadata = manifestexpected.get_manifest(
metadata_path, test_path, test_manifest.url_base, self.run_info)
return inherit_metadata, test_metadata
def iter_tests(self):
manifest_items = []
for manifest in self.manifests.keys():
manifest_iter = iterfilter(self.manifest_filters,
manifest.itertypes(*self.test_types))
manifest_items.extend(manifest_iter)
if self.chunker is not None:
manifest_items = self.chunker(manifest_items)
for test_path, tests in manifest_items:
manifest_file = iter(tests).next().manifest
metadata_path = self.manifests[manifest_file]["metadata_path"]
inherit_metadata, test_metadata = self.load_metadata(manifest_file, metadata_path, test_path)
for test in iterfilter(self.meta_filters,
self.iter_wpttest(inherit_metadata, test_metadata, tests)):
yield test_path, test.test_type, test
def iter_wpttest(self, inherit_metadata, test_metadata, tests):
for manifest_test in tests:
yield self.get_test(manifest_test, inherit_metadata, test_metadata)
def _load_tests(self):
"""Read in the tests from the manifest file and add them to a queue"""
tests = {"enabled":defaultdict(list),
"disabled":defaultdict(list)}
for test_path, test_type, test in self.iter_tests():
enabled = not test.disabled()
if not self.include_https and test.environment["protocol"] == "https":
enabled = False
key = "enabled" if enabled else "disabled"
tests[key][test_type].append(test)
self.tests = tests["enabled"]
self.disabled_tests = tests["disabled"]
def groups(self, test_types, chunk_type="none", total_chunks=1, chunk_number=1):
groups = set()
for test_type in test_types:
for test in self.tests[test_type]:
group = test.url.split("/")[1]
groups.add(group)
return groups
class TestSource(object):
__metaclass__ = ABCMeta
@abstractmethod
def queue_tests(self, test_queue):
pass
@abstractmethod
def requeue_test(self, test):
pass
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
class SingleTestSource(TestSource):
def __init__(self, test_queue):
self.test_queue = test_queue
@classmethod
def queue_tests(cls, test_queue, test_type, tests):
for test in tests[test_type]:
test_queue.put(test)
def get_queue(self):
if self.test_queue.empty():
return None
return self.test_queue
def requeue_test(self, test):
self.test_queue.put(test)
class PathGroupedSource(TestSource):
def __init__(self, test_queue):
self.test_queue = test_queue
self.current_queue = None
@classmethod
def queue_tests(cls, test_queue, test_type, tests, depth=None):
if depth is True:
depth = None
prev_path = None
group = None
for test in tests[test_type]:
path = urlparse.urlsplit(test.url).path.split("/")[1:-1][:depth]
if path != prev_path:
group = []
test_queue.put(group)
prev_path = path
group.append(test)
def get_queue(self):
if not self.current_queue or self.current_queue.empty():
try:
data = self.test_queue.get(block=True, timeout=1)
self.current_queue = Queue()
for item in data:
self.current_queue.put(item)
except Empty:
return None
return self.current_queue
def requeue_test(self, test):
self.current_queue.put(test)
def __exit__(self, *args, **kwargs):
if self.current_queue:
self.current_queue.close()
| mpl-2.0 | -8,916,607,221,963,879,000 | -1,167,928,474,071,328,300 | 34.808065 | 105 | 0.570019 | false |
lisa-groundhog/GroundHog | groundhog/layers/ff_layers.py | 16 | 18887 | """
Feedforward layers.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
init_bias, \
constant_shape, \
sample_zeros
from basic import Layer
class MultiLayer(Layer):
"""
Implementing a standard feed forward MLP
"""
def __init__(self,
rng,
n_in,
n_hids=[500,500],
activation='TT.tanh',
scale=0.01,
sparsity=-1,
rank_n_approx=0,
rank_n_activ='lambda x: x',
weight_noise=False,
dropout = 1.,
init_fn='sample_weights_classic',
bias_fn='init_bias',
bias_scale = 0.,
learn_bias = True,
grad_scale = 1.,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
:param n_in: number of inputs units
:type n_hids: list of ints
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type rank_n_approx: int
:param rank_n_approx: It applies to the first layer only. If
positive and larger than 0, the first weight matrix is
factorized into two matrices. The first one goes from input to
`rank_n_approx` hidden units, the second from `rank_n_approx` to
the number of units on the second layer
:type rank_n_activ: string or function
        :param rank_n_activ: Function that is applied on the intermediary
layer formed from factorizing the first weight matrix (Q: do we
need this?)
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
(and the right shared variable are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type learn_bias: bool
:param learn_bias: flag, saying if we should learn the bias or keep
it constant
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
code relies on name to disambiguate between variables, therefore
each layer should have a unique name.
"""
assert rank_n_approx >= 0, "Please enter a valid rank_n_approx"
self.rank_n_approx = rank_n_approx
if isinstance(rank_n_activ, (str, unicode)):
rank_n_activ = eval(rank_n_activ)
self.rank_n_activ = rank_n_activ
if type(n_hids) not in (list, tuple):
n_hids = [n_hids]
n_layers = len(n_hids)
self.n_layers = n_layers
if type(scale) not in (list, tuple):
scale = [scale] * n_layers
if type(sparsity) not in (list, tuple):
sparsity = [sparsity] * n_layers
for idx, sp in enumerate(sparsity):
if sp < 0: sparsity[idx] = n_hids[idx]
if type(activation) not in (list, tuple):
activation = [activation] * n_layers
if type(bias_scale) not in (list, tuple):
bias_scale = [bias_scale] * n_layers
if bias_fn not in (list, tuple):
bias_fn = [bias_fn] * n_layers
if init_fn not in (list, tuple):
init_fn = [init_fn] * n_layers
for dx in xrange(n_layers):
if isinstance(bias_fn[dx], (str, unicode)):
bias_fn[dx] = eval(bias_fn[dx])
if isinstance(init_fn[dx], (str, unicode)):
init_fn[dx] = eval(init_fn[dx])
if isinstance(activation[dx], (str, unicode)):
activation[dx] = eval(activation[dx])
super(MultiLayer, self).__init__(n_in, n_hids[-1], rng, name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.activation = activation
self.scale = scale
self.sparsity = sparsity
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self._grad_scale = grad_scale
self.weight_noise = weight_noise
self.dropout = dropout
self.n_hids = n_hids
self.learn_bias = learn_bias
self._init_params()
def _init_params(self):
"""
Initialize the parameters of the layer, either by using sparse initialization or small
isotropic noise.
"""
self.W_ems = []
self.b_ems = []
if self.rank_n_approx:
W_em1 = self.init_fn[0](self.n_in,
self.rank_n_approx,
self.sparsity[0],
self.scale[0],
self.rng)
W_em2 = self.init_fn[0](self.rank_n_approx,
self.n_hids[0],
self.sparsity[0],
self.scale[0],
self.rng)
self.W_em1 = theano.shared(W_em1,
name='W1_0_%s'%self.name)
self.W_em2 = theano.shared(W_em2,
name='W2_0_%s'%self.name)
self.W_ems = [self.W_em1, self.W_em2]
else:
W_em = self.init_fn[0](self.n_in,
self.n_hids[0],
self.sparsity[0],
self.scale[0],
self.rng)
self.W_em = theano.shared(W_em,
name='W_0_%s'%self.name)
self.W_ems = [self.W_em]
self.b_em = theano.shared(
self.bias_fn[0](self.n_hids[0], self.bias_scale[0],self.rng),
name='b_0_%s'%self.name)
self.b_ems = [self.b_em]
for dx in xrange(1, self.n_layers):
            W_em = self.init_fn[dx](self.n_hids[dx-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
self.rng)
W_em = theano.shared(W_em,
name='W_%d_%s'%(dx,self.name))
self.W_ems += [W_em]
b_em = theano.shared(
self.bias_fn[dx](self.n_hids[dx], self.bias_scale[dx],self.rng),
name='b_%d_%s'%(dx,self.name))
self.b_ems += [b_em]
self.params = [x for x in self.W_ems]
if self.learn_bias and self.learn_bias!='last':
self.params = [x for x in self.W_ems] + [x for x in self.b_ems]
elif self.learn_bias == 'last':
self.params = [x for x in self.W_ems] + [x for x in
self.b_ems][:-1]
self.params_grad_scale = [self._grad_scale for x in self.params]
if self.weight_noise:
self.nW_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_ems]
self.nb_ems = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_ems]
self.noise_params = [x for x in self.nW_ems] + [x for x in self.nb_ems]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def fprop(self, state_below, use_noise=True, no_noise_bias=False,
first_only = False):
"""
Constructs the computational graph of this layer.
If the input is ints, we assume is an index, otherwise we assume is
a set of floats.
"""
if self.weight_noise and use_noise and self.noise_params:
W_ems = [(x+y) for x, y in zip(self.W_ems, self.nW_ems)]
if not no_noise_bias:
b_ems = [(x+y) for x, y in zip(self.b_ems, self.nb_ems)]
else:
b_ems = self.b_ems
else:
W_ems = self.W_ems
b_ems = self.b_ems
if self.rank_n_approx:
if first_only:
emb_val = self.rank_n_activ(utils.dot(state_below, W_ems[0]))
self.out = emb_val
return emb_val
emb_val = TT.dot(
self.rank_n_activ(utils.dot(state_below, W_ems[0])),
W_ems[1])
if b_ems:
emb_val += b_ems[0]
st_pos = 1
else:
emb_val = utils.dot(state_below, W_ems[0])
if b_ems:
emb_val += b_ems[0]
st_pos = 0
emb_val = self.activation[0](emb_val)
if self.dropout < 1.:
if use_noise:
emb_val = emb_val * self.trng.binomial(emb_val.shape, n=1, p=self.dropout, dtype=emb_val.dtype)
else:
emb_val = emb_val * self.dropout
for dx in xrange(1, self.n_layers):
emb_val = utils.dot(emb_val, W_ems[st_pos+dx])
if b_ems:
emb_val = self.activation[dx](emb_val+ b_ems[dx])
else:
emb_val = self.activation[dx](emb_val)
if self.dropout < 1.:
if use_noise:
emb_val = emb_val * self.trng.binomial(emb_val.shape, n=1, p=self.dropout, dtype=emb_val.dtype)
else:
emb_val = emb_val * self.dropout
self.out = emb_val
return emb_val
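# Illustrative sketch (not part of the original module): building a small
# MultiLayer MLP and compiling its forward pass. The layer sizes and random
# input below are assumptions for demonstration only.
def _demo_multilayer():
    rng = numpy.random.RandomState(123)
    mlp = MultiLayer(rng, n_in=4, n_hids=[8, 3],
                     activation='TT.tanh', name='demo_mlp')
    x = TT.matrix('x')
    fprop_fn = theano.function([x], mlp.fprop(x, use_noise=False))
    batch = rng.uniform(size=(2, 4)).astype(theano.config.floatX)
    print fprop_fn(batch).shape  # expected: (2, 3)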
class LastState(Layer):
"""
This layer is used to construct the embedding of the encoder by taking
the last state of the recurrent model
"""
def __init__(self, ntimes = False, n = TT.constant(0)):
"""
:type ntimes: bool
:param ntimes: If the last state needs to be repeated `n` times
:type n: int, theano constant, None
:param n: how many times the last state is repeated
"""
self.ntimes = ntimes
self.n = n
super(LastState, self).__init__(0, 0, None)
def fprop(self, all_states):
if self.ntimes:
stateshape0 = all_states.shape[0]
shape0 = TT.switch(TT.gt(self.n, 0), self.n, all_states.shape[0])
single_frame = TT.shape_padleft(all_states[stateshape0-1])
mask = TT.alloc(numpy.float32(1), shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
single_frame = all_states[all_states.shape[0]-1]
self.out = single_frame
return single_frame
last = LastState()
last_ntimes = LastState(ntimes=True)
class GaussianNoise(Layer):
"""
    This layer adds Gaussian noise to its input, either sampled on the fly
    from the layer's random stream or taken from a shared noise term.
"""
def __init__(self, rng, std = 0.1, ndim=0, avg =0, shape_fn=None):
"""
"""
assert rng is not None, "random number generator should not be empty!"
super(GaussianNoise, self).__init__(0, 0, rng)
        self.std = std
        self.avg = avg
self.ndim = ndim
self.shape_fn = shape_fn
if self.shape_fn:
# Name is not important as it is not a parameter of the model
self.noise_term = theano.shared(numpy.zeros((2,)*ndim,
dtype=theano.config.floatX),
name='ndata')
self.noise_params += [self.noise_term]
self.noise_params_shape_fn += [shape_fn]
self.trng = RandomStreams(rng.randint(1e5))
def fprop(self, x):
self.out = x
        if self.std:
if self.shape_fn:
self.out += self.noise_term
else:
self.out += self.trng.normal(self.out.shape, std=self.std,
avg = self.avg,
dtype=self.out.dtype)
return self.out
class BinaryOp(Layer):
"""
    This layer applies a binary operation (elementwise addition by default)
    to its two inputs.
"""
def __init__(self, op = 'lambda x,y: x+y', name=None):
if type(op) is str:
op = eval(op)
self.op = op
super(BinaryOp, self).__init__(0, 0, None, name)
def fprop(self, x, y):
self.out = self.op(x, y)
return self.out
class DropOp(Layer):
"""
    This layer randomly drops elements of the input by multiplying with a
mask sampled from a binomial distribution
"""
def __init__(self, rng = None, name=None, dropout=1.):
super(DropOp, self).__init__(0, 0, None, name)
self.dropout = dropout
if dropout < 1.:
self.trng = RandomStreams(rng.randint(1e5))
def fprop(self, state_below, use_noise = True):
self.out = state_below
if self.dropout < 1.:
if use_noise:
self.out = self.out * self.trng.binomial(self.out.shape,
n=1,
p=self.dropout,
dtype=self.out.dtype)
else:
self.out = self.out * self.dropout
return self.out
class UnaryOp(Layer):
"""
    This layer applies an elementwise activation function (e.g. tanh,
    sigmoid, rectifier) to its input.
"""
def __init__(self, activation = 'lambda x: x', name=None):
if type(activation) is str:
activation = eval(activation)
self.activation = activation
super(UnaryOp, self).__init__(0, 0, None, name)
def fprop(self, state_below):
self.out = self.activation(state_below)
return self.out
tanh = UnaryOp('lambda x: TT.tanh(x)')
sigmoid = UnaryOp('lambda x: TT.nnet.sigmoid(x)')
rectifier = UnaryOp('lambda x: x*(x>0)')
hard_sigmoid = UnaryOp('lambda x: x*(x>0)*(x<1)')
hard_tanh = UnaryOp('lambda x: x*(x>-1)*(x<1)')
class Shift(Layer):
"""
    This layer shifts its input by `n` steps along the first (time) axis,
    filling the vacated positions with zeros.
"""
def __init__(self, n=1, name=None):
self.n = n
super(Shift, self).__init__(0, 0, None, name)
def fprop(self, var):
rval = TT.zeros_like(var)
if self.n >0:
rval = TT.set_subtensor(rval[self.n:], var[:-self.n])
elif self.n<0:
rval = TT.set_subtensor(rval[:self.n], var[-self.n:])
self.out = rval
return rval
class MinPooling(Layer):
"""
This layer is used to construct an embedding of the encoder by doing a
    min pooling over the hidden state
"""
def __init__(self, ntimes=False, name=None):
self.ntimes = ntimes
super(MinPooling, self).__init__(0, 0, None, name)
def fprop(self, all_states):
shape0 = all_states.shape[0]
single_frame = all_states.min(0)
if self.ntimes:
            single_frame = TT.shape_padleft(all_states.min(0))
mask = TT.alloc(numpy.float32(1),
shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
self.out = single_frame
return single_frame
minpool = MinPooling()
minpool_ntimes = MinPooling(ntimes=True)
class MaxPooling(Layer):
"""
This layer is used to construct an embedding of the encoder by doing a
max pooling over the hidden state
"""
def __init__(self, ntimes=False, name=None):
self.ntimes = ntimes
super(MaxPooling, self).__init__(0, 0, None, name)
def fprop(self, all_states):
shape0 = all_states.shape[0]
single_frame = all_states.max(0)
if self.ntimes:
single_frame = TT.shape_padleft(all_states.max(0))
mask = TT.alloc(numpy.float32(1),
shape0, *[1 for k in xrange(all_states.ndim-1)])
rval = single_frame * mask
self.out = rval
return rval
self.out = single_frame
return single_frame
maxpool = MaxPooling()
maxpool_ntimes = MaxPooling(ntimes=True)
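# Illustrative sketch, not part of the original module: with ntimes=True the pooled
# frame is broadcast back over the time axis through a mask of ones, so the output
# keeps the input's leading (time) length. A numpy analogue (hypothetical name):
def _maxpool_ntimes_reference(all_states):
    import numpy as np
    single_frame = all_states.max(0)  # pool over the time axis
    # Tile the pooled frame once per time step, mirroring single_frame * mask.
    return np.tile(single_frame[None, ...],
                   (all_states.shape[0],) + (1,) * single_frame.ndim)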
class Concatenate(Layer):
def __init__(self, axis):
self.axis = axis
Layer.__init__(self, 0, 0, None)
def fprop(self, *args):
self.out = TT.concatenate(args, axis=self.axis)
return self.out
| bsd-3-clause | 6,481,124,935,488,004,000 | 1,317,105,471,098,430,500 | 35.960861 | 115 | 0.533753 | false |
rabernat/xmitgcm | xmitgcm/_version.py | 2 | 18453 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "xmitgcm-"
cfg.versionfile_source = "xmitgcm/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601-compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
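# Illustrative note, not part of the original file: with tag_prefix "v" and a
# describe output such as "v1.2-3-gabc1234-dirty", the parsing above yields roughly
# pieces = {"closest-tag": "1.2", "distance": 3, "short": "abc1234", "dirty": True,
# "long": <full sha>, "date": <commit date>, "error": None}; the example tag and
# hash are hypothetical.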
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
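# Illustrative note, not part of the original file: for a hypothetical pieces dict
# with closest-tag "1.2", distance 3, short "abc1234" and dirty True, the helpers
# above render roughly as
#   pep440        -> "1.2+3.gabc1234.dirty"
#   pep440-post   -> "1.2.post3.dev0+gabc1234"
#   git-describe  -> "1.2-3-gabc1234-dirty"
# (see the render_* helpers above for the exact rules).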
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| mit | -3,901,876,198,574,884,400 | -6,576,874,522,851,447,000 | 34.486538 | 79 | 0.575245 | false |
aselle/tensorflow | tensorflow/contrib/timeseries/python/timeseries/math_utils.py | 14 | 43089 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities used by time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib import lookup
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def clip_covariance(
covariance_matrix, maximum_variance_ratio, minimum_variance):
"""Enforce constraints on a covariance matrix to improve numerical stability.
Args:
covariance_matrix: A [..., N, N] batch of covariance matrices.
maximum_variance_ratio: The maximum allowed ratio of two diagonal
entries. Any entries lower than the maximum entry divided by this ratio
will be set to that value.
minimum_variance: A floor for diagonal entries in the returned matrix.
Returns:
A new covariance matrix with the requested constraints enforced. If the
input was positive definite, the output will be too.
"""
# TODO(allenl): Smarter scaling here so that correlations are preserved when
# fiddling with diagonal elements.
diagonal = array_ops.matrix_diag_part(covariance_matrix)
maximum = math_ops.reduce_max(diagonal, axis=-1, keepdims=True)
new_diagonal = gen_math_ops.maximum(
diagonal, maximum / maximum_variance_ratio)
return array_ops.matrix_set_diag(
covariance_matrix, math_ops.maximum(new_diagonal, minimum_variance))
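# Illustrative sketch, not part of the original module: for a single (unbatched)
# matrix the clipping above floors each diagonal entry at
# max(diagonal) / maximum_variance_ratio and at minimum_variance, leaving the
# off-diagonal entries untouched. A numpy analogue with a hypothetical name:
def _clip_covariance_reference(cov, maximum_variance_ratio, minimum_variance):
  import numpy as np
  cov = np.array(cov, dtype=np.float64)
  diag = np.diag(cov).copy()
  diag = np.maximum(diag, diag.max() / maximum_variance_ratio)
  diag = np.maximum(diag, minimum_variance)
  cov[np.diag_indices_from(cov)] = diag
  return cov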
def block_diagonal(matrices, dtype=dtypes.float32, name="block_diagonal"):
r"""Constructs block-diagonal matrices from a list of batched 2D tensors.
Args:
matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of
matrices with the same batch dimension).
dtype: Data type to use. The Tensors in `matrices` must match this dtype.
name: A name for the returned op.
Returns:
A matrix with the input matrices stacked along its main diagonal, having
shape [..., \sum_i N_i, \sum_i M_i].
"""
matrices = [ops.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]
blocked_rows = tensor_shape.Dimension(0)
blocked_cols = tensor_shape.Dimension(0)
batch_shape = tensor_shape.TensorShape(None)
for matrix in matrices:
full_matrix_shape = matrix.get_shape().with_rank_at_least(2)
batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])
blocked_rows += full_matrix_shape[-2]
blocked_cols += full_matrix_shape[-1]
ret_columns_list = []
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
ret_columns_list.append(matrix_shape[-1])
ret_columns = math_ops.add_n(ret_columns_list)
row_blocks = []
current_column = 0
for matrix in matrices:
matrix_shape = array_ops.shape(matrix)
row_before_length = current_column
current_column += matrix_shape[-1]
row_after_length = ret_columns - current_column
row_blocks.append(
array_ops.pad(
tensor=matrix,
paddings=array_ops.concat(
[
array_ops.zeros(
[array_ops.rank(matrix) - 1, 2], dtype=dtypes.int32), [(
row_before_length, row_after_length)]
],
axis=0)))
blocked = array_ops.concat(row_blocks, -2, name=name)
blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))
return blocked
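# Illustrative sketch, not part of the original module: stacking a [2, 2] and a
# [3, 3] matrix produces a [5, 5] block-diagonal result. A numpy analogue for the
# unbatched case (hypothetical helper name):
def _block_diagonal_reference(matrices):
  import numpy as np
  rows = sum(m.shape[0] for m in matrices)
  cols = sum(m.shape[1] for m in matrices)
  out = np.zeros((rows, cols), dtype=matrices[0].dtype)
  r = c = 0
  for m in matrices:
    out[r:r + m.shape[0], c:c + m.shape[1]] = m
    r += m.shape[0]
    c += m.shape[1]
  return out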
def power_sums_tensor(array_size, power_matrix, multiplier):
  r"""Computes \sum_{i=0}^{N-1} A^i B (A^i)^T for N=0..array_size.
Args:
array_size: The number of non-trivial sums to pre-compute.
power_matrix: The "A" matrix above.
multiplier: The "B" matrix above
Returns:
A Tensor with S[N] = \sum_{i=0}^{N-1} A^i B (A^i)^T
S[0] is the zero matrix
S[1] is B
S[2] is A B A^T + B
...and so on
"""
array_size = math_ops.cast(array_size, dtypes.int32)
power_matrix = ops.convert_to_tensor(power_matrix)
identity_like_power_matrix = linalg_ops.eye(
array_ops.shape(power_matrix)[0], dtype=power_matrix.dtype)
identity_like_power_matrix.set_shape(
ops.convert_to_tensor(power_matrix).get_shape())
transition_powers = functional_ops.scan(
lambda previous_power, _: math_ops.matmul(previous_power, power_matrix),
math_ops.range(array_size - 1),
initializer=identity_like_power_matrix)
summed = math_ops.cumsum(
array_ops.concat([
array_ops.expand_dims(multiplier, 0), math_ops.matmul(
batch_times_matrix(transition_powers, multiplier),
transition_powers,
adjoint_b=True)
], 0))
return array_ops.concat(
[array_ops.expand_dims(array_ops.zeros_like(multiplier), 0), summed], 0)
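# Illustrative sketch, not part of the original module: a direct numpy restatement
# of the recursion above, S[0] = 0 and S[N] = S[N-1] + A^(N-1) B (A^(N-1))^T, which
# is convenient for checking small cases. Hypothetical helper name, numpy only:
def _power_sums_reference(array_size, power_matrix, multiplier):
  import numpy as np
  sums = [np.zeros_like(multiplier)]
  power = np.eye(power_matrix.shape[0], dtype=power_matrix.dtype)
  for _ in range(array_size):
    sums.append(sums[-1] + power.dot(multiplier).dot(power.T))
    power = power.dot(power_matrix)
  return np.stack(sums)  # entry N is \sum_{i=0}^{N-1} A^i B (A^i)^T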
def matrix_to_powers(matrix, powers):
"""Raise a single matrix to multiple powers."""
matrix_tiled = array_ops.tile(
array_ops.expand_dims(matrix, 0), [array_ops.size(powers), 1, 1])
return batch_matrix_pow(matrix_tiled, powers)
def batch_matrix_pow(matrices, powers):
"""Compute powers of matrices, e.g. A^3 = matmul(matmul(A, A), A).
Uses exponentiation by squaring, with O(log(p)) matrix multiplications to
compute A^p.
Args:
matrices: [batch size x N x N]
powers: Which integer power to raise each matrix to [batch size]
Returns:
The matrices raised to their respective powers, same dimensions as the
"matrices" argument.
"""
def terminate_when_all_zero(current_argument, residual_powers, accumulator):
del current_argument, accumulator # not used for condition
do_exit = math_ops.reduce_any(
math_ops.greater(residual_powers, array_ops.ones_like(residual_powers)))
return do_exit
def do_iteration(current_argument, residual_powers, accumulator):
"""Compute one step of iterative exponentiation by squaring.
The recursive form is:
power(A, p) = { power(matmul(A, A), p / 2) for even p
{ matmul(A, power(matmul(A, A), (p - 1) / 2)) for odd p
power(A, 0) = I
The power(A, 0) = I case is handled by starting with accumulator set to the
identity matrix; matrices with zero residual powers are passed through
unchanged.
Args:
current_argument: On this step, what is the first argument (A^2..^2) to
the (unrolled) recursive function? [batch size x N x N]
residual_powers: On this step, what is the second argument (residual p)?
[batch_size]
accumulator: Accumulates the exterior multiplications from the odd
powers (initially the identity matrix). [batch_size x N x N]
Returns:
Updated versions of each argument for one step of the unrolled
computation. Does not change parts of the batch which have a residual
power of zero.
"""
is_even = math_ops.equal(residual_powers % 2,
array_ops.zeros(
array_ops.shape(residual_powers),
dtype=dtypes.int32))
new_accumulator = array_ops.where(is_even, accumulator,
math_ops.matmul(accumulator,
current_argument))
new_argument = math_ops.matmul(current_argument, current_argument)
do_update = math_ops.greater(residual_powers, 1)
new_residual_powers = residual_powers - residual_powers % 2
new_residual_powers //= 2
# Stop updating if we've reached our base case; some batch elements may
# finish sooner than others
accumulator = array_ops.where(do_update, new_accumulator, accumulator)
current_argument = array_ops.where(do_update, new_argument,
current_argument)
residual_powers = array_ops.where(do_update, new_residual_powers,
residual_powers)
return (current_argument, residual_powers, accumulator)
matrices = ops.convert_to_tensor(matrices)
powers = math_ops.cast(powers, dtype=dtypes.int32)
ident = array_ops.expand_dims(
array_ops.diag(
array_ops.ones([array_ops.shape(matrices)[1]], dtype=matrices.dtype)),
0)
ident_tiled = array_ops.tile(ident, [array_ops.shape(matrices)[0], 1, 1])
(final_argument,
final_residual_power, final_accumulator) = control_flow_ops.while_loop(
terminate_when_all_zero, do_iteration, [matrices, powers, ident_tiled])
return array_ops.where(
math_ops.equal(final_residual_power,
array_ops.zeros_like(
final_residual_power, dtype=dtypes.int32)),
ident_tiled, math_ops.matmul(final_argument, final_accumulator))
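# Illustrative sketch, not part of the original module: the while_loop above is a
# batched, graph-mode form of ordinary exponentiation by squaring. A single-matrix
# numpy analogue of the same recursion (hypothetical helper name):
def _matrix_pow_reference(matrix, power):
  import numpy as np
  result = np.eye(matrix.shape[0], dtype=matrix.dtype)
  base = np.array(matrix)
  while power > 0:
    if power % 2 == 1:
      result = result.dot(base)  # odd residual power: fold into the accumulator
    base = base.dot(base)  # square the current argument
    power //= 2
  return result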
# TODO(allenl): would be useful if this was built into batch_matmul
def batch_times_matrix(batch, matrix, adj_x=False, adj_y=False):
"""Multiply a batch of matrices by a single matrix.
Functionally equivalent to:
tf.matmul(batch, array_ops.tile(gen_math_ops.expand_dims(matrix, 0),
[array_ops.shape(batch)[0], 1, 1]),
adjoint_a=adj_x, adjoint_b=adj_y)
Args:
batch: [batch_size x N x M] after optional transpose
matrix: [M x P] after optional transpose
adj_x: If true, transpose the second two dimensions of "batch" before
multiplying.
adj_y: If true, transpose "matrix" before multiplying.
Returns:
[batch_size x N x P]
"""
batch = ops.convert_to_tensor(batch)
matrix = ops.convert_to_tensor(matrix)
assert batch.get_shape().ndims == 3
assert matrix.get_shape().ndims == 2
if adj_x:
batch = array_ops.transpose(batch, [0, 2, 1])
batch_dimension = batch.get_shape()[0].value
first_dimension = batch.get_shape()[1].value
tensor_batch_shape = array_ops.shape(batch)
if batch_dimension is None:
batch_dimension = tensor_batch_shape[0]
if first_dimension is None:
first_dimension = tensor_batch_shape[1]
matrix_first_dimension, matrix_second_dimension = matrix.get_shape().as_list()
batch_reshaped = array_ops.reshape(batch, [-1, tensor_batch_shape[2]])
if adj_y:
if matrix_first_dimension is None:
matrix_first_dimension = array_ops.shape(matrix)[0]
result_shape = [batch_dimension, first_dimension, matrix_first_dimension]
else:
if matrix_second_dimension is None:
matrix_second_dimension = array_ops.shape(matrix)[1]
result_shape = [batch_dimension, first_dimension, matrix_second_dimension]
return array_ops.reshape(
math_ops.matmul(batch_reshaped, matrix, adjoint_b=adj_y), result_shape)
def matrix_times_batch(matrix, batch, adj_x=False, adj_y=False):
"""Like batch_times_matrix, but with the multiplication order swapped."""
return array_ops.transpose(
batch_times_matrix(
batch=batch, matrix=matrix, adj_x=not adj_y, adj_y=not adj_x),
[0, 2, 1])
def make_toeplitz_matrix(inputs, name=None):
"""Make a symmetric Toeplitz matrix from input array of values.
Args:
inputs: a 3-D tensor of shape [num_blocks, block_size, block_size].
name: the name of the operation.
Returns:
a symmetric Toeplitz matrix of shape
[num_blocks*block_size, num_blocks*block_size].
"""
num_blocks = array_ops.shape(inputs)[0]
block_size = array_ops.shape(inputs)[1]
output_size = block_size * num_blocks
lags = array_ops.reshape(math_ops.range(num_blocks), shape=[1, -1])
indices = math_ops.abs(lags - array_ops.transpose(lags))
output = array_ops.gather(inputs, indices)
output = array_ops.reshape(
array_ops.transpose(output, [0, 2, 1, 3]), [output_size, output_size])
return array_ops.identity(output, name=name)
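# Illustrative sketch, not part of the original module: the gather above indexes
# blocks by |i - j|, so block (i, j) of the output equals inputs[|i - j|]. A numpy
# analogue for block_size == 1 (hypothetical helper name):
def _toeplitz_reference(first_row):
  import numpy as np
  first_row = np.asarray(first_row)
  lags = np.arange(len(first_row))
  return first_row[np.abs(lags[None, :] - lags[:, None])]
# e.g. _toeplitz_reference([1., 2., 3.]) ->
#   [[1., 2., 3.],
#    [2., 1., 2.],
#    [3., 2., 1.]]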
# TODO(allenl): Investigate alternative parameterizations.
def sign_magnitude_positive_definite(
raw, off_diagonal_scale=0., overall_scale=0.):
"""Constructs a positive definite matrix from an unconstrained input matrix.
We want to keep the whole matrix on a log scale, but also allow off-diagonal
elements to be negative, so the sign of off-diagonal elements is modeled
separately from their magnitude (using the lower and upper triangles
respectively). Specifically:
for i < j, we have:
output_cholesky[i, j] = raw[j, i] / (abs(raw[j, i]) + 1) *
exp((off_diagonal_scale + overall_scale + raw[i, j]) / 2)
output_cholesky[i, i] = exp((raw[i, i] + overall_scale) / 2)
output = output_cholesky^T * output_cholesky
where raw, off_diagonal_scale, and overall_scale are
un-constrained real-valued variables. The resulting values are stable
around zero due to the exponential (and the softsign keeps the function
smooth).
Args:
raw: A [..., M, M] Tensor.
off_diagonal_scale: A scalar or [...] shaped Tensor controlling the relative
scale of off-diagonal values in the output matrix.
overall_scale: A scalar or [...] shaped Tensor controlling the overall scale
of the output matrix.
Returns:
The `output` matrix described above, a [..., M, M] positive definite matrix.
"""
raw = ops.convert_to_tensor(raw)
diagonal = array_ops.matrix_diag_part(raw)
def _right_pad_with_ones(tensor, target_rank):
# Allow broadcasting even if overall_scale and off_diagonal_scale have batch
# dimensions
tensor = ops.convert_to_tensor(tensor, dtype=raw.dtype.base_dtype)
return array_ops.reshape(tensor,
array_ops.concat(
[
array_ops.shape(tensor), array_ops.ones(
[target_rank - array_ops.rank(tensor)],
dtype=target_rank.dtype)
],
axis=0))
# We divide the log values by 2 to compensate for the squaring that happens
# when transforming Cholesky factors into positive definite matrices.
sign_magnitude = (gen_math_ops.exp(
(raw + _right_pad_with_ones(off_diagonal_scale, array_ops.rank(raw)) +
_right_pad_with_ones(overall_scale, array_ops.rank(raw))) / 2.) *
nn.softsign(array_ops.matrix_transpose(raw)))
sign_magnitude.set_shape(raw.get_shape())
cholesky_factor = array_ops.matrix_set_diag(
input=array_ops.matrix_band_part(sign_magnitude, 0, -1),
diagonal=gen_math_ops.exp((diagonal + _right_pad_with_ones(
overall_scale, array_ops.rank(diagonal))) / 2.))
return math_ops.matmul(cholesky_factor, cholesky_factor, transpose_a=True)
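# Illustrative sketch, not part of the original module: a numpy restatement of the
# parameterization above for one unbatched matrix, handy for checking that the
# result is symmetric positive definite. Hypothetical helper name, numpy only:
def _sign_magnitude_psd_reference(raw, off_diagonal_scale=0., overall_scale=0.):
  import numpy as np
  raw = np.asarray(raw, dtype=np.float64)
  softsign = raw.T / (np.abs(raw.T) + 1.)
  magnitude = np.exp((raw + off_diagonal_scale + overall_scale) / 2.)
  cholesky = np.triu(softsign * magnitude, k=1)  # strict upper triangle
  cholesky[np.diag_indices_from(cholesky)] = np.exp(
      (np.diag(raw) + overall_scale) / 2.)
  return cholesky.T.dot(cholesky)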
def transform_to_covariance_matrices(input_vectors, matrix_size):
"""Construct covariance matrices via transformations from input_vectors.
Args:
input_vectors: A [batch size x input size] batch of vectors to transform.
matrix_size: An integer indicating one dimension of the (square) output
matrix.
Returns:
A [batch size x matrix_size x matrix_size] batch of covariance matrices.
"""
combined_values = layers.fully_connected(
input_vectors, matrix_size**2 + 2, activation_fn=None)
return sign_magnitude_positive_definite(
raw=array_ops.reshape(combined_values[..., :-2],
array_ops.concat([
array_ops.shape(combined_values)[:-1],
[matrix_size, matrix_size]
], 0)),
off_diagonal_scale=combined_values[..., -2],
overall_scale=combined_values[..., -1])
def variable_covariance_matrix(
size, name, dtype, initial_diagonal_values=None,
initial_overall_scale_log=0.):
"""Construct a Variable-parameterized positive definite matrix.
Useful for parameterizing covariance matrices.
Args:
size: The size of the main diagonal, the returned matrix having shape [size
x size].
name: The name to use when defining variables and ops.
dtype: The floating point data type to use.
initial_diagonal_values: A Tensor with shape [size] with initial values for
the diagonal values of the returned matrix. Must be positive.
initial_overall_scale_log: Initial value of the bias term for every element
of the matrix in log space.
Returns:
A Variable-parameterized covariance matrix with shape [size x size].
"""
raw_values = variable_scope.get_variable(
name + "_pre_transform",
dtype=dtype,
shape=[size, size],
initializer=init_ops.zeros_initializer())
if initial_diagonal_values is not None:
raw_values += array_ops.matrix_diag(math_ops.log(initial_diagonal_values))
return array_ops.identity(
sign_magnitude_positive_definite(
raw=raw_values,
off_diagonal_scale=variable_scope.get_variable(
name + "_off_diagonal_scale",
dtype=dtype,
initializer=constant_op.constant(-5., dtype=dtype)),
overall_scale=ops.convert_to_tensor(
initial_overall_scale_log, dtype=dtype) +
variable_scope.get_variable(
name + "_overall_scale",
dtype=dtype,
shape=[],
initializer=init_ops.zeros_initializer())),
name=name)
def batch_start_time(times):
return times[:, 0]
def batch_end_time(times):
return times[:, -1]
def log_noninformative_covariance_prior(covariance):
"""Compute a relatively uninformative prior for noise parameters.
Helpful for avoiding noise over-estimation, where noise otherwise decreases
very slowly during optimization.
See:
Villegas, C. On the A Priori Distribution of the Covariance Matrix.
Ann. Math. Statist. 40 (1969), no. 3, 1098--1099.
Args:
covariance: A covariance matrix.
Returns:
For a [p x p] matrix:
log(det(covariance)^(-(p + 1) / 2))
"""
# Avoid zero/negative determinants due to numerical errors
covariance += array_ops.diag(1e-8 * array_ops.ones(
shape=[array_ops.shape(covariance)[0]], dtype=covariance.dtype))
power = -(math_ops.cast(array_ops.shape(covariance)[0] + 1,
covariance.dtype) / 2.)
return power * math_ops.log(linalg_ops.matrix_determinant(covariance))
def entropy_matched_cauchy_scale(covariance):
"""Approximates a similar Cauchy distribution given a covariance matrix.
Since Cauchy distributions do not have moments, entropy matching provides one
way to set a Cauchy's scale parameter in a way that provides a similar
distribution. The effect is dividing the standard deviation of an independent
Gaussian by a constant very near 3.
To set the scale of the Cauchy distribution, we first select the diagonals of
`covariance`. Since this ignores cross terms, it overestimates the entropy of
the Gaussian. For each of these variances, we solve for the Cauchy scale
parameter which gives the same entropy as the Gaussian with that
variance. This means setting the (univariate) Gaussian entropy
0.5 * ln(2 * variance * pi * e)
equal to the Cauchy entropy
ln(4 * pi * scale)
Solving, we get scale = sqrt(variance * (e / (8 pi))).
Args:
covariance: A [batch size x N x N] batch of covariance matrices to produce
Cauchy scales for.
Returns:
A [batch size x N] set of Cauchy scale parameters for each part of the batch
and each dimension of the input Gaussians.
"""
return math_ops.sqrt(math.e / (8. * math.pi) *
array_ops.matrix_diag_part(covariance))
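# Illustrative check, not part of the original module: the closed form above comes
# from equating the Gaussian entropy 0.5 * ln(2 * pi * e * variance) with the
# Cauchy entropy ln(4 * pi * scale). A small numeric confirmation (numpy only,
# hypothetical helper name):
def _cauchy_scale_entropy_check(variance=2.0):
  import numpy as np
  scale = np.sqrt(variance * np.e / (8. * np.pi))
  gaussian_entropy = 0.5 * np.log(2. * np.pi * np.e * variance)
  cauchy_entropy = np.log(4. * np.pi * scale)
  return np.isclose(gaussian_entropy, cauchy_entropy)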
class TensorValuedMutableDenseHashTable(lookup.MutableDenseHashTable):
"""A version of MutableDenseHashTable which stores arbitrary Tensor shapes.
Since MutableDenseHashTable only allows vectors right now, simply adds reshape
ops on both ends.
"""
def __init__(self, key_dtype, value_dtype, default_value, *args, **kwargs):
self._non_vector_value_shape = array_ops.shape(default_value)
super(TensorValuedMutableDenseHashTable, self).__init__(
key_dtype=key_dtype,
value_dtype=value_dtype,
default_value=array_ops.reshape(default_value, [-1]),
*args,
**kwargs)
def insert(self, keys, values, name=None):
keys = ops.convert_to_tensor(keys, dtype=self._key_dtype)
keys_flat = array_ops.reshape(keys, [-1])
return super(TensorValuedMutableDenseHashTable, self).insert(
keys=keys_flat,
# Each key has one corresponding value, so the shape of the tensor of
# values for every key is key_shape + value_shape
values=array_ops.reshape(values, [array_ops.shape(keys_flat)[0], -1]),
name=name)
def lookup(self, keys, name=None):
keys_flat = array_ops.reshape(
ops.convert_to_tensor(keys, dtype=self._key_dtype), [-1])
return array_ops.reshape(
super(TensorValuedMutableDenseHashTable, self).lookup(
keys=keys_flat, name=name),
array_ops.concat([array_ops.shape(keys), self._non_vector_value_shape],
0))
class TupleOfTensorsLookup(lookup.LookupInterface):
"""A LookupInterface with nested tuples of Tensors as values.
Creates one MutableDenseHashTable per value Tensor, which has some unnecessary
overhead.
"""
def __init__(
self, key_dtype, default_values, empty_key, name, checkpoint=True):
default_values_flat = nest.flatten(default_values)
self._hash_tables = nest.pack_sequence_as(
default_values,
[TensorValuedMutableDenseHashTable(
key_dtype=key_dtype,
value_dtype=default_value.dtype.base_dtype,
default_value=default_value,
empty_key=empty_key,
name=name + "_{}".format(table_number),
checkpoint=checkpoint)
for table_number, default_value
in enumerate(default_values_flat)])
self._name = name
def lookup(self, keys):
return nest.pack_sequence_as(
self._hash_tables,
[hash_table.lookup(keys)
for hash_table in nest.flatten(self._hash_tables)])
def insert(self, keys, values):
nest.assert_same_structure(self._hash_tables, values)
# Avoid race conditions by requiring that all inputs are computed before any
# inserts happen (an issue if one key's update relies on another's value).
values_flat = [array_ops.identity(value) for value in nest.flatten(values)]
with ops.control_dependencies(values_flat):
insert_ops = [hash_table.insert(keys, value)
for hash_table, value
in zip(nest.flatten(self._hash_tables),
values_flat)]
return control_flow_ops.group(*insert_ops)
def check_table_dtypes(self, key_dtype, value_dtype):
# dtype checking is done in the objects in self._hash_tables
pass
def replicate_state(start_state, batch_size):
"""Create batch versions of state.
Takes a list of Tensors, adds a batch dimension, and replicates
batch_size times across that batch dimension. Used to replicate the
non-batch state returned by get_start_state in define_loss.
Args:
start_state: Model-defined state to replicate.
batch_size: Batch dimension for data.
Returns:
Replicated versions of the state.
"""
flattened_state = nest.flatten(start_state)
replicated_state = [
array_ops.tile(
array_ops.expand_dims(state_nonbatch, 0),
array_ops.concat([[batch_size], array_ops.ones(
[array_ops.rank(state_nonbatch)], dtype=dtypes.int32)], 0))
for state_nonbatch in flattened_state
]
return nest.pack_sequence_as(start_state, replicated_state)
Moments = collections.namedtuple("Moments", ["mean", "variance"])
# Currently all of these statistics are computed incrementally (i.e. are updated
# every time a new mini-batch of training data is presented) when this object is
# created in InputStatisticsFromMiniBatch.
InputStatistics = collections.namedtuple(
"InputStatistics",
["series_start_moments", # The mean and variance of each feature in a chunk
# (with a size configured in the statistics
# object) at the start of the series. A tuple of
# (mean, variance), each with shape [number of
# features], floating point. One use is in state
# space models, to keep priors calibrated even as
# earlier parts of the series are presented. If
# this object was created by
# InputStatisticsFromMiniBatch, these moments are
# computed based on the earliest chunk of data
# presented so far. However, there is a race
# condition in the update, so these may reflect
# statistics later in the series, but should
# eventually reflect statistics in a chunk at the
# series start.
"overall_feature_moments", # The mean and variance of each feature over
# the entire series. A tuple of (mean,
# variance), each with shape [number of
# features]. If this object was created by
# InputStatisticsFromMiniBatch, these moments
# are estimates based on the data seen so far.
"start_time", # The first (lowest) time in the series, a scalar
# integer. If this object was created by
# InputStatisticsFromMiniBatch, this is the lowest time seen
# so far rather than the lowest time that will ever be seen
# (guaranteed to be at least as low as the lowest time
# presented in the current minibatch).
"total_observation_count", # Count of data points, a scalar integer. If
# this object was created by
# InputStatisticsFromMiniBatch, this is an
# estimate of the total number of observations
# in the whole dataset computed based on the
# density of the series and the minimum and
# maximum times seen.
])
# TODO(allenl): It would be nice to do something with full series statistics
# when the user provides that.
class InputStatisticsFromMiniBatch(object):
"""Generate statistics from mini-batch input."""
def __init__(self, num_features, dtype, starting_variance_window_size=16):
"""Configure the input statistics object.
Args:
num_features: Number of features for the time series
dtype: The floating point data type to use.
starting_variance_window_size: The number of datapoints to use when
computing the mean and variance at the start of the series.
"""
self._starting_variance_window_size = starting_variance_window_size
self._num_features = num_features
self._dtype = dtype
def initialize_graph(self, features, update_statistics=True):
"""Create any ops needed to provide input statistics.
Should be called before statistics are requested.
Args:
features: A dictionary, the output of a `TimeSeriesInputFn` (with keys
TrainEvalFeatures.TIMES and TrainEvalFeatures.VALUES).
update_statistics: Whether `features` should be used to update adaptive
statistics. Typically True for training and false for evaluation.
Returns:
An InputStatistics object composed of Variables, which will be updated
based on mini-batches of data if requested.
"""
if (TrainEvalFeatures.TIMES in features
and TrainEvalFeatures.VALUES in features):
times = features[TrainEvalFeatures.TIMES]
values = features[TrainEvalFeatures.VALUES]
else:
# times and values may not be available, for example during prediction. We
# still need to retrieve our variables so that they can be read from, even
# if we're not going to update them.
times = None
values = None
# Create/retrieve variables representing input statistics, initialized
# without data to avoid deadlocking if variables are initialized before
# queue runners are started.
with variable_scope.variable_scope("input_statistics", use_resource=True):
statistics = self._create_variable_statistics_object()
with variable_scope.variable_scope(
"input_statistics_auxiliary", use_resource=True):
# Secondary statistics, necessary for the incremental computation of the
# primary statistics (e.g. counts and sums for computing a mean
# incrementally).
auxiliary_variables = self._AdaptiveInputAuxiliaryStatistics(
num_features=self._num_features, dtype=self._dtype)
if update_statistics and times is not None and values is not None:
# If we have times and values from mini-batch input, create update ops to
# take the new data into account.
assign_op = self._update_statistics_from_mini_batch(
statistics, auxiliary_variables, times, values)
with ops.control_dependencies([assign_op]):
stat_variables = nest.pack_sequence_as(statistics, [
array_ops.identity(tensor) for tensor in nest.flatten(statistics)
])
# Since start time updates have a race condition, ensure that the
# reported start time is at least as low as the lowest time in this
# mini-batch. The start time should converge on the correct value
# eventually even with the race condition, but for example state space
# models have an assertion which could fail without this
# post-processing.
return stat_variables._replace(start_time=gen_math_ops.minimum(
stat_variables.start_time, math_ops.reduce_min(times)))
else:
return statistics
class _AdaptiveInputAuxiliaryStatistics(collections.namedtuple(
"_AdaptiveInputAuxiliaryStatistics",
["max_time_seen", # The maximum time seen (best effort if updated from
# multiple workers; see notes about race condition
# below).
"chunk_count", # The number of chunks seen.
"inter_observation_duration_sum", # The sum across chunks of their "time
# density" (number of times per
# example).
"example_count", # The number of examples seen (each example has a
# single time associated with it and one or more
# real-valued features).
"overall_feature_sum", # The sum of values for each feature. Shape
# [number of features].
"overall_feature_sum_of_squares", # The sum of squared values for each
# feature. Shape [number of features]
])):
"""Extra statistics used to incrementally update InputStatistics."""
def __new__(cls, num_features, dtype):
return super(
InputStatisticsFromMiniBatch # pylint: disable=protected-access
._AdaptiveInputAuxiliaryStatistics,
cls).__new__(
cls,
max_time_seen=variable_scope.get_variable(
name="max_time_seen",
initializer=dtypes.int64.min,
dtype=dtypes.int64,
trainable=False),
chunk_count=variable_scope.get_variable(
name="chunk_count",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int64,
trainable=False),
inter_observation_duration_sum=variable_scope.get_variable(
name="inter_observation_duration_sum",
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtype,
trainable=False),
example_count=variable_scope.get_variable(
name="example_count",
shape=[],
dtype=dtypes.int64,
trainable=False),
overall_feature_sum=variable_scope.get_variable(
name="overall_feature_sum",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
overall_feature_sum_of_squares=variable_scope.get_variable(
name="overall_feature_sum_of_squares",
shape=[num_features],
dtype=dtype,
initializer=init_ops.zeros_initializer(),
trainable=False))
def _update_statistics_from_mini_batch(
self, statistics, auxiliary_variables, times, values):
"""Given mini-batch input, update `statistics` and `auxiliary_variables`."""
values = math_ops.cast(values, self._dtype)
# The density (measured in times per observation) that we see in each part
# of the mini-batch.
batch_inter_observation_duration = (math_ops.cast(
math_ops.reduce_max(times, axis=1) - math_ops.reduce_min(times, axis=1),
self._dtype) / math_ops.cast(
array_ops.shape(times)[1] - 1, self._dtype))
# Co-locate updates with their variables to minimize race conditions when
# updating statistics.
with ops.colocate_with(auxiliary_variables.max_time_seen):
# There is a race condition if this value is being updated from multiple
# workers. However, it should eventually reach the correct value if the
# last chunk is presented enough times.
max_time_seen_assign = state_ops.assign(
auxiliary_variables.max_time_seen,
gen_math_ops.maximum(auxiliary_variables.max_time_seen,
math_ops.reduce_max(times)))
with ops.colocate_with(auxiliary_variables.chunk_count):
chunk_count_assign = state_ops.assign_add(auxiliary_variables.chunk_count,
array_ops.shape(
times,
out_type=dtypes.int64)[0])
with ops.colocate_with(auxiliary_variables.inter_observation_duration_sum):
inter_observation_duration_assign = state_ops.assign_add(
auxiliary_variables.inter_observation_duration_sum,
math_ops.reduce_sum(batch_inter_observation_duration))
with ops.colocate_with(auxiliary_variables.example_count):
example_count_assign = state_ops.assign_add(
auxiliary_variables.example_count,
array_ops.size(times, out_type=dtypes.int64))
# Note: These mean/variance updates assume that all points are equally
# likely, which is not true if _chunks_ are sampled uniformly from the space
# of all possible contiguous chunks, since points at the start and end of
# the series are then members of fewer chunks. For series which are much
# longer than the chunk size (the usual/expected case), this effect becomes
# irrelevant.
with ops.colocate_with(auxiliary_variables.overall_feature_sum):
overall_feature_sum_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum,
math_ops.reduce_sum(values, axis=[0, 1]))
with ops.colocate_with(auxiliary_variables.overall_feature_sum_of_squares):
overall_feature_sum_of_squares_assign = state_ops.assign_add(
auxiliary_variables.overall_feature_sum_of_squares,
math_ops.reduce_sum(values**2, axis=[0, 1]))
per_chunk_aux_updates = control_flow_ops.group(
max_time_seen_assign, chunk_count_assign,
inter_observation_duration_assign, example_count_assign,
overall_feature_sum_assign, overall_feature_sum_of_squares_assign)
with ops.control_dependencies([per_chunk_aux_updates]):
example_count_float = math_ops.cast(auxiliary_variables.example_count,
self._dtype)
new_feature_mean = (auxiliary_variables.overall_feature_sum /
example_count_float)
overall_feature_mean_update = state_ops.assign(
statistics.overall_feature_moments.mean, new_feature_mean)
overall_feature_var_update = state_ops.assign(
statistics.overall_feature_moments.variance,
# De-biased n / (n - 1) variance correction
example_count_float / (example_count_float - 1.) *
(auxiliary_variables.overall_feature_sum_of_squares /
example_count_float - new_feature_mean**2))
# TODO(b/35675805): Remove this cast
min_time_batch = math_ops.cast(math_ops.argmin(times[:, 0]), dtypes.int32)
def series_start_updates():
# If this is the lowest-time chunk that we have seen so far, update
# series start moments to reflect that. Note that these statistics are
# "best effort", as there are race conditions in the update (however,
# they should eventually converge if the start of the series is
# presented enough times).
mean, variance = nn.moments(
values[min_time_batch, :self._starting_variance_window_size],
axes=[0])
return control_flow_ops.group(
state_ops.assign(statistics.series_start_moments.mean, mean),
state_ops.assign(statistics.series_start_moments.variance,
variance))
with ops.colocate_with(statistics.start_time):
series_start_update = control_flow_ops.cond(
# Update moments whenever we even match the lowest time seen so far,
# to ensure that series start statistics are eventually updated to
# their correct values, despite race conditions (i.e. eventually
# statistics.start_time will reflect the global lowest time, and
# given that we will eventually update the series start moments to
# their correct values).
math_ops.less_equal(times[min_time_batch, 0],
statistics.start_time),
series_start_updates,
control_flow_ops.no_op)
with ops.control_dependencies([series_start_update]):
# There is a race condition if this update is performed in parallel on
# multiple workers. Since models may be sensitive to being presented
# with times before the putative start time, the value of this
# variable is post-processed above to guarantee that each worker is
# presented with a start time which is at least as low as the lowest
# time in its current mini-batch.
start_time_update = state_ops.assign(statistics.start_time,
gen_math_ops.minimum(
statistics.start_time,
math_ops.reduce_min(times)))
inter_observation_duration_estimate = (
auxiliary_variables.inter_observation_duration_sum / math_ops.cast(
auxiliary_variables.chunk_count, self._dtype))
# Estimate the total number of observations as:
# (end time - start time + 1) * average intra-chunk time density
total_observation_count_update = state_ops.assign(
statistics.total_observation_count,
math_ops.cast(
gen_math_ops.round(
math_ops.cast(auxiliary_variables.max_time_seen -
statistics.start_time + 1, self._dtype) /
inter_observation_duration_estimate), dtypes.int64))
per_chunk_stat_updates = control_flow_ops.group(
overall_feature_mean_update, overall_feature_var_update,
series_start_update, start_time_update,
total_observation_count_update)
return per_chunk_stat_updates
def _create_variable_statistics_object(self):
"""Creates non-trainable variables representing input statistics."""
series_start_moments = Moments(
mean=variable_scope.get_variable(
name="series_start_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="series_start_variance",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
overall_feature_moments = Moments(
mean=variable_scope.get_variable(
name="overall_feature_mean",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.zeros_initializer(),
trainable=False),
variance=variable_scope.get_variable(
name="overall_feature_var",
shape=[self._num_features],
dtype=self._dtype,
initializer=init_ops.ones_initializer(),
trainable=False))
start_time = variable_scope.get_variable(
name="start_time",
dtype=dtypes.int64,
initializer=dtypes.int64.max,
trainable=False)
total_observation_count = variable_scope.get_variable(
name="total_observation_count",
shape=[],
dtype=dtypes.int64,
initializer=init_ops.ones_initializer(),
trainable=False)
return InputStatistics(
series_start_moments=series_start_moments,
overall_feature_moments=overall_feature_moments,
start_time=start_time,
total_observation_count=total_observation_count)
| apache-2.0 | 8,629,913,300,048,588,000 | -5,929,365,988,442,760,000 | 44.309148 | 90 | 0.64455 | false |
TAMU-CPT/galaxy-tools | tools/gff3/gff3_filter.py | 1 | 1553 | #!/usr/bin/env python
import sys
import logging
import argparse
from cpt_gffParser import gffParse, gffWrite
from gff3 import feature_lambda, feature_test_qual_value
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def gff_filter(gff3, id_list=None, id="", attribute_field="ID", subfeatures=True):
attribute_field = attribute_field.split("__cn__")
if id_list:
filter_strings = [line.strip() for line in id_list]
else:
filter_strings = [x.strip() for x in id.split("__cn__")]
for rec in gffParse(gff3):
rec.features = feature_lambda(
rec.features,
feature_test_qual_value,
{"qualifier": attribute_field, "attribute_list": filter_strings},
subfeatures=subfeatures,
)
rec.annotations = {}
gffWrite([rec], sys.stdout)
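# Illustrative note, not part of the original tool: the "__cn__" separator lets one
# command-line value carry several entries, e.g. --id "gene1__cn__gene2" filters on
# the IDs ["gene1", "gene2"] and --attribute_field "ID__cn__Name" matches against
# either qualifier (the gene names here are hypothetical).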
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="extract features from a GFF3 file based on ID/qualifiers"
)
parser.add_argument("gff3", type=argparse.FileType("r"), help="GFF3 annotations")
parser.add_argument("--id_list", type=argparse.FileType("r"))
parser.add_argument("--id", type=str)
parser.add_argument(
"--attribute_field",
type=str,
help="Column 9 Field to search against",
default="ID",
)
parser.add_argument(
"--subfeatures",
action="store_true",
help="Retain subfeature tree of matched features",
)
args = parser.parse_args()
gff_filter(**vars(args))
| gpl-3.0 | 4,078,550,912,084,758,000 | -719,674,272,405,218,000 | 31.354167 | 85 | 0.627817 | false |
greggian/TapdIn | django/contrib/localflavor/us/models.py | 1 | 1132 | from django.conf import settings
from django.db.models.fields import Field
class USStateField(Field):
def get_internal_type(self):
return "USStateField"
def db_type(self):
if settings.DATABASE_ENGINE == 'oracle':
return 'CHAR(2)'
else:
return 'varchar(2)'
def formfield(self, **kwargs):
from django.contrib.localflavor.us.forms import USStateSelect
defaults = {'widget': USStateSelect}
defaults.update(kwargs)
return super(USStateField, self).formfield(**defaults)
class PhoneNumberField(Field):
def get_internal_type(self):
return "PhoneNumberField"
def db_type(self):
if settings.DATABASE_ENGINE == 'oracle':
return 'VARCHAR2(20)'
else:
return 'varchar(20)'
def formfield(self, **kwargs):
from django.contrib.localflavor.us.forms import USPhoneNumberField
defaults = {'form_class': USPhoneNumberField}
defaults.update(kwargs)
return super(PhoneNumberField, self).formfield(**defaults)
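# Illustrative sketch, not part of the original module: typical usage in a model
# definition (model and field names are hypothetical):
#
#   from django.db import models
#   from django.contrib.localflavor.us.models import PhoneNumberField, USStateField
#
#   class Office(models.Model):
#       state = USStateField()
#       phone = PhoneNumberField()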
| apache-2.0 | -3,969,364,050,156,616,700 | -1,308,910,345,913,809,200 | 30.342857 | 74 | 0.614841 | false |
seraphlnWu/in_trip | in_trip/scripts/change_data_from_hbase_to_pg.py | 1 | 1620 | #coding=utf-8
import time
import cPickle
from in_trip.store_data.views import pg_db,conn
import logging
logger = logging.getLogger('parser')
def creat_table():
sql_str = '''
create table "tmp_hbase_to_pg"(
data text,
timestamp float(24)
)
'''
pg_db.execute(sql_str)
conn.commit()
def insert_data(o_dict, default_value):
    data = cPickle.dumps({
'o_dict' : o_dict,
'default_value' : default_value
})
sql_str = '''
insert into tmp_hbase_to_pg
(data,timestamp)
values
(%s,%s);
'''
try:
pg_db.execute(sql_str,(data,time.time()))
conn.commit()
except Exception as e:
conn.rollback()
logger.error('insert to pg error: %s', e)
def get_data_all():
sql_str = '''
select * from tmp_hbase_to_pg;
'''
pg_db.execute(sql_str)
print pg_db.fetchall()
def get_data(offset,limit=1000):
sql_str = '''
select * from tmp_hbase_to_pg limit(%s) offset(%s);
'''
pg_db.execute(sql_str,(limit,offset))
return pg_db.fetchall()
def insert_into_hbase():
from in_trip.store_data.hbase.run import insert_data as hbase_insert
offset = 0
limit = 1000
while True:
res_list = get_data(offset,limit)
if not res_list:
break
offset = offset + limit
for item in res_list:
tmp_data = cPickle.loads(item[0])
hbase_insert(tmp_data['o_dict'],tmp_data['default_value'])
return True
if __name__ == "__main__":
creat_table()
print "success!"
| mit | -689,901,992,704,685,700 | -3,891,956,008,004,941,000 | 22.478261 | 72 | 0.557407 | false |
ngpestelos/ansible | test/units/parsing/vault/test_vault_editor.py | 142 | 6762 | # (c) 2014, James Tanner <[email protected]>
# (c) 2014, James Cammarata, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#!/usr/bin/env python
import sys
import getpass
import os
import shutil
import time
import tempfile
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.utils.unicode import to_bytes, to_unicode
from ansible import errors
from ansible.parsing.vault import VaultLib
from ansible.parsing.vault import VaultEditor
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
v10_data = """$ANSIBLE_VAULT;1.0;AES
53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9
9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1
83c62ffb04c2512995e815de4b4d29ed"""
v11_data = """$ANSIBLE_VAULT;1.1;AES256
62303130653266653331306264616235333735323636616539316433666463323964623162386137
3961616263373033353631316333623566303532663065310a393036623466376263393961326530
64336561613965383835646464623865663966323464653236343638373165343863623638316664
3631633031323837340a396530313963373030343933616133393566366137363761373930663833
3739"""
class TestVaultEditor(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_methods_exist(self):
v = VaultEditor(None)
slots = ['create_file',
'decrypt_file',
'edit_file',
'encrypt_file',
'rekey_file',
'read_data',
'write_data',
'shuffle_files']
for slot in slots:
assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
@patch.object(VaultEditor, '_editor_shell_command')
def test_create_file(self, mock_editor_shell_command):
def sc_side_effect(filename):
return ['touch', filename]
mock_editor_shell_command.side_effect = sc_side_effect
tmp_file = tempfile.NamedTemporaryFile()
os.unlink(tmp_file.name)
ve = VaultEditor("ansible")
ve.create_file(tmp_file.name)
self.assertTrue(os.path.exists(tmp_file.name))
def test_decrypt_1_0(self):
"""
Skip testing decrypting 1.0 files if we don't have access to AES, KDF or
Counter, or we are running on python3 since VaultAES hasn't been backported.
"""
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or sys.version > '3':
raise SkipTest
v10_file = tempfile.NamedTemporaryFile(delete=False)
with v10_file as f:
f.write(to_bytes(v10_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file(v10_file.name)
except errors.AnsibleError as e:
error_hit = True
# verify decrypted content
f = open(v10_file.name, "rb")
fdata = to_unicode(f.read())
f.close()
os.unlink(v10_file.name)
assert error_hit == False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_1(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
v11_file = tempfile.NamedTemporaryFile(delete=False)
with v11_file as f:
f.write(to_bytes(v11_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file(v11_file.name)
except errors.AnsibleError as e:
error_hit = True
# verify decrypted content
f = open(v11_file.name, "rb")
fdata = to_unicode(f.read())
f.close()
os.unlink(v11_file.name)
assert error_hit == False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_rekey_migration(self):
"""
Skip testing rekeying files if we don't have access to AES, KDF or
Counter, or we are running on python3 since VaultAES hasn't been backported.
"""
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or sys.version > '3':
raise SkipTest
v10_file = tempfile.NamedTemporaryFile(delete=False)
with v10_file as f:
f.write(to_bytes(v10_data))
ve = VaultEditor("ansible")
# make sure the password functions for the cipher
error_hit = False
try:
ve.rekey_file(v10_file.name, 'ansible2')
except errors.AnsibleError as e:
error_hit = True
# verify decrypted content
f = open(v10_file.name, "rb")
fdata = f.read()
f.close()
assert error_hit == False, "error rekeying 1.0 file to 1.1"
# ensure filedata can be decrypted, is 1.1 and is AES256
vl = VaultLib("ansible2")
dec_data = None
error_hit = False
try:
dec_data = vl.decrypt(fdata)
except errors.AnsibleError as e:
error_hit = True
os.unlink(v10_file.name)
assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
assert error_hit == False, "error decrypting migrated 1.0 file"
assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
| gpl-3.0 | -1,029,303,583,961,452,500 | -3,328,899,555,293,043,700 | 30.598131 | 104 | 0.654984 | false |
Eksmo/calibre | src/odf/odf2xhtml.py | 10 | 68488 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
#import pdb
#pdb.set_trace()
from xml.sax import handler
from xml.sax.saxutils import escape, quoteattr
from xml.dom import Node
from opendocument import load
from namespaces import ANIMNS, CHARTNS, CONFIGNS, DCNS, DR3DNS, DRAWNS, FONS, \
FORMNS, MATHNS, METANS, NUMBERNS, OFFICENS, PRESENTATIONNS, SCRIPTNS, \
SMILNS, STYLENS, SVGNS, TABLENS, TEXTNS, XLINKNS
if False: # Added by Kovid
DR3DNS, MATHNS, CHARTNS, CONFIGNS, ANIMNS, FORMNS, SMILNS, SCRIPTNS
# Handling of styles
#
# First there are font face declarations. These set up a font style that will be
# referenced from a text-property. The declaration describes the font making
# it possible for the application to find a similar font should the system not
# have that particular one. The StyleToCSS stores these attributes to be used
# for the CSS2 font declaration.
#
# Then there are default-styles. These set defaults for various style types:
# "text", "paragraph", "section", "ruby", "table", "table-column", "table-row",
# "table-cell", "graphic", "presentation", "drawing-page", "chart".
# Since CSS2 can't refer to another style, ODF2XHTML add these to all
# styles unless overridden.
#
# The real styles are declared in the <style:style> element. They have a
# family referring to the default-styles, and may have a parent style.
#
# Styles have scope. The same name can be used for both paragraph and
# character etc. styles. Since CSS2 has no scope we use a prefix. (Not elegant)
# In ODF a style can have a parent, these parents can be chained.
class StyleToCSS:
""" The purpose of the StyleToCSS class is to contain the rules to convert
ODF styles to CSS2. Since it needs the generic fonts, it would probably
        make sense to keep the styles in a dict here as well.
"""
def __init__(self):
# Font declarations
self.fontdict = {}
# Fill-images from presentations for backgrounds
self.fillimages = {}
self.ruleconversions = {
(DRAWNS,u'fill-image-name'): self.c_drawfillimage,
(FONS,u"background-color"): self.c_fo,
(FONS,u"border"): self.c_fo,
(FONS,u"border-bottom"): self.c_fo,
(FONS,u"border-left"): self.c_fo,
(FONS,u"border-right"): self.c_fo,
(FONS,u"border-top"): self.c_fo,
(FONS,u"break-after"): self.c_break, # Added by Kovid
(FONS,u"break-before"): self.c_break,# Added by Kovid
(FONS,u"color"): self.c_fo,
(FONS,u"font-family"): self.c_fo,
(FONS,u"font-size"): self.c_fo,
(FONS,u"font-style"): self.c_fo,
(FONS,u"font-variant"): self.c_fo,
(FONS,u"font-weight"): self.c_fo,
(FONS,u"line-height"): self.c_fo,
(FONS,u"margin"): self.c_fo,
(FONS,u"margin-bottom"): self.c_fo,
(FONS,u"margin-left"): self.c_fo,
(FONS,u"margin-right"): self.c_fo,
(FONS,u"margin-top"): self.c_fo,
(FONS,u"min-height"): self.c_fo,
(FONS,u"padding"): self.c_fo,
(FONS,u"padding-bottom"): self.c_fo,
(FONS,u"padding-left"): self.c_fo,
(FONS,u"padding-right"): self.c_fo,
(FONS,u"padding-top"): self.c_fo,
(FONS,u"page-width"): self.c_page_width,
(FONS,u"page-height"): self.c_page_height,
(FONS,u"text-align"): self.c_text_align,
(FONS,u"text-indent") :self.c_fo,
(TABLENS,u'border-model') :self.c_border_model,
(STYLENS,u'column-width') : self.c_width,
(STYLENS,u"font-name"): self.c_fn,
(STYLENS,u'horizontal-pos'): self.c_hp,
(STYLENS,u'text-position'): self.c_text_position,
(STYLENS,u'text-line-through-style'): self.c_text_line_through_style,
(STYLENS,u'text-underline-style'): self.c_text_underline_style,
(STYLENS,u'width') : self.c_width,
# FIXME Should do style:vertical-pos here
}
def save_font(self, name, family, generic):
""" It is possible that the HTML browser doesn't know how to
show a particular font. Fortunately ODF provides generic fallbacks.
Unfortunately they are not the same as CSS2.
CSS2: serif, sans-serif, cursive, fantasy, monospace
ODF: roman, swiss, modern, decorative, script, system
        This method puts the font and fallback into a dictionary
"""
htmlgeneric = "sans-serif"
if generic == "roman": htmlgeneric = "serif"
elif generic == "swiss": htmlgeneric = "sans-serif"
elif generic == "modern": htmlgeneric = "monospace"
elif generic == "decorative": htmlgeneric = "sans-serif"
elif generic == "script": htmlgeneric = "monospace"
elif generic == "system": htmlgeneric = "serif"
self.fontdict[name] = (family, htmlgeneric)
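        # Illustrative example (added comment): save_font("Liberation Serif",
        # "Liberation Serif", "roman") stores ("Liberation Serif", "serif"), so a
        # later c_fn() call emits font-family: Liberation Serif, serif.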
def c_drawfillimage(self, ruleset, sdict, rule, val):
""" Fill a figure with an image. Since CSS doesn't let you resize images
            this should really be implemented as an absolutely positioned <img>
with a width and a height
"""
sdict['background-image'] = "url('%s')" % self.fillimages[val]
def c_fo(self, ruleset, sdict, rule, val):
""" XSL formatting attributes """
selector = rule[1]
sdict[selector] = val
def c_break(self, ruleset, sdict, rule, val): # Added by Kovid
property = 'page-' + rule[1]
values = {'auto': 'auto', 'column': 'always', 'page': 'always',
'even-page': 'left', 'odd-page': 'right',
'inherit': 'inherit'}
sdict[property] = values.get(val, 'auto')
def c_border_model(self, ruleset, sdict, rule, val):
""" Convert to CSS2 border model """
if val == 'collapsing':
sdict['border-collapse'] ='collapse'
else:
sdict['border-collapse'] ='separate'
def c_width(self, ruleset, sdict, rule, val):
""" Set width of box """
sdict['width'] = val
def c_text_align(self, ruleset, sdict, rule, align):
""" Text align """
if align == "start": align = "left"
if align == "end": align = "right"
sdict['text-align'] = align
def c_fn(self, ruleset, sdict, rule, fontstyle):
""" Generate the CSS font family
A generic font can be found in two ways. In a <style:font-face>
element or as a font-family-generic attribute in text-properties.
"""
generic = ruleset.get((STYLENS,'font-family-generic') )
if generic is not None:
self.save_font(fontstyle, fontstyle, generic)
family, htmlgeneric = self.fontdict.get(fontstyle, (fontstyle, 'serif'))
sdict['font-family'] = '%s, %s' % (family, htmlgeneric)
def c_text_position(self, ruleset, sdict, rule, tp):
""" Text position. This is used e.g. to make superscript and subscript
This attribute can have one or two values.
The first value must be present and specifies the vertical
text position as a percentage that relates to the current font
height or it takes one of the values sub or super. Negative
percentages or the sub value place the text below the
baseline. Positive percentages or the super value place
the text above the baseline. If sub or super is specified,
the application can choose an appropriate text position.
The second value is optional and specifies the font height
as a percentage that relates to the current font-height. If
this value is not specified, an appropriate font height is
used. Although this value may change the font height that
is displayed, it never changes the current font height that
is used for additional calculations.
"""
textpos = tp.split(' ')
if len(textpos) == 2 and textpos[0] != "0%":
# Bug in OpenOffice. If vertical-align is 0% - ignore the text size.
sdict['font-size'] = textpos[1]
if textpos[0] == "super":
sdict['vertical-align'] = "33%"
elif textpos[0] == "sub":
sdict['vertical-align'] = "-33%"
else:
sdict['vertical-align'] = textpos[0]
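        # Illustrative sketch (added comment): for a ruleset entry such as
        # (STYLENS, 'text-position') -> "super 58%", the conversion above yields
        # sdict['font-size'] = "58%" and sdict['vertical-align'] = "33%",
        # i.e. superscript text at 58% of the current font height.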
def c_hp(self, ruleset, sdict, rule, hpos):
#FIXME: Frames wrap-style defaults to 'parallel', graphics to 'none'.
# It is properly set in the parent-styles, but the program doesn't
# collect the information.
wrap = ruleset.get((STYLENS,'wrap'),'parallel')
# Can have: from-left, left, center, right, from-inside, inside, outside
if hpos == "center":
sdict['margin-left'] = "auto"
sdict['margin-right'] = "auto"
# else:
# # force it to be *something* then delete it
# sdict['margin-left'] = sdict['margin-right'] = ''
# del sdict['margin-left'], sdict['margin-right']
if hpos in ("right","outside"):
if wrap in ( "left", "parallel","dynamic"):
sdict['float'] = "right"
elif wrap == "run-through":
sdict['position'] = "absolute" # Simulate run-through
sdict['top'] = "0"
                sdict['right'] = "0"
else: # No wrapping
sdict['margin-left'] = "auto"
sdict['margin-right'] = "0px"
elif hpos in ("left", "inside"):
if wrap in ( "right", "parallel","dynamic"):
sdict['float'] = "left"
elif wrap == "run-through":
sdict['position'] = "absolute" # Simulate run-through
sdict['top'] = "0"
sdict['left'] = "0"
else: # No wrapping
sdict['margin-left'] = "0px"
sdict['margin-right'] = "auto"
elif hpos in ("from-left", "from-inside"):
if wrap in ( "right", "parallel"):
sdict['float'] = "left"
else:
sdict['position'] = "relative" # No wrapping
if ruleset.has_key( (SVGNS,'x') ):
sdict['left'] = ruleset[(SVGNS,'x')]
def c_page_width(self, ruleset, sdict, rule, val):
""" Set width of box
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
sdict['width'] = val
def c_text_underline_style(self, ruleset, sdict, rule, val):
""" Set underline decoration
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
if val and val != "none":
sdict['text-decoration'] = "underline"
def c_text_line_through_style(self, ruleset, sdict, rule, val):
""" Set underline decoration
HTML doesn't really have a page-width. It is always 100% of the browser width
"""
if val and val != "none":
sdict['text-decoration'] = "line-through"
def c_page_height(self, ruleset, sdict, rule, val):
""" Set height of box """
sdict['height'] = val
def convert_styles(self, ruleset):
""" Rule is a tuple of (namespace, name). If the namespace is '' then
it is already CSS2
"""
sdict = {}
for rule,val in ruleset.items():
if rule[0] == '':
sdict[rule[1]] = val
continue
method = self.ruleconversions.get(rule, None )
if method:
method(ruleset, sdict, rule, val)
return sdict
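# Illustrative sketch added for exposition; it is not called by the converter.
# It only uses the FONS/STYLENS constants imported above and shows how a raw
# ruleset of (namespace, attribute) keys becomes plain CSS properties.
def _demo_convert_styles():
    cs = StyleToCSS()
    ruleset = {
        (FONS, u'font-weight'): u'bold',
        (FONS, u'margin-left'): u'1cm',
        (STYLENS, u'text-underline-style'): u'solid',
    }
    # Expected result is roughly:
    # {'font-weight': u'bold', 'margin-left': u'1cm', 'text-decoration': 'underline'}
    return cs.convert_styles(ruleset)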
class TagStack:
def __init__(self):
self.stack = []
def push(self, tag, attrs):
self.stack.append( (tag, attrs) )
def pop(self):
item = self.stack.pop()
return item
def stackparent(self):
item = self.stack[-1]
return item[1]
def rfindattr(self, attr):
""" Find a tag with the given attribute """
for tag, attrs in self.stack:
if attrs.has_key(attr):
return attrs[attr]
return None
def count_tags(self, tag):
c = 0
for ttag, tattrs in self.stack:
if ttag == tag: c = c + 1
return c
special_styles = {
'S-Emphasis':'em',
'S-Citation':'cite',
'S-Strong_20_Emphasis':'strong',
'S-Variable':'var',
'S-Definition':'dfn',
'S-Teletype':'tt',
'P-Heading_20_1':'h1',
'P-Heading_20_2':'h2',
'P-Heading_20_3':'h3',
'P-Heading_20_4':'h4',
'P-Heading_20_5':'h5',
'P-Heading_20_6':'h6',
# 'P-Caption':'caption',
'P-Addressee':'address',
# 'P-List_20_Heading':'dt',
# 'P-List_20_Contents':'dd',
'P-Preformatted_20_Text':'pre',
# 'P-Table_20_Heading':'th',
# 'P-Table_20_Contents':'td',
# 'P-Text_20_body':'p'
}
#-----------------------------------------------------------------------------
#
# ODFCONTENTHANDLER
#
#-----------------------------------------------------------------------------
class ODF2XHTML(handler.ContentHandler):
""" The ODF2XHTML parses an ODF file and produces XHTML"""
def __init__(self, generate_css=True, embedable=False):
# Tags
self.generate_css = generate_css
self.elements = {
(DCNS, 'title'): (self.s_processcont, self.e_dc_title),
(DCNS, 'language'): (self.s_processcont, self.e_dc_contentlanguage),
(DCNS, 'creator'): (self.s_processcont, self.e_dc_creator),
(DCNS, 'description'): (self.s_processcont, self.e_dc_metatag),
(DCNS, 'date'): (self.s_processcont, self.e_dc_metatag),
(DRAWNS, 'custom-shape'): (self.s_custom_shape, self.e_custom_shape),
(DRAWNS, 'frame'): (self.s_draw_frame, self.e_draw_frame),
(DRAWNS, 'image'): (self.s_draw_image, None),
(DRAWNS, 'fill-image'): (self.s_draw_fill_image, None),
(DRAWNS, "layer-set"):(self.s_ignorexml, None),
(DRAWNS, 'object'): (self.s_draw_object, None),
(DRAWNS, 'object-ole'): (self.s_draw_object_ole, None),
(DRAWNS, 'page'): (self.s_draw_page, self.e_draw_page),
(DRAWNS, 'text-box'): (self.s_draw_textbox, self.e_draw_textbox),
(METANS, 'creation-date'):(self.s_processcont, self.e_dc_metatag),
(METANS, 'generator'):(self.s_processcont, self.e_dc_metatag),
(METANS, 'initial-creator'): (self.s_processcont, self.e_dc_metatag),
(METANS, 'keyword'): (self.s_processcont, self.e_dc_metatag),
(NUMBERNS, "boolean-style"):(self.s_ignorexml, None),
(NUMBERNS, "currency-style"):(self.s_ignorexml, None),
(NUMBERNS, "date-style"):(self.s_ignorexml, None),
(NUMBERNS, "number-style"):(self.s_ignorexml, None),
(NUMBERNS, "text-style"):(self.s_ignorexml, None),
(OFFICENS, "annotation"):(self.s_ignorexml, None),
(OFFICENS, "automatic-styles"):(self.s_office_automatic_styles, None),
(OFFICENS, "document"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "document-content"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "forms"):(self.s_ignorexml, None),
(OFFICENS, "master-styles"):(self.s_office_master_styles, None),
(OFFICENS, "meta"):(self.s_ignorecont, None),
(OFFICENS, "presentation"):(self.s_office_presentation, self.e_office_presentation),
(OFFICENS, "spreadsheet"):(self.s_office_spreadsheet, self.e_office_spreadsheet),
(OFFICENS, "styles"):(self.s_office_styles, None),
(OFFICENS, "text"):(self.s_office_text, self.e_office_text),
(OFFICENS, "scripts"):(self.s_ignorexml, None),
(OFFICENS, "settings"):(self.s_ignorexml, None),
(PRESENTATIONNS, "notes"):(self.s_ignorexml, None),
# (STYLENS, "default-page-layout"):(self.s_style_default_page_layout, self.e_style_page_layout),
(STYLENS, "default-page-layout"):(self.s_ignorexml, None),
(STYLENS, "default-style"):(self.s_style_default_style, self.e_style_default_style),
(STYLENS, "drawing-page-properties"):(self.s_style_handle_properties, None),
(STYLENS, "font-face"):(self.s_style_font_face, None),
# (STYLENS, "footer"):(self.s_style_footer, self.e_style_footer),
# (STYLENS, "footer-style"):(self.s_style_footer_style, None),
(STYLENS, "graphic-properties"):(self.s_style_handle_properties, None),
(STYLENS, "handout-master"):(self.s_ignorexml, None),
# (STYLENS, "header"):(self.s_style_header, self.e_style_header),
# (STYLENS, "header-footer-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "header-style"):(self.s_style_header_style, None),
(STYLENS, "master-page"):(self.s_style_master_page, None),
(STYLENS, "page-layout-properties"):(self.s_style_handle_properties, None),
(STYLENS, "page-layout"):(self.s_style_page_layout, self.e_style_page_layout),
# (STYLENS, "page-layout"):(self.s_ignorexml, None),
(STYLENS, "paragraph-properties"):(self.s_style_handle_properties, None),
(STYLENS, "style"):(self.s_style_style, self.e_style_style),
(STYLENS, "table-cell-properties"):(self.s_style_handle_properties, None),
(STYLENS, "table-column-properties"):(self.s_style_handle_properties, None),
(STYLENS, "table-properties"):(self.s_style_handle_properties, None),
(STYLENS, "text-properties"):(self.s_style_handle_properties, None),
(SVGNS, 'desc'): (self.s_ignorexml, None),
(TABLENS, 'covered-table-cell'): (self.s_ignorexml, None),
(TABLENS, 'table-cell'): (self.s_table_table_cell, self.e_table_table_cell),
(TABLENS, 'table-column'): (self.s_table_table_column, None),
(TABLENS, 'table-row'): (self.s_table_table_row, self.e_table_table_row),
(TABLENS, 'table'): (self.s_table_table, self.e_table_table),
(TEXTNS, 'a'): (self.s_text_a, self.e_text_a),
(TEXTNS, "alphabetical-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "bibliography-configuration"):(self.s_ignorexml, None),
(TEXTNS, "bibliography-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'bookmark'): (self.s_text_bookmark, None),
(TEXTNS, 'bookmark-start'): (self.s_text_bookmark, None),
(TEXTNS, 'bookmark-ref'): (self.s_text_bookmark_ref, self.e_text_a),
(TEXTNS, 'bookmark-ref-start'): (self.s_text_bookmark_ref, None),
(TEXTNS, 'h'): (self.s_text_h, self.e_text_h),
(TEXTNS, "illustration-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'line-break'):(self.s_text_line_break, None),
(TEXTNS, "linenumbering-configuration"):(self.s_ignorexml, None),
(TEXTNS, "list"):(self.s_text_list, self.e_text_list),
(TEXTNS, "list-item"):(self.s_text_list_item, self.e_text_list_item),
(TEXTNS, "list-level-style-bullet"):(self.s_text_list_level_style_bullet, self.e_text_list_level_style_bullet),
(TEXTNS, "list-level-style-number"):(self.s_text_list_level_style_number, self.e_text_list_level_style_number),
(TEXTNS, "list-style"):(None, None),
(TEXTNS, "note"):(self.s_text_note, None),
(TEXTNS, "note-body"):(self.s_text_note_body, self.e_text_note_body),
(TEXTNS, "note-citation"):(None, self.e_text_note_citation),
(TEXTNS, "notes-configuration"):(self.s_ignorexml, None),
(TEXTNS, "object-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'p'): (self.s_text_p, self.e_text_p),
(TEXTNS, 's'): (self.s_text_s, None),
(TEXTNS, 'span'): (self.s_text_span, self.e_text_span),
(TEXTNS, 'tab'): (self.s_text_tab, None),
(TEXTNS, "table-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "table-of-content-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "user-index-source"):(self.s_text_x_source, self.e_text_x_source),
}
if embedable:
self.make_embedable()
self._resetobject()
def set_plain(self):
""" Tell the parser to not generate CSS """
self.generate_css = False
def set_embedable(self):
""" Tells the converter to only output the parts inside the <body>"""
self.elements[(OFFICENS, u"text")] = (None,None)
self.elements[(OFFICENS, u"spreadsheet")] = (None,None)
self.elements[(OFFICENS, u"presentation")] = (None,None)
self.elements[(OFFICENS, u"document-content")] = (None,None)
def add_style_file(self, stylefilename, media=None):
""" Add a link to an external style file.
Also turns of the embedding of styles in the HTML
"""
self.use_internal_css = False
self.stylefilename = stylefilename
if media:
self.metatags.append('<link rel="stylesheet" type="text/css" href="%s" media="%s"/>\n' % (stylefilename,media))
else:
self.metatags.append('<link rel="stylesheet" type="text/css" href="%s"/>\n' % (stylefilename))
def _resetfootnotes(self):
# Footnotes and endnotes
self.notedict = {}
self.currentnote = 0
self.notebody = ''
def _resetobject(self):
self.lines = []
self._wfunc = self._wlines
self.xmlfile = ''
self.title = ''
self.language = ''
self.creator = ''
self.data = []
self.tagstack = TagStack()
self.htmlstack = []
self.pstack = []
self.processelem = True
self.processcont = True
self.listtypes = {}
self.headinglevels = [0, 0,0,0,0,0, 0,0,0,0,0] # level 0 to 10
self.use_internal_css = True
self.cs = StyleToCSS()
self.anchors = {}
# Style declarations
self.stylestack = []
self.styledict = {}
self.currentstyle = None
self._resetfootnotes()
# Tags from meta.xml
self.metatags = []
def writeout(self, s):
if s != '':
self._wfunc(s)
def writedata(self):
d = ''.join(self.data)
if d != '':
self.writeout(escape(d))
def opentag(self, tag, attrs={}, block=False):
""" Create an open HTML tag """
self.htmlstack.append((tag,attrs,block))
a = []
for key,val in attrs.items():
a.append('''%s=%s''' % (key, quoteattr(val)))
if len(a) == 0:
self.writeout("<%s>" % tag)
else:
self.writeout("<%s %s>" % (tag, " ".join(a)))
if block == True:
self.writeout("\n")
def closetag(self, tag, block=True):
""" Close an open HTML tag """
self.htmlstack.pop()
self.writeout("</%s>" % tag)
if block == True:
self.writeout("\n")
def emptytag(self, tag, attrs={}):
a = []
for key,val in attrs.items():
a.append('''%s=%s''' % (key, quoteattr(val)))
self.writeout("<%s %s/>\n" % (tag, " ".join(a)))
#--------------------------------------------------
# Interface to parser
#--------------------------------------------------
def characters(self, data):
if self.processelem and self.processcont:
self.data.append(data)
def startElementNS(self, tag, qname, attrs):
self.pstack.append( (self.processelem, self.processcont) )
if self.processelem:
method = self.elements.get(tag, (None, None) )[0]
if method:
self.handle_starttag(tag, method, attrs)
else:
self.unknown_starttag(tag,attrs)
self.tagstack.push( tag, attrs )
def endElementNS(self, tag, qname):
stag, attrs = self.tagstack.pop()
if self.processelem:
method = self.elements.get(tag, (None, None) )[1]
if method:
self.handle_endtag(tag, attrs, method)
else:
self.unknown_endtag(tag, attrs)
self.processelem, self.processcont = self.pstack.pop()
#--------------------------------------------------
def handle_starttag(self, tag, method, attrs):
method(tag,attrs)
def handle_endtag(self, tag, attrs, method):
method(tag, attrs)
def unknown_starttag(self, tag, attrs):
pass
def unknown_endtag(self, tag, attrs):
pass
def s_ignorexml(self, tag, attrs):
""" Ignore this xml element and all children of it
It will automatically stop ignoring
"""
self.processelem = False
def s_ignorecont(self, tag, attrs):
""" Stop processing the text nodes """
self.processcont = False
def s_processcont(self, tag, attrs):
""" Start processing the text nodes """
self.processcont = True
def classname(self, attrs):
""" Generate a class name from a style name """
c = attrs.get((TEXTNS,'style-name'),'')
c = c.replace(".","_")
return c
def get_anchor(self, name):
""" Create a unique anchor id for a href name """
if not self.anchors.has_key(name):
# Changed by Kovid
self.anchors[name] = "anchor%d" % (len(self.anchors) + 1)
return self.anchors.get(name)
#--------------------------------------------------
def purgedata(self):
self.data = []
#-----------------------------------------------------------------------------
#
# Handle meta data
#
#-----------------------------------------------------------------------------
def e_dc_title(self, tag, attrs):
""" Get the title from the meta data and create a HTML <title>
"""
self.title = ''.join(self.data)
#self.metatags.append('<title>%s</title>\n' % escape(self.title))
self.data = []
def e_dc_metatag(self, tag, attrs):
""" Any other meta data is added as a <meta> element
"""
self.metatags.append('<meta name="%s" content=%s/>\n' % (tag[1], quoteattr(''.join(self.data))))
self.data = []
def e_dc_contentlanguage(self, tag, attrs):
""" Set the content language. Identifies the targeted audience
"""
self.language = ''.join(self.data)
self.metatags.append('<meta http-equiv="content-language" content="%s"/>\n' % escape(self.language))
self.data = []
def e_dc_creator(self, tag, attrs):
""" Set the content creator. Identifies the targeted audience
"""
self.creator = ''.join(self.data)
self.metatags.append('<meta http-equiv="creator" content="%s"/>\n' % escape(self.creator))
self.data = []
def s_custom_shape(self, tag, attrs):
""" A <draw:custom-shape> is made into a <div> in HTML which is then styled
"""
anchor_type = attrs.get((TEXTNS,'anchor-type'),'notfound')
htmltag = 'div'
name = "G-" + attrs.get( (DRAWNS,'style-name'), "")
if name == 'G-':
name = "PR-" + attrs.get( (PRESENTATIONNS,'style-name'), "")
name = name.replace(".","_")
if anchor_type == "paragraph":
style = 'position:absolute;'
elif anchor_type == 'char':
style = "position:absolute;"
elif anchor_type == 'as-char':
htmltag = 'div'
style = ''
else:
style = "position: absolute;"
if attrs.has_key( (SVGNS,"width") ):
style = style + "width:" + attrs[(SVGNS,"width")] + ";"
if attrs.has_key( (SVGNS,"height") ):
style = style + "height:" + attrs[(SVGNS,"height")] + ";"
if attrs.has_key( (SVGNS,"x") ):
style = style + "left:" + attrs[(SVGNS,"x")] + ";"
if attrs.has_key( (SVGNS,"y") ):
style = style + "top:" + attrs[(SVGNS,"y")] + ";"
if self.generate_css:
self.opentag(htmltag, {'class': name, 'style': style})
else:
self.opentag(htmltag)
def e_custom_shape(self, tag, attrs):
""" End the <draw:frame>
"""
self.closetag('div')
def s_draw_frame(self, tag, attrs):
""" A <draw:frame> is made into a <div> in HTML which is then styled
"""
anchor_type = attrs.get((TEXTNS,'anchor-type'),'notfound')
htmltag = 'div'
name = "G-" + attrs.get( (DRAWNS,'style-name'), "")
if name == 'G-':
name = "PR-" + attrs.get( (PRESENTATIONNS,'style-name'), "")
name = name.replace(".","_")
if anchor_type == "paragraph":
style = 'position:relative;'
elif anchor_type == 'char':
style = "position:relative;"
elif anchor_type == 'as-char':
htmltag = 'div'
style = ''
else:
style = "position:absolute;"
if attrs.has_key( (SVGNS,"width") ):
style = style + "width:" + attrs[(SVGNS,"width")] + ";"
if attrs.has_key( (SVGNS,"height") ):
style = style + "height:" + attrs[(SVGNS,"height")] + ";"
if attrs.has_key( (SVGNS,"x") ):
style = style + "left:" + attrs[(SVGNS,"x")] + ";"
if attrs.has_key( (SVGNS,"y") ):
style = style + "top:" + attrs[(SVGNS,"y")] + ";"
if self.generate_css:
self.opentag(htmltag, {'class': name, 'style': style})
else:
self.opentag(htmltag)
def e_draw_frame(self, tag, attrs):
""" End the <draw:frame>
"""
self.closetag('div')
def s_draw_fill_image(self, tag, attrs):
name = attrs.get( (DRAWNS,'name'), "NoName")
imghref = attrs[(XLINKNS,"href")]
imghref = self.rewritelink(imghref)
self.cs.fillimages[name] = imghref
def rewritelink(self, imghref):
""" Intended to be overloaded if you don't store your pictures
in a Pictures subfolder
"""
return imghref
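    # Hypothetical override sketch (added comment); the "images/" prefix below is
    # only an example, not something this module defines:
    #
    #   class MyODF2XHTML(ODF2XHTML):
    #       def rewritelink(self, imghref):
    #           return "images/" + imghref.split("/")[-1]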
def s_draw_image(self, tag, attrs):
""" A <draw:image> becomes an <img/> element
"""
parent = self.tagstack.stackparent()
anchor_type = parent.get((TEXTNS,'anchor-type'))
imghref = attrs[(XLINKNS,"href")]
imghref = self.rewritelink(imghref)
htmlattrs = {'alt':"", 'src':imghref }
if self.generate_css:
if anchor_type != "char":
htmlattrs['style'] = "display: block;"
self.emptytag('img', htmlattrs)
def s_draw_object(self, tag, attrs):
""" A <draw:object> is embedded object in the document (e.g. spreadsheet in presentation).
"""
return # Added by Kovid
objhref = attrs[(XLINKNS,"href")]
# Remove leading "./": from "./Object 1" to "Object 1"
# objhref = objhref [2:]
# Not using os.path.join since it fails to find the file on Windows.
# objcontentpath = '/'.join([objhref, 'content.xml'])
for c in self.document.childnodes:
if c.folder == objhref:
self._walknode(c.topnode)
def s_draw_object_ole(self, tag, attrs):
""" A <draw:object-ole> is embedded OLE object in the document (e.g. MS Graph).
"""
try:
class_id = attrs[(DRAWNS,"class-id")]
except KeyError: # Added by Kovid to ignore <draw> without the right
return # attributes
if class_id and class_id.lower() == "00020803-0000-0000-c000-000000000046": ## Microsoft Graph 97 Chart
tagattrs = { 'name':'object_ole_graph', 'class':'ole-graph' }
self.opentag('a', tagattrs)
self.closetag('a', tagattrs)
def s_draw_page(self, tag, attrs):
""" A <draw:page> is a slide in a presentation. We use a <fieldset> element in HTML.
            Therefore if you convert an ODP file, you get a series of <fieldset>s.
Override this for your own purpose.
"""
name = attrs.get( (DRAWNS,'name'), "NoName")
stylename = attrs.get( (DRAWNS,'style-name'), "")
stylename = stylename.replace(".","_")
masterpage = attrs.get( (DRAWNS,'master-page-name'),"")
masterpage = masterpage.replace(".","_")
if self.generate_css:
self.opentag('fieldset', {'class':"DP-%s MP-%s" % (stylename, masterpage) })
else:
self.opentag('fieldset')
self.opentag('legend')
self.writeout(escape(name))
self.closetag('legend')
def e_draw_page(self, tag, attrs):
self.closetag('fieldset')
def s_draw_textbox(self, tag, attrs):
style = ''
if attrs.has_key( (FONS,"min-height") ):
style = style + "min-height:" + attrs[(FONS,"min-height")] + ";"
self.opentag('div')
# self.opentag('div', {'style': style})
def e_draw_textbox(self, tag, attrs):
""" End the <draw:text-box>
"""
self.closetag('div')
def html_body(self, tag, attrs):
self.writedata()
if self.generate_css and self.use_internal_css:
self.opentag('style', {'type':"text/css"}, True)
self.writeout('/*<![CDATA[*/\n')
self.generate_stylesheet()
self.writeout('/*]]>*/\n')
self.closetag('style')
self.purgedata()
self.closetag('head')
self.opentag('body', block=True)
# background-color: white removed by Kovid for #9118
# Specifying an explicit bg color prevents ebook readers
# from successfully inverting colors
default_styles = """
img { width: 100%; height: 100%; }
* { padding: 0; margin: 0; }
body { margin: 0 1em; }
ol, ul { padding-left: 2em; }
"""
def generate_stylesheet(self):
for name in self.stylestack:
styles = self.styledict.get(name)
# Preload with the family's default style
if styles.has_key('__style-family') and self.styledict.has_key(styles['__style-family']):
familystyle = self.styledict[styles['__style-family']].copy()
del styles['__style-family']
for style, val in styles.items():
familystyle[style] = val
styles = familystyle
# Resolve the remaining parent styles
while styles.has_key('__parent-style-name') and self.styledict.has_key(styles['__parent-style-name']):
parentstyle = self.styledict[styles['__parent-style-name']].copy()
del styles['__parent-style-name']
for style, val in styles.items():
parentstyle[style] = val
styles = parentstyle
self.styledict[name] = styles
# Write the styles to HTML
self.writeout(self.default_styles)
# Changed by Kovid to not write out endless copies of the same style
css_styles = {}
for name in self.stylestack:
styles = self.styledict.get(name)
css2 = tuple(self.cs.convert_styles(styles).iteritems())
if css2 in css_styles:
css_styles[css2].append(name)
else:
css_styles[css2] = [name]
def filter_margins(css2):
names = { k for k, v in css2 }
ignore = set()
if {'margin-left', 'margin-right', 'margin-top',
'margin-bottom'}.issubset(names):
# These come from XML and we cannot preserve XML attribute
# order so we assume that margin is to be overridden See
# https://bugs.launchpad.net/calibre/+bug/941134 and
# https://bugs.launchpad.net/calibre/+bug/1002702
ignore.add('margin')
css2 = sorted(css2, key=lambda x:{'margin':0}.get(x[0], 1))
for k, v in css2:
if k not in ignore:
yield k, v
for css2, names in css_styles.iteritems():
self.writeout("%s {\n" % ', '.join(names))
for style, val in filter_margins(css2):
self.writeout("\t%s: %s;\n" % (style, val) )
self.writeout("}\n")
def generate_footnotes(self):
if self.currentnote == 0:
return
if self.generate_css:
self.opentag('ol', {'style':'border-top: 1px solid black'}, True)
else:
self.opentag('ol')
for key in range(1,self.currentnote+1):
note = self.notedict[key]
# for key,note in self.notedict.items():
self.opentag('li', { 'id':"footnote-%d" % key })
# self.opentag('sup')
# self.writeout(escape(note['citation']))
# self.closetag('sup', False)
self.writeout(note['body'])
self.closetag('li')
self.closetag('ol')
def s_office_automatic_styles(self, tag, attrs):
if self.xmlfile == 'styles.xml':
self.autoprefix = "A"
else:
self.autoprefix = ""
def s_office_document_content(self, tag, attrs):
""" First tag in the content.xml file"""
self.writeout('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" ')
self.writeout('"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
self.opentag('html', {'xmlns':"http://www.w3.org/1999/xhtml"}, True)
self.opentag('head', block=True)
self.emptytag('meta', { 'http-equiv':"Content-Type", 'content':"text/html;charset=UTF-8"})
for metaline in self.metatags:
self.writeout(metaline)
self.writeout('<title>%s</title>\n' % escape(self.title))
def e_office_document_content(self, tag, attrs):
""" Last tag """
self.closetag('html')
def s_office_master_styles(self, tag, attrs):
""" """
def s_office_presentation(self, tag, attrs):
""" For some odd reason, OpenOffice Impress doesn't define a default-style
for the 'paragraph'. We therefore force a standard when we see
it is a presentation
"""
self.styledict['p'] = {(FONS,u'font-size'): u"24pt" }
self.styledict['presentation'] = {(FONS,u'font-size'): u"24pt" }
self.html_body(tag, attrs)
def e_office_presentation(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_office_spreadsheet(self, tag, attrs):
self.html_body(tag, attrs)
def e_office_spreadsheet(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_office_styles(self, tag, attrs):
self.autoprefix = ""
def s_office_text(self, tag, attrs):
""" OpenDocument text """
self.styledict['frame'] = { (STYLENS,'wrap'): u'parallel'}
self.html_body(tag, attrs)
def e_office_text(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_style_handle_properties(self, tag, attrs):
""" Copy all attributes to a struct.
We will later convert them to CSS2
"""
if self.currentstyle is None: # Added by Kovid
return
for key,attr in attrs.items():
self.styledict[self.currentstyle][key] = attr
familymap = {'frame':'frame', 'paragraph':'p', 'presentation':'presentation',
'text':'span','section':'div',
'table':'table','table-cell':'td','table-column':'col',
'table-row':'tr','graphic':'graphic' }
def s_style_default_style(self, tag, attrs):
""" A default style is like a style on an HTML tag
"""
family = attrs[(STYLENS,'family')]
htmlfamily = self.familymap.get(family,'unknown')
self.currentstyle = htmlfamily
# self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def e_style_default_style(self, tag, attrs):
self.currentstyle = None
def s_style_font_face(self, tag, attrs):
""" It is possible that the HTML browser doesn't know how to
show a particular font. Luckily ODF provides generic fallbacks
Unfortunately they are not the same as CSS2.
CSS2: serif, sans-serif, cursive, fantasy, monospace
ODF: roman, swiss, modern, decorative, script, system
"""
name = attrs[(STYLENS,"name")]
family = attrs[(SVGNS,"font-family")]
generic = attrs.get( (STYLENS,'font-family-generic'),"" )
self.cs.save_font(name, family, generic)
def s_style_footer(self, tag, attrs):
self.opentag('div', { 'id':"footer" })
self.purgedata()
def e_style_footer(self, tag, attrs):
self.writedata()
self.closetag('div')
self.purgedata()
def s_style_footer_style(self, tag, attrs):
self.currentstyle = "@print #footer"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_header(self, tag, attrs):
self.opentag('div', { 'id':"header" })
self.purgedata()
def e_style_header(self, tag, attrs):
self.writedata()
self.closetag('div')
self.purgedata()
def s_style_header_style(self, tag, attrs):
self.currentstyle = "@print #header"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_default_page_layout(self, tag, attrs):
""" Collect the formatting for the default page layout style.
"""
self.currentstyle = "@page"
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def s_style_page_layout(self, tag, attrs):
""" Collect the formatting for the page layout style.
This won't work in CSS 2.1, as page identifiers are not allowed.
It is legal in CSS3, but the rest of the application doesn't specify when to use what page layout
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
self.currentstyle = ".PL-" + name
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
def e_style_page_layout(self, tag, attrs):
""" End this style
"""
self.currentstyle = None
def s_style_master_page(self, tag, attrs):
""" Collect the formatting for the page layout style.
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
self.currentstyle = ".MP-" + name
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {('','position'):'relative'}
# Then load the pagelayout style if we find it
pagelayout = attrs.get( (STYLENS,'page-layout-name'), None)
if pagelayout:
pagelayout = ".PL-" + pagelayout
if self.styledict.has_key( pagelayout ):
styles = self.styledict[pagelayout]
for style, val in styles.items():
self.styledict[self.currentstyle][style] = val
else:
self.styledict[self.currentstyle]['__parent-style-name'] = pagelayout
self.s_ignorexml(tag, attrs)
# Short prefixes for class selectors
_familyshort = {'drawing-page':'DP', 'paragraph':'P', 'presentation':'PR',
'text':'S', 'section':'D',
'table':'T', 'table-cell':'TD', 'table-column':'TC',
'table-row':'TR', 'graphic':'G' }
def s_style_style(self, tag, attrs):
""" Collect the formatting for the style.
Styles have scope. The same name can be used for both paragraph and
            character styles. Since CSS has no scope we use a prefix. (Not elegant)
In ODF a style can have a parent, these parents can be chained.
We may not have encountered the parent yet, but if we have, we resolve it.
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
family = attrs[(STYLENS,'family')]
htmlfamily = self.familymap.get(family,'unknown')
sfamily = self._familyshort.get(family,'X')
name = "%s%s-%s" % (self.autoprefix, sfamily, name)
parent = attrs.get( (STYLENS,'parent-style-name') )
self.currentstyle = special_styles.get(name,"."+name)
self.stylestack.append(self.currentstyle)
if not self.styledict.has_key(self.currentstyle):
self.styledict[self.currentstyle] = {}
self.styledict[self.currentstyle]['__style-family'] = htmlfamily
# Then load the parent style if we find it
if parent:
parent = "%s-%s" % (sfamily, parent)
parent = special_styles.get(parent, "."+parent)
if self.styledict.has_key( parent ):
styles = self.styledict[parent]
for style, val in styles.items():
self.styledict[self.currentstyle][style] = val
else:
self.styledict[self.currentstyle]['__parent-style-name'] = parent
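        # Worked example (added comment), assuming an automatic style from content.xml:
        # an ODF style named "P5" with family "paragraph" and parent "Standard"
        # becomes the CSS selector ".P-P5" with parent ".P-Standard"; well-known
        # names such as "Heading_20_1" instead map straight to an HTML tag via
        # special_styles (e.g. "P-Heading_20_1" -> "h1").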
def e_style_style(self, tag, attrs):
""" End this style
"""
self.currentstyle = None
def s_table_table(self, tag, attrs):
""" Start a table
"""
c = attrs.get( (TABLENS,'style-name'), None)
if c and self.generate_css:
c = c.replace(".","_")
self.opentag('table',{ 'class': "T-%s" % c })
else:
self.opentag('table')
self.purgedata()
def e_table_table(self, tag, attrs):
""" End a table
"""
self.writedata()
self.closetag('table')
self.purgedata()
def s_table_table_cell(self, tag, attrs):
""" Start a table cell """
#FIXME: number-columns-repeated § 8.1.3
#repeated = int(attrs.get( (TABLENS,'number-columns-repeated'), 1))
htmlattrs = {}
rowspan = attrs.get( (TABLENS,'number-rows-spanned') )
if rowspan:
htmlattrs['rowspan'] = rowspan
colspan = attrs.get( (TABLENS,'number-columns-spanned') )
if colspan:
htmlattrs['colspan'] = colspan
c = attrs.get( (TABLENS,'style-name') )
if c:
htmlattrs['class'] = 'TD-%s' % c.replace(".","_")
self.opentag('td', htmlattrs)
self.purgedata()
def e_table_table_cell(self, tag, attrs):
""" End a table cell """
self.writedata()
self.closetag('td')
self.purgedata()
def s_table_table_column(self, tag, attrs):
""" Start a table column """
c = attrs.get( (TABLENS,'style-name'), None)
repeated = int(attrs.get( (TABLENS,'number-columns-repeated'), 1))
htmlattrs = {}
if c:
htmlattrs['class'] = "TC-%s" % c.replace(".","_")
for x in xrange(repeated):
self.emptytag('col', htmlattrs)
self.purgedata()
def s_table_table_row(self, tag, attrs):
""" Start a table row """
#FIXME: table:number-rows-repeated
c = attrs.get( (TABLENS,'style-name'), None)
htmlattrs = {}
if c:
htmlattrs['class'] = "TR-%s" % c.replace(".","_")
self.opentag('tr', htmlattrs)
self.purgedata()
def e_table_table_row(self, tag, attrs):
""" End a table row """
self.writedata()
self.closetag('tr')
self.purgedata()
def s_text_a(self, tag, attrs):
""" Anchors start """
self.writedata()
href = attrs[(XLINKNS,"href")].split("|")[0]
if href[:1] == "#": # Changed by Kovid
href = "#" + self.get_anchor(href[1:])
self.opentag('a', {'href':href})
self.purgedata()
def e_text_a(self, tag, attrs):
""" End an anchor or bookmark reference """
self.writedata()
self.closetag('a', False)
self.purgedata()
def s_text_bookmark(self, tag, attrs):
""" Bookmark definition """
name = attrs[(TEXTNS,'name')]
html_id = self.get_anchor(name)
self.writedata()
self.opentag('span', {'id':html_id})
self.closetag('span', False)
self.purgedata()
def s_text_bookmark_ref(self, tag, attrs):
""" Bookmark reference """
name = attrs[(TEXTNS,'ref-name')]
html_id = "#" + self.get_anchor(name)
self.writedata()
self.opentag('a', {'href':html_id})
self.purgedata()
def s_text_h(self, tag, attrs):
""" Headings start """
level = int(attrs[(TEXTNS,'outline-level')])
if level > 6: level = 6 # Heading levels go only to 6 in XHTML
if level < 1: level = 1
self.headinglevels[level] = self.headinglevels[level] + 1
name = self.classname(attrs)
for x in range(level + 1,10):
self.headinglevels[x] = 0
special = special_styles.get("P-"+name)
if special or not self.generate_css:
self.opentag('h%s' % level)
else:
self.opentag('h%s' % level, {'class':"P-%s" % name })
self.purgedata()
def e_text_h(self, tag, attrs):
""" Headings end
Side-effect: If there is no title in the metadata, then it is taken
from the first heading of any level.
"""
self.writedata()
level = int(attrs[(TEXTNS,'outline-level')])
if level > 6: level = 6 # Heading levels go only to 6 in XHTML
if level < 1: level = 1
lev = self.headinglevels[1:level+1]
outline = '.'.join(map(str,lev) )
heading = ''.join(self.data)
if self.title == '': self.title = heading
# Changed by Kovid
tail = ''.join(self.data)
anchor = self.get_anchor("%s.%s" % ( outline, tail))
anchor2 = self.get_anchor(tail) # Added by kovid to fix #7506
self.opentag('a', {'id': anchor} )
self.closetag('a', False)
self.opentag('a', {'id': anchor2} )
self.closetag('a', False)
self.closetag('h%s' % level)
self.purgedata()
def s_text_line_break(self, tag, attrs):
""" Force a line break (<br/>) """
self.writedata()
self.emptytag('br')
self.purgedata()
def s_text_list(self, tag, attrs):
""" Start a list (<ul> or <ol>)
To know which level we're at, we have to count the number
of <text:list> elements on the tagstack.
"""
name = attrs.get( (TEXTNS,'style-name') )
level = self.tagstack.count_tags(tag) + 1
if name:
name = name.replace(".","_")
else:
# FIXME: If a list is contained in a table cell or text box,
# the list level must return to 1, even though the table or
# textbox itself may be nested within another list.
name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
list_class = "%s_%d" % (name, level)
if self.generate_css:
self.opentag('%s' % self.listtypes.get(list_class,'ul'), {'class': list_class })
else:
self.opentag('%s' % self.listtypes.get(list_class,'ul'))
self.purgedata()
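        # Added note: the nesting level comes from counting <text:list> tags already
        # on the tagstack, so a list using style "L1" nested one level inside another
        # list gets level 2 and is emitted with class "L1_2".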
def e_text_list(self, tag, attrs):
""" End a list """
self.writedata()
name = attrs.get( (TEXTNS,'style-name') )
level = self.tagstack.count_tags(tag) + 1
if name:
name = name.replace(".","_")
else:
# FIXME: If a list is contained in a table cell or text box,
# the list level must return to 1, even though the table or
# textbox itself may be nested within another list.
name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
list_class = "%s_%d" % (name, level)
self.closetag(self.listtypes.get(list_class,'ul'))
self.purgedata()
def s_text_list_item(self, tag, attrs):
""" Start list item """
self.opentag('li')
self.purgedata()
def e_text_list_item(self, tag, attrs):
""" End list item """
self.writedata()
self.closetag('li')
self.purgedata()
def s_text_list_level_style_bullet(self, tag, attrs):
""" CSS doesn't have the ability to set the glyph
to a particular character, so we just go through
the available glyphs
"""
name = self.tagstack.rfindattr( (STYLENS,'name') )
level = attrs[(TEXTNS,'level')]
self.prevstyle = self.currentstyle
list_class = "%s_%s" % (name, level)
self.listtypes[list_class] = 'ul'
self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
level = int(level)
listtype = ("square", "disc", "circle")[level % 3]
self.styledict[self.currentstyle][('','list-style-type')] = listtype
def e_text_list_level_style_bullet(self, tag, attrs):
self.currentstyle = self.prevstyle
del self.prevstyle
def s_text_list_level_style_number(self, tag, attrs):
name = self.tagstack.stackparent()[(STYLENS,'name')]
level = attrs[(TEXTNS,'level')]
        num_format = attrs.get( (STYLENS,'num-format'),"1")
list_class = "%s_%s" % (name, level)
self.prevstyle = self.currentstyle
self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
self.listtypes[list_class] = 'ol'
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
if num_format == "1": listtype = "decimal"
elif num_format == "I": listtype = "upper-roman"
elif num_format == "i": listtype = "lower-roman"
elif num_format == "A": listtype = "upper-alpha"
elif num_format == "a": listtype = "lower-alpha"
else: listtype = "decimal"
self.styledict[self.currentstyle][('','list-style-type')] = listtype
def e_text_list_level_style_number(self, tag, attrs):
self.currentstyle = self.prevstyle
del self.prevstyle
def s_text_note(self, tag, attrs):
self.writedata()
self.purgedata()
self.currentnote = self.currentnote + 1
self.notedict[self.currentnote] = {}
self.notebody = []
def e_text_note(self, tag, attrs):
pass
def collectnote(self,s):
if s != '':
self.notebody.append(s)
def s_text_note_body(self, tag, attrs):
self._orgwfunc = self._wfunc
self._wfunc = self.collectnote
def e_text_note_body(self, tag, attrs):
self._wfunc = self._orgwfunc
self.notedict[self.currentnote]['body'] = ''.join(self.notebody)
self.notebody = ''
del self._orgwfunc
def e_text_note_citation(self, tag, attrs):
mark = ''.join(self.data)
self.notedict[self.currentnote]['citation'] = mark
self.opentag('a',{ 'href': "#footnote-%s" % self.currentnote })
self.opentag('sup')
# self.writeout( escape(mark) )
# Since HTML only knows about endnotes, there is too much risk that the
# marker is reused in the source. Therefore we force numeric markers
self.writeout(unicode(self.currentnote))
self.closetag('sup')
self.closetag('a')
def s_text_p(self, tag, attrs):
""" Paragraph
"""
htmlattrs = {}
specialtag = "p"
c = attrs.get( (TEXTNS,'style-name'), None)
if c:
c = c.replace(".","_")
specialtag = special_styles.get("P-"+c)
if specialtag is None:
specialtag = 'p'
if self.generate_css:
htmlattrs['class'] = "P-%s" % c
self.opentag(specialtag, htmlattrs)
self.purgedata()
def e_text_p(self, tag, attrs):
""" End Paragraph
"""
specialtag = "p"
c = attrs.get( (TEXTNS,'style-name'), None)
if c:
c = c.replace(".","_")
specialtag = special_styles.get("P-"+c)
if specialtag is None:
specialtag = 'p'
self.writedata()
if not self.data: # Added by Kovid
# Give substance to empty paragraphs, as rendered by OOo
self.writeout(' ')
self.closetag(specialtag)
self.purgedata()
def s_text_s(self, tag, attrs):
# Changed by Kovid to fix non breaking spaces being prepended to
# element instead of being part of the text flow.
# We don't use an entity for the nbsp as the contents of self.data will
# be escaped on writeout.
""" Generate a number of spaces. We use the non breaking space for
the text:s ODF element.
"""
try:
c = int(attrs.get((TEXTNS, 'c'), 1))
except:
c = 0
if c > 0:
self.data.append(u'\u00a0'*c)
def s_text_span(self, tag, attrs):
""" The <text:span> element matches the <span> element in HTML. It is
            typically used to set properties of the text.
"""
self.writedata()
c = attrs.get( (TEXTNS,'style-name'), None)
htmlattrs = {}
# Changed by Kovid to handle inline special styles defined on <text:span> tags.
# Apparently LibreOffice does this.
special = 'span'
if c:
c = c.replace(".","_")
special = special_styles.get("S-"+c)
if special is None:
special = 'span'
if self.generate_css:
htmlattrs['class'] = "S-%s" % c
self.opentag(special, htmlattrs)
self.purgedata()
def e_text_span(self, tag, attrs):
""" End the <text:span> """
self.writedata()
c = attrs.get( (TEXTNS,'style-name'), None)
# Changed by Kovid to handle inline special styles defined on <text:span> tags.
# Apparently LibreOffice does this.
special = 'span'
if c:
c = c.replace(".","_")
special = special_styles.get("S-"+c)
if special is None:
special = 'span'
self.closetag(special, False)
self.purgedata()
def s_text_tab(self, tag, attrs):
""" Move to the next tabstop. We ignore this in HTML
"""
self.writedata()
self.writeout(' ')
self.purgedata()
def s_text_x_source(self, tag, attrs):
""" Various indexes and tables of contents. We ignore those.
"""
self.writedata()
self.purgedata()
self.s_ignorexml(tag, attrs)
def e_text_x_source(self, tag, attrs):
""" Various indexes and tables of contents. We ignore those.
"""
self.writedata()
self.purgedata()
#-----------------------------------------------------------------------------
#
# Reading the file
#
#-----------------------------------------------------------------------------
def load(self, odffile):
""" Loads a document into the parser and parses it.
The argument can either be a filename or a document in memory.
"""
self.lines = []
self._wfunc = self._wlines
if isinstance(odffile, basestring) \
or hasattr(odffile, 'read'): # Added by Kovid
self.document = load(odffile)
else:
self.document = odffile
self._walknode(self.document.topnode)
def _walknode(self, node):
if node.nodeType == Node.ELEMENT_NODE:
self.startElementNS(node.qname, node.tagName, node.attributes)
for c in node.childNodes:
self._walknode(c)
self.endElementNS(node.qname, node.tagName)
if node.nodeType == Node.TEXT_NODE or node.nodeType == Node.CDATA_SECTION_NODE:
self.characters(unicode(node))
def odf2xhtml(self, odffile):
""" Load a file and return the XHTML
"""
self.load(odffile)
return self.xhtml()
def _wlines(self,s):
if s != '': self.lines.append(s)
def xhtml(self):
""" Returns the xhtml
"""
return ''.join(self.lines)
def _writecss(self, s):
if s != '': self._csslines.append(s)
def _writenothing(self, s):
pass
def css(self):
""" Returns the CSS content """
self._csslines = []
self._wfunc = self._writecss
self.generate_stylesheet()
res = ''.join(self._csslines)
self._wfunc = self._wlines
del self._csslines
return res
def save(self, outputfile, addsuffix=False):
""" Save the HTML under the filename.
If the filename is '-' then save to stdout
We have the last style filename in self.stylefilename
"""
if outputfile == '-':
import sys # Added by Kovid
outputfp = sys.stdout
else:
if addsuffix:
outputfile = outputfile + ".html"
outputfp = file(outputfile, "w")
outputfp.write(self.xhtml().encode('us-ascii','xmlcharrefreplace'))
outputfp.close()
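# Minimal usage sketch added for exposition; "document.odt" and "document.html"
# are placeholder filenames, not files this module provides.
def _demo_convert_document():
    odhandler = ODF2XHTML()
    xhtml = odhandler.odf2xhtml("document.odt")  # parse the ODF file and get the XHTML string
    css = odhandler.css()                        # CSS generated from the collected ODF styles
    odhandler.save("document.html")              # write the same XHTML to disk
    return xhtml, css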
class ODF2XHTMLembedded(ODF2XHTML):
""" The ODF2XHTML parses an ODF file and produces XHTML"""
def __init__(self, lines, generate_css=True, embedable=False):
self._resetobject()
self.lines = lines
# Tags
self.generate_css = generate_css
self.elements = {
# (DCNS, 'title'): (self.s_processcont, self.e_dc_title),
# (DCNS, 'language'): (self.s_processcont, self.e_dc_contentlanguage),
# (DCNS, 'creator'): (self.s_processcont, self.e_dc_metatag),
# (DCNS, 'description'): (self.s_processcont, self.e_dc_metatag),
# (DCNS, 'date'): (self.s_processcont, self.e_dc_metatag),
(DRAWNS, 'frame'): (self.s_draw_frame, self.e_draw_frame),
(DRAWNS, 'image'): (self.s_draw_image, None),
(DRAWNS, 'fill-image'): (self.s_draw_fill_image, None),
(DRAWNS, "layer-set"):(self.s_ignorexml, None),
(DRAWNS, 'page'): (self.s_draw_page, self.e_draw_page),
(DRAWNS, 'object'): (self.s_draw_object, None),
(DRAWNS, 'object-ole'): (self.s_draw_object_ole, None),
(DRAWNS, 'text-box'): (self.s_draw_textbox, self.e_draw_textbox),
# (METANS, 'creation-date'):(self.s_processcont, self.e_dc_metatag),
# (METANS, 'generator'):(self.s_processcont, self.e_dc_metatag),
# (METANS, 'initial-creator'): (self.s_processcont, self.e_dc_metatag),
# (METANS, 'keyword'): (self.s_processcont, self.e_dc_metatag),
(NUMBERNS, "boolean-style"):(self.s_ignorexml, None),
(NUMBERNS, "currency-style"):(self.s_ignorexml, None),
(NUMBERNS, "date-style"):(self.s_ignorexml, None),
(NUMBERNS, "number-style"):(self.s_ignorexml, None),
(NUMBERNS, "text-style"):(self.s_ignorexml, None),
# (OFFICENS, "automatic-styles"):(self.s_office_automatic_styles, None),
# (OFFICENS, "document-content"):(self.s_office_document_content, self.e_office_document_content),
(OFFICENS, "forms"):(self.s_ignorexml, None),
# (OFFICENS, "master-styles"):(self.s_office_master_styles, None),
(OFFICENS, "meta"):(self.s_ignorecont, None),
# (OFFICENS, "presentation"):(self.s_office_presentation, self.e_office_presentation),
# (OFFICENS, "spreadsheet"):(self.s_office_spreadsheet, self.e_office_spreadsheet),
# (OFFICENS, "styles"):(self.s_office_styles, None),
# (OFFICENS, "text"):(self.s_office_text, self.e_office_text),
(OFFICENS, "scripts"):(self.s_ignorexml, None),
(PRESENTATIONNS, "notes"):(self.s_ignorexml, None),
## (STYLENS, "default-page-layout"):(self.s_style_default_page_layout, self.e_style_page_layout),
# (STYLENS, "default-page-layout"):(self.s_ignorexml, None),
# (STYLENS, "default-style"):(self.s_style_default_style, self.e_style_default_style),
# (STYLENS, "drawing-page-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "font-face"):(self.s_style_font_face, None),
## (STYLENS, "footer"):(self.s_style_footer, self.e_style_footer),
## (STYLENS, "footer-style"):(self.s_style_footer_style, None),
# (STYLENS, "graphic-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "handout-master"):(self.s_ignorexml, None),
## (STYLENS, "header"):(self.s_style_header, self.e_style_header),
## (STYLENS, "header-footer-properties"):(self.s_style_handle_properties, None),
## (STYLENS, "header-style"):(self.s_style_header_style, None),
# (STYLENS, "master-page"):(self.s_style_master_page, None),
# (STYLENS, "page-layout-properties"):(self.s_style_handle_properties, None),
## (STYLENS, "page-layout"):(self.s_style_page_layout, self.e_style_page_layout),
# (STYLENS, "page-layout"):(self.s_ignorexml, None),
# (STYLENS, "paragraph-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "style"):(self.s_style_style, self.e_style_style),
# (STYLENS, "table-cell-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "table-column-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "table-properties"):(self.s_style_handle_properties, None),
# (STYLENS, "text-properties"):(self.s_style_handle_properties, None),
(SVGNS, 'desc'): (self.s_ignorexml, None),
(TABLENS, 'covered-table-cell'): (self.s_ignorexml, None),
(TABLENS, 'table-cell'): (self.s_table_table_cell, self.e_table_table_cell),
(TABLENS, 'table-column'): (self.s_table_table_column, None),
(TABLENS, 'table-row'): (self.s_table_table_row, self.e_table_table_row),
(TABLENS, 'table'): (self.s_table_table, self.e_table_table),
(TEXTNS, 'a'): (self.s_text_a, self.e_text_a),
(TEXTNS, "alphabetical-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "bibliography-configuration"):(self.s_ignorexml, None),
(TEXTNS, "bibliography-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'h'): (self.s_text_h, self.e_text_h),
(TEXTNS, "illustration-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'line-break'):(self.s_text_line_break, None),
(TEXTNS, "linenumbering-configuration"):(self.s_ignorexml, None),
(TEXTNS, "list"):(self.s_text_list, self.e_text_list),
(TEXTNS, "list-item"):(self.s_text_list_item, self.e_text_list_item),
(TEXTNS, "list-level-style-bullet"):(self.s_text_list_level_style_bullet, self.e_text_list_level_style_bullet),
(TEXTNS, "list-level-style-number"):(self.s_text_list_level_style_number, self.e_text_list_level_style_number),
(TEXTNS, "list-style"):(None, None),
(TEXTNS, "note"):(self.s_text_note, None),
(TEXTNS, "note-body"):(self.s_text_note_body, self.e_text_note_body),
(TEXTNS, "note-citation"):(None, self.e_text_note_citation),
(TEXTNS, "notes-configuration"):(self.s_ignorexml, None),
(TEXTNS, "object-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, 'p'): (self.s_text_p, self.e_text_p),
(TEXTNS, 's'): (self.s_text_s, None),
(TEXTNS, 'span'): (self.s_text_span, self.e_text_span),
(TEXTNS, 'tab'): (self.s_text_tab, None),
(TEXTNS, "table-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "table-of-content-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "user-index-source"):(self.s_text_x_source, self.e_text_x_source),
(TEXTNS, "page-number"):(None, None),
}
| gpl-3.0 | -5,261,564,764,330,478,000 | -774,515,313,733,312,000 | 40.231788 | 123 | 0.566145 | false |
fallen/artiq | artiq/frontend/artiq_run.py | 1 | 4103 | #!/usr/bin/env python3
# Copyright (C) 2014, 2015 M-Labs Limited
# Copyright (C) 2014, 2015 Robert Jordens <[email protected]>
import argparse
import sys
import time
from operator import itemgetter
from itertools import chain
import logging
import h5py
from artiq.language.environment import EnvExperiment
from artiq.protocols.file_db import FlatFileDB
from artiq.master.worker_db import DeviceManager, ResultDB
from artiq.tools import *
logger = logging.getLogger(__name__)
class ELFRunner(EnvExperiment):
def build(self):
self.attr_device("core")
self.attr_argument("file")
def run(self):
with open(self.file, "rb") as f:
self.core.comm.load(f.read())
self.core.comm.run("run")
self.core.comm.serve(dict(), dict())
class SimpleParamLogger:
def set(self, timestamp, name, value):
logger.info("Parameter change: {} = {}".format(name, value))
class DummyScheduler:
def __init__(self):
self.next_rid = 0
self.pipeline_name = "main"
self.priority = 0
self.expid = None
def submit(self, pipeline_name, expid, priority, due_date, flush):
rid = self.next_rid
self.next_rid += 1
logger.info("Submitting: %s, RID=%s", expid, rid)
return rid
def delete(self, rid):
logger.info("Deleting RID %s", rid)
def pause(self):
pass
def get_argparser(with_file=True):
parser = argparse.ArgumentParser(
description="Local experiment running tool")
verbosity_args(parser)
parser.add_argument("-d", "--ddb", default="ddb.pyon",
help="device database file")
parser.add_argument("-p", "--pdb", default="pdb.pyon",
help="parameter database file")
parser.add_argument("-e", "--experiment", default=None,
help="experiment to run")
parser.add_argument("-o", "--hdf5", default=None,
help="write results to specified HDF5 file"
" (default: print them)")
if with_file:
parser.add_argument("file",
help="file containing the experiment to run")
parser.add_argument("arguments", nargs="*",
help="run arguments")
return parser
def _build_experiment(dmgr, pdb, rdb, args):
if hasattr(args, "file"):
if args.file.endswith(".elf"):
if args.arguments:
raise ValueError("arguments not supported for ELF kernels")
if args.experiment:
raise ValueError("experiment-by-name not supported "
"for ELF kernels")
return ELFRunner(dmgr, pdb, rdb, file=args.file)
else:
module = file_import(args.file)
file = args.file
else:
module = sys.modules["__main__"]
file = getattr(module, "__file__")
exp = get_experiment(module, args.experiment)
arguments = parse_arguments(args.arguments)
expid = {
"file": file,
"experiment": args.experiment,
"arguments": arguments
}
dmgr.virtual_devices["scheduler"].expid = expid
return exp(dmgr, pdb, rdb, **arguments)
def run(with_file=False):
args = get_argparser(with_file).parse_args()
init_logger(args)
dmgr = DeviceManager(FlatFileDB(args.ddb),
virtual_devices={"scheduler": DummyScheduler()})
pdb = FlatFileDB(args.pdb)
pdb.hooks.append(SimpleParamLogger())
rdb = ResultDB()
try:
exp_inst = _build_experiment(dmgr, pdb, rdb, args)
exp_inst.prepare()
exp_inst.run()
exp_inst.analyze()
finally:
dmgr.close_devices()
if args.hdf5 is not None:
with h5py.File(args.hdf5, "w") as f:
rdb.write_hdf5(f)
elif rdb.rt.read or rdb.nrt:
r = chain(rdb.rt.read.items(), rdb.nrt.items())
for k, v in sorted(r, key=itemgetter(0)):
print("{}: {}".format(k, v))
def main():
return run(with_file=True)
if __name__ == "__main__":
main()
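# Illustrative command lines (an assumption based on the argument parser above,
# not taken from the original file):
#
#   artiq_run experiment.py                     # uses ddb.pyon / pdb.pyon in the cwd
#   artiq_run -e MyExperiment experiment.py     # pick one experiment from the file
#   artiq_run -o results.h5 experiment.py       # write results to HDF5 instead of stdout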
| gpl-3.0 | -7,597,973,428,868,383,000 | -155,251,083,813,065,100 | 27.894366 | 75 | 0.58835 | false |
blueskycoco/rt-thread | bsp/nuvoton/numaker-iot-m487/rtconfig.py | 12 | 3486 | import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# cross_tool provides the cross compiler
# EXEC_PATH is the path to the compiler executables, for example CodeSourcery, Keil MDK or IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Program Files (x86)\GNU Tools ARM Embedded\6 2017-q1-update\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:\Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:\Program Files (x86)\IAR Systems\Embedded Workbench 8.2'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = ''
# BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'g++'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -ffunction-sections -fdata-sections -Wuninitialized'
if BUILD == 'debug':
DEVICE = DEVICE + ' -DDEBUG'
CFLAGS = DEVICE
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map -T ./linking_scripts/m480_link.ld '
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu=cortex-m4.fp'
CFLAGS = DEVICE + ' --apcs=interwork'
AFLAGS = DEVICE
LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread.map --scatter ./linking_scripts/m480_flash.sct'
CFLAGS += ' --c99'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
EXEC_PATH += '/arm/bin40/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = ' '
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --debug'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' -Ol'
CFLAGS += ' --use_c++_inline'
AFLAGS = ''
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu None'
LFLAGS = ' --config ./linking_scripts/m480_flash.icf'
LFLAGS += ' --redirect _Printf=_PrintfTiny'
LFLAGS += ' --redirect _Scanf=_ScanfSmall'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = ''
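# Illustrative usage (an assumption, not part of the original file): the cross
# toolchain is normally selected through the environment variables consumed at
# the top of this file before invoking the build, e.g. on Windows:
#
#   set RTT_CC=gcc
#   set RTT_EXEC_PATH=C:\gcc-arm-none-eabi\bin
#   scons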
| gpl-2.0 | 8,545,200,576,508,137,000 | 338,703,464,891,824,800 | 25.409091 | 143 | 0.556799 | false |
cernops/neutron | neutron/db/external_net_db.py | 17 | 6456 | # Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.sql import expression as expr
from neutron.api.v2 import attributes
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import external_net
from neutron import manager
from neutron.plugins.common import constants as service_constants
DEVICE_OWNER_ROUTER_GW = l3_constants.DEVICE_OWNER_ROUTER_GW
class ExternalNetwork(model_base.BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
# Add a relationship to the Network model in order to instruct
# SQLAlchemy to eagerly load this association
network = orm.relationship(
models_v2.Network,
backref=orm.backref("external", lazy='joined',
uselist=False, cascade='delete'))
class External_net_db_mixin(object):
"""Mixin class to add external network methods to db_base_plugin_v2."""
def _network_model_hook(self, context, original_model, query):
query = query.outerjoin(ExternalNetwork,
(original_model.id ==
ExternalNetwork.network_id))
return query
def _network_filter_hook(self, context, original_model, conditions):
if conditions is not None and not hasattr(conditions, '__iter__'):
conditions = (conditions, )
# Apply the external network filter only in non-admin context
if not context.is_admin and hasattr(original_model, 'tenant_id'):
conditions = expr.or_(ExternalNetwork.network_id != expr.null(),
*conditions)
return conditions
def _network_result_filter_hook(self, query, filters):
vals = filters and filters.get(external_net.EXTERNAL, [])
if not vals:
return query
if vals[0]:
return query.filter((ExternalNetwork.network_id != expr.null()))
return query.filter((ExternalNetwork.network_id == expr.null()))
# TODO(salvatore-orlando): Perform this operation without explicitly
# referring to db_base_plugin_v2, as plugins that do not extend from it
# might exist in the future
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Network,
"external_net",
'_network_model_hook',
'_network_filter_hook',
'_network_result_filter_hook')
def _network_is_external(self, context, net_id):
try:
context.session.query(ExternalNetwork).filter_by(
network_id=net_id).one()
return True
except exc.NoResultFound:
return False
def _extend_network_dict_l3(self, network_res, network_db):
# Comparing with None for converting uuid into bool
network_res[external_net.EXTERNAL] = network_db.external is not None
return network_res
# Register dict extend functions for networks
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_extend_network_dict_l3'])
def _process_l3_create(self, context, net_data, req_data):
external = req_data.get(external_net.EXTERNAL)
external_set = attributes.is_attr_set(external)
if not external_set:
return
if external:
# expects to be called within a plugin's session
context.session.add(ExternalNetwork(network_id=net_data['id']))
net_data[external_net.EXTERNAL] = external
def _process_l3_update(self, context, net_data, req_data):
new_value = req_data.get(external_net.EXTERNAL)
net_id = net_data['id']
if not attributes.is_attr_set(new_value):
return
if net_data.get(external_net.EXTERNAL) == new_value:
return
if new_value:
context.session.add(ExternalNetwork(network_id=net_id))
net_data[external_net.EXTERNAL] = True
else:
# must make sure we do not have any external gateway ports
# (and thus, possible floating IPs) on this network before
# allow it to be update to external=False
port = context.session.query(models_v2.Port).filter_by(
device_owner=DEVICE_OWNER_ROUTER_GW,
network_id=net_data['id']).first()
if port:
raise external_net.ExternalNetworkInUse(net_id=net_id)
context.session.query(ExternalNetwork).filter_by(
network_id=net_id).delete()
net_data[external_net.EXTERNAL] = False
def _process_l3_delete(self, context, network_id):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin:
l3plugin.delete_disassociated_floatingips(context, network_id)
def _filter_nets_l3(self, context, nets, filters):
vals = filters and filters.get(external_net.EXTERNAL, [])
if not vals:
return nets
ext_nets = set(en['network_id']
for en in context.session.query(ExternalNetwork))
if vals[0]:
return [n for n in nets if n['id'] in ext_nets]
else:
return [n for n in nets if n['id'] not in ext_nets]
def get_external_network_id(self, context):
nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
if len(nets) > 1:
raise n_exc.TooManyExternalNetworks()
else:
return nets[0]['id'] if nets else None
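# Illustrative sketch (an assumption, not from the original file): a core plugin
# typically gains external-network support by mixing this class into its MRO
# next to the base DB plugin, so the query hooks and dict-extend functions
# registered above take effect on network queries:
#
#   class MyCorePlugin(db_base_plugin_v2.NeutronDbPluginV2,
#                      External_net_db_mixin):
#       pass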
| apache-2.0 | -9,118,819,675,553,363,000 | 6,585,994,425,662,120,000 | 38.607362 | 78 | 0.640335 | false |
coryb/aminator | aminator/plugins/finalizer/tagging_s3.py | 2 | 9211 | # -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.finalizer.tagging_s3
======================================
s3 tagging image finalizer
"""
import logging
from shutil import rmtree
from os.path import isdir
from os import makedirs, system
from os import environ
from aminator.config import conf_action
from aminator.plugins.finalizer.tagging_base import TaggingBaseFinalizerPlugin
from aminator.util import randword
from aminator.util.linux import sanitize_metadata, monitor_command
from aminator.util.metrics import cmdsucceeds, cmdfails, timer
__all__ = ('TaggingS3FinalizerPlugin',)
log = logging.getLogger(__name__)
class TaggingS3FinalizerPlugin(TaggingBaseFinalizerPlugin):
_name = 'tagging_s3'
def add_plugin_args(self):
tagging = super(TaggingS3FinalizerPlugin, self).add_plugin_args()
context = self._config.context
tagging.add_argument('-n', '--name', dest='name', action=conf_action(context.ami), help='name of resultant AMI (default package_name-version-release-arch-yyyymmddHHMM-s3')
tagging.add_argument('--cert', dest='cert', action=conf_action(context.ami), help='The path to the PEM encoded RSA public key certificate file for ec2-bundle-volume')
tagging.add_argument('--privatekey', dest='privatekey', action=conf_action(context.ami), help='The path to the PEM encoded RSA private key file for ec2-bundle-vol')
tagging.add_argument('--ec2-user', dest='ec2_user', action=conf_action(context.ami), help='ec2 user id for ec2-bundle-vol')
tagging.add_argument('--tmpdir', dest='tmpdir', action=conf_action(context.ami), help='temp directory used by ec2-bundle-vol')
tagging.add_argument('--bucket', dest='bucket', action=conf_action(context.ami), help='the S3 bucket to use for ec2-upload-bundle')
tagging.add_argument('--break-copy-volume', dest='break_copy_volume', action=conf_action(context.ami, action='store_true'), help='break into shell after copying the volume, for debugging')
def _set_metadata(self):
super(TaggingS3FinalizerPlugin, self)._set_metadata()
context = self._config.context
config = self._config.plugins[self.full_name]
metadata = context.package.attributes
ami_name = context.ami.get('name', None)
if not ami_name:
ami_name = config.name_format.format(**metadata)
context.ami.name = sanitize_metadata('{0}-s3'.format(ami_name))
def tmpdir(self):
config = self._config.plugins[self.full_name]
ami = self._config.context.ami
return "{0}/{1}".format(ami.get("tmpdir", config.get("default_tmpdir", "/tmp")), ami.name)
# pylint: disable=access-member-before-definition
def unique_name(self):
context = self._config.context
if hasattr(self, "_unique_name"):
return self._unique_name
self._unique_name = "{0}-{1}".format(context.ami.name, randword(6))
return self._unique_name
def image_location(self):
return "{0}/{1}".format(self.tmpdir(), self.unique_name())
@cmdsucceeds("aminator.finalizer.tagging_s3.copy_volume.count")
@cmdfails("aminator.finalizer.tagging_s3.copy_volume.error")
@timer("aminator.finalizer.tagging_s3.copy_volume.duration")
def _copy_volume(self):
context = self._config.context
tmpdir = self.tmpdir()
if not isdir(tmpdir):
makedirs(tmpdir)
return monitor_command(["dd", "bs=65536", "if={0}".format(context.volume.dev), "of={0}".format(self.image_location())])
@cmdsucceeds("aminator.finalizer.tagging_s3.bundle_image.count")
@cmdfails("aminator.finalizer.tagging_s3.bundle_image.error")
@timer("aminator.finalizer.tagging_s3.bundle_image.duration")
def _bundle_image(self):
context = self._config.context
config = self._config.plugins[self.full_name]
block_device_map = config.default_block_device_map
root_device = config.default_root_device
bdm = "root={0}".format(root_device)
for bd in block_device_map:
bdm += ",{0}={1}".format(bd[1], bd[0])
bdm += ",ami={0}".format(root_device)
cmd = ['ec2-bundle-image']
cmd.extend(['-c', context.ami.get("cert", config.default_cert)])
cmd.extend(['-k', context.ami.get("privatekey", config.default_privatekey)])
cmd.extend(['-u', context.ami.get("ec2_user", str(config.default_ec2_user))])
cmd.extend(['-i', self.image_location()])
cmd.extend(['-d', self.tmpdir()])
if context.base_ami.architecture:
cmd.extend(['-r', context.base_ami.architecture])
vm_type = context.ami.get("vm_type", "paravirtual")
if vm_type == "paravirtual":
if context.base_ami.kernel_id:
cmd.extend(['--kernel', context.base_ami.kernel_id])
if context.base_ami.ramdisk_id:
cmd.extend(['--ramdisk', context.base_ami.ramdisk_id])
cmd.extend(['-B', bdm])
return monitor_command(cmd)
@cmdsucceeds("aminator.finalizer.tagging_s3.upload_bundle.count")
@cmdfails("aminator.finalizer.tagging_s3.upload_bundle.error")
@timer("aminator.finalizer.tagging_s3.upload_bundle.duration")
def _upload_bundle(self):
context = self._config.context
provider = self._cloud._connection.provider
ak = provider.get_access_key()
sk = provider.get_secret_key()
tk = provider.get_security_token()
cmd = ['ec2-upload-bundle']
cmd.extend(['-b', context.ami.bucket])
cmd.extend(['-a', ak])
cmd.extend(['-s', sk])
if tk:
cmd.extend(['-t', tk])
cmd.extend(['-m', "{0}.manifest.xml".format(self.image_location())])
cmd.extend(['--retry'])
return monitor_command(cmd)
def _register_image(self):
context = self._config.context
log.info('Registering image')
if not self._cloud.register_image(manifest="{0}/{1}.manifest.xml".format(context.ami.bucket, self.unique_name())):
return False
log.info('Registration success')
return True
def finalize(self):
log.info('Finalizing image')
context = self._config.context
self._set_metadata()
ret = self._copy_volume()
if not ret.success:
log.debug('Error copying volume, failure:{0.command} :{0.std_err}'.format(ret.result))
return False
if context.ami.get('break_copy_volume', False):
system("bash")
ret = self._bundle_image()
if not ret.success:
log.debug('Error bundling image, failure:{0.command} :{0.std_err}'.format(ret.result))
return False
ret = self._upload_bundle()
if not ret.success:
log.debug('Error uploading bundled volume, failure:{0.command} :{0.std_err}'.format(ret.result))
return False
if not self._register_image():
log.critical('Error registering image')
return False
if not self._add_tags(['ami']):
log.critical('Error adding tags')
return False
log.info('Image registered and tagged')
self._log_ami_metadata()
return True
def __enter__(self):
context = self._config.context
environ["AMINATOR_STORE_TYPE"] = "s3"
if context.ami.get("name", None):
environ["AMINATOR_AMI_NAME"] = context.ami.name
if context.ami.get("cert", None):
environ["AMINATOR_CERT"] = context.ami.cert
if context.ami.get("privatekey", None):
environ["AMINATOR_PRIVATEKEY"] = context.ami.privatekey
if context.ami.get("ec2_user", None):
environ["AMINATOR_EC2_USER"] = context.ami.ec2_user
if context.ami.get("tmpdir", None):
environ["AMINATOR_TMPDIR"] = context.ami.tmpdir
if context.ami.get("bucket", None):
environ["AMINATOR_BUCKET"] = context.ami.bucket
return super(TaggingS3FinalizerPlugin, self).__enter__()
def __exit__(self, exc_type, exc_value, trace):
if exc_type:
log.debug('Exception encountered in tagging s3 finalizer context manager',
exc_info=(exc_type, exc_value, trace))
# delete tmpdir used by ec2-bundle-vol
try:
td = self.tmpdir()
if isdir(td):
rmtree(td)
except Exception:
log.debug('Exception encountered attempting to clean s3 bundle tmpdir',
exc_info=True)
return False
| apache-2.0 | -6,078,754,656,380,839,000 | -5,176,393,778,648,135,000 | 40.120536 | 196 | 0.62979 | false |
akrzos/cfme_tests | cfme/tests/integration/test_aws_iam_auth_and_roles.py | 2 | 1835 | import pytest
from cfme.configure.access_control import simple_user
from cfme.login import login
from cfme.web_ui import menu
from utils.conf import credentials
from utils.testgen import auth_groups, generate
from utils import version
from utils.providers import setup_a_provider
pytest_generate_tests = generate(auth_groups, auth_mode='aws_iam')
@pytest.fixture(scope="module")
def setup_first_provider():
setup_a_provider(validate=True, check_existing=True)
@pytest.mark.tier(2)
def test_group_roles(configure_aws_iam_auth_mode, group_name, group_data, setup_first_provider):
"""Basic default AWS_IAM group role RBAC test
Validates expected menu and submenu names are present for default
AWS IAM groups
"""
# This should be removed but currently these roles are subject to a bug
if version.current_version() >= '5.4' and group_name in ['evmgroup-administrator',
'evmgroup-approver',
'evmgroup-auditor',
'evmgroup-operator',
'evmgroup-security',
'evmgroup-support',
'evmgroup-user']:
pytest.skip("This role currently fails this test")
try:
iam_group_name = group_name + '_aws_iam'
username = credentials[iam_group_name]['username']
password = credentials[iam_group_name]['password']
except KeyError:
pytest.fail('No match in credentials file for group "{}"'.format(iam_group_name))
login(simple_user(username, password))
assert set(menu.visible_pages()) == set(group_data)
| gpl-2.0 | 36,811,485,213,023,610 | 2,995,522,691,266,947,600 | 38.891304 | 96 | 0.580381 | false |
vntarasov/openpilot | selfdrive/debug/get_fingerprint.py | 1 | 1030 | #!/usr/bin/env python3
# simple script to get a vehicle fingerprint.
# Instructions:
# - connect to a Panda
# - run selfdrive/boardd/boardd
# - launch this script
# - turn on the car in STOCK MODE (set giraffe switches properly).
# Note: it's very important that the car is in stock mode, in order to collect a complete fingerprint
# - since some messages are published at low frequency, keep this script running for at least 30s,
# until all messages are received at least once
import cereal.messaging as messaging
logcan = messaging.sub_sock('can')
msgs = {}
while True:
lc = messaging.recv_sock(logcan, True)
if lc is None:
continue
for c in lc.can:
    # also read msgs sent by EON on CAN bus 0x80 and filter out
    # addresses with more than 11 bits
if c.src in [0, 2] and c.address < 0x800:
msgs[c.address] = len(c.dat)
fingerprint = ', '.join("%d: %d" % v for v in sorted(msgs.items()))
print("number of messages {0}:".format(len(msgs)))
print("fingerprint {0}".format(fingerprint))
| mit | 1,493,900,653,324,489,000 | 187,359,252,416,983,300 | 31.1875 | 103 | 0.695146 | false |
mcr/ietfdb | django/core/management/base.py | 45 | 16447 | """
Base classes for writing management commands (named commands which can
be executed through ``django-admin.py`` or ``manage.py``).
"""
import os
import sys
from optparse import make_option, OptionParser
import django
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style
from django.utils.encoding import smart_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin.py`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_model_validation``
A boolean; if ``True``, validation of installed models will be
performed prior to executing the command. Default value is
``True``. To validate an individual application's models
rather than all applications' models, call
``self.validate(app)`` from ``handle()``, where ``app`` is the
application's Python module.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbosity', action='store', dest='verbosity', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
make_option('--settings',
help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'),
make_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
# Configuration shortcuts that alter various logic.
can_import_settings = True
requires_model_validation = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
def __init__(self):
self.style = color_style()
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
handle_default_options(options)
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
# Switch to English, because django-admin.py creates database content
# like permissions, and those shouldn't contain any translations.
# But only do this if we can assume we have a working settings file,
# because django.utils.translation requires settings.
if self.can_import_settings:
try:
from django.utils import translation
translation.activate('en-us')
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
def validate(self, app=None, display_num_errors=False):
"""
Validates the given app, raising CommandError for any errors.
If app is None, then this will validate all installed apps.
"""
from django.core.management.validation import get_validation_errors
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
s = StringIO()
num_errors = get_validation_errors(s, app)
if num_errors:
s.seek(0)
error_text = s.read()
raise CommandError("One or more models did not validate:\n%s" % error_text)
if display_num_errors:
self.stdout.write("%s error%s found\n" % (num_errors, num_errors != 1 and 's' or ''))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
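# Illustrative sketch (not part of this module): a custom command is typically
# created by subclassing BaseCommand in <app>/management/commands/<command>.py
# and implementing handle(); the names below are made up for the example.
#
#   from django.core.management.base import BaseCommand, CommandError
#
#   class Command(BaseCommand):
#       args = '<poll_id poll_id ...>'
#       help = 'Closes the specified polls'
#
#       def handle(self, *args, **options):
#           if not args:
#               raise CommandError('Enter at least one poll id.')
#           for poll_id in args:
#               self.stdout.write('Closing poll %s\n' % poll_id)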
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application
names as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app()``, which will be called once for each application.
"""
args = '<appname appname ...>'
def handle(self, *app_labels, **options):
from django.db import models
if not app_labels:
raise CommandError('Enter at least one appname.')
try:
app_list = [models.get_app(app_label) for app_label in app_labels]
except (ImproperlyConfigured, ImportError), e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app in app_list:
app_output = self.handle_app(app, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app(self, app, **options):
"""
Perform the command's actions for ``app``, which will be the
Python module corresponding to an application name given on
the command line.
"""
raise NotImplementedError()
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError()
def copy_helper(style, app_or_project, name, directory, other_name=''):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
"""
# style -- A color style object (see django.core.management.color).
# app_or_project -- The string 'app' or 'project'.
# name -- The name of the application or project.
# directory -- The directory to which the layout template should be copied.
# other_name -- When copying an application layout, this should be the name
# of the project.
import re
import shutil
other = {'project': 'app', 'app': 'project'}[app_or_project]
if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
top_dir = os.path.join(directory, name)
try:
os.mkdir(top_dir)
except OSError, e:
raise CommandError(e)
# Determine where the app or project templates are. Use
# django.__path__[0] because we don't know into which directory
# django has been installed.
template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
if relative_dir:
os.mkdir(os.path.join(top_dir, relative_dir))
for subdir in subdirs[:]:
if subdir.startswith('.'):
subdirs.remove(subdir)
for f in files:
if not f.endswith('.py'):
# Ignore .pyc, .pyo, .py.class etc, as they cause various
# breakages.
continue
path_old = os.path.join(d, f)
path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
| bsd-3-clause | -2,550,879,668,248,920,000 | 1,765,551,426,064,070,400 | 37.160093 | 177 | 0.617499 | false |
vcoin-project/v | qa/rpc-tests/test_framework/bignum.py | 1 | 1991 | #
#
# bignum.py
#
# This file is copied from python-vcoinlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# vcoin-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
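# Worked example (illustrative, not part of the original file), byte values in hex:
#   bn2mpi(0x80) -> 00 00 00 02 00 80   (4-byte big-endian size, extension byte, value)
#   bn2vch(0x80) -> 80 00               (size stripped, bytes reversed to little-endian)
#   vch2bn of those two bytes gives back 0x80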
| mit | -6,736,724,139,998,493,000 | 3,913,249,252,660,304,000 | 18.519608 | 82 | 0.52436 | false |
jherico/ctypesgen | ctypesgencore/options.py | 13 | 1164 | #!/usr/bin/env python
"""
All of the components of ctypesgencore require an argument called "options".
In command-line usage, this would be an optparse.Values object. However, if
ctypesgencore is used as a standard Python module, constructing this object
would be a pain. So this module exists to provide a "default" options object
for convenience.
"""
import optparse, copy
default_values={
"other_headers": [],
"modules": [],
"include_search_paths": [],
"compile_libdirs": [],
"runtime_libdirs": [],
"cpp": "gcc -E",
"save_preprocessed_headers": None,
"all_headers": False,
"builtin_symbols": False,
"include_symbols": None,
"exclude_symbols": None,
"show_all_errors": False,
"show_long_errors": False,
"show_macro_warnings": True,
"header_template": None,
"inserted_files": [],
"other_known_names": [],
"include_macros": True,
"libraries": [],
"strip_build_path": None,
"output_language": "python",
"no_stddef_types": False,
"no_gnu_types": False,
"no_python_types": False,
}
def get_default_options():
return optparse.Values(copy.deepcopy(default_values))
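# Illustrative usage (an assumption, not part of the original file): library
# consumers start from the defaults and override individual fields before
# handing the object to the rest of ctypesgencore:
#
#   from ctypesgencore.options import get_default_options
#   options = get_default_options()
#   options.libraries = ["m"]
#   options.include_search_paths = ["/usr/local/include"]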
| bsd-3-clause | 9,222,728,053,804,631,000 | -4,605,864,793,456,378,000 | 27.390244 | 76 | 0.652921 | false |
translate/pootle | pytest_pootle/env.py | 3 | 23700 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from translate.storage.factory import getclass
class PootleTestEnv(object):
methods = (
"redis", "case_sensitive_schema", "formats", "site_root",
"languages", "suggestion_states", "site_matrix", "system_users",
"permissions", "site_permissions", "tps", "templates",
"disabled_project", "subdirs", "submissions", "announcements",
"terminology", "fs", "vfolders", "complex_po")
def setup(self, **kwargs):
for method in self.methods:
should_setup = (
method not in kwargs
or kwargs[method])
if should_setup:
getattr(self, "setup_%s" % method)()
def setup_formats(self):
from pootle.core.delegate import formats
formats.get().initialize()
def setup_complex_po(self):
import pytest_pootle
from pytest_pootle.factories import StoreDBFactory
from pootle_translationproject.models import TranslationProject
po_file = os.path.join(
os.path.dirname(pytest_pootle.__file__),
*("data", "po", "complex.po"))
with open(po_file) as f:
ttk = getclass(f)(f.read())
tp = TranslationProject.objects.get(
project__code="project0",
language__code="language0")
store = StoreDBFactory(
parent=tp.directory,
translation_project=tp,
name="complex.po")
store.update(ttk)
def setup_suggestion_states(self):
from pootle_store.models import SuggestionState
for state in ["pending", "accepted", "rejected"]:
SuggestionState.objects.get_or_create(name=state)
def setup_announcements(self):
from pytest_pootle.factories import AnnouncementFactory
from pootle_project.models import Project
from pootle_language.models import Language
from pootle_translationproject.models import TranslationProject
for language in Language.objects.all():
AnnouncementFactory(
title="Language announcement for: %s" % language,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/languages/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/%s" % language.code)
for project in Project.objects.all():
AnnouncementFactory(
title="Project announcement for: %s" % project,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/projects/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/projects/%s" % project.code)
for tp in TranslationProject.objects.all():
AnnouncementFactory(
title="TP announcement for: %s" % tp,
body=(
'<div dir="ltr" lang="en">This is an example announcements. '
'Just like a real announcement it contains text and some '
'markup, and even a random link about localisation.<br />'
'<a href="http://docs.translatehouse.org/tps/'
'localization-guide/en/latest/guide/start.html">localisation '
'guide</a>.</div>'),
virtual_path="announcements/%s/%s"
% (tp.language.code, tp.project.code))
def setup_case_sensitive_schema(self):
from django.db import connection
from django.apps import apps
from pootle.core.utils.db import set_mysql_collation_for_column
cursor = connection.cursor()
# Language
set_mysql_collation_for_column(
apps,
cursor,
"pootle_language.Language",
"code",
"utf8_general_ci",
"varchar(50)")
# Project
set_mysql_collation_for_column(
apps,
cursor,
"pootle_project.Project",
"code",
"utf8_bin",
"varchar(255)")
# Directory
set_mysql_collation_for_column(
apps,
cursor,
"pootle_app.Directory",
"pootle_path",
"utf8_bin",
"varchar(255)")
set_mysql_collation_for_column(
apps,
cursor,
"pootle_app.Directory",
"name",
"utf8_bin",
"varchar(255)")
# Store
set_mysql_collation_for_column(
apps,
cursor,
"pootle_store.Store",
"pootle_path",
"utf8_bin",
"varchar(255)")
set_mysql_collation_for_column(
apps,
cursor,
"pootle_store.Store",
"name",
"utf8_bin",
"varchar(255)")
# Word.Stem
set_mysql_collation_for_column(
apps,
cursor,
"pootle_word.Stem",
"root",
"utf8_bin",
"varchar(255)")
def setup_permissions(self):
from django.contrib.contenttypes.models import ContentType
from .fixtures.models.permission import _require_permission
args = {
'app_label': 'pootle_app',
'model': 'directory'}
pootle_content_type = ContentType.objects.get(**args)
_require_permission(
'view',
'Can access a project',
pootle_content_type)
_require_permission(
'hide',
'Cannot access a project',
pootle_content_type)
_require_permission(
'suggest',
'Can make a suggestion',
pootle_content_type)
_require_permission(
'translate',
'Can submit translations',
pootle_content_type)
_require_permission(
'review',
'Can review translations',
pootle_content_type)
_require_permission(
'administrate',
'Can administrate a TP',
pootle_content_type)
def _setup_project_fs(self, project):
from pootle_fs.utils import FSPlugin
from pytest_pootle.utils import add_store_fs
project.config["pootle_fs.fs_type"] = "localfs"
project.config["pootle_fs.translation_mappings"] = {
"default": "/<language_code>/<dir_path>/<filename>.<ext>"}
project.config["pootle_fs.fs_url"] = "/tmp/path/for/setup"
plugin = FSPlugin(project)
for store in plugin.resources.stores:
add_store_fs(
store=store,
fs_path=plugin.get_fs_path(store.pootle_path),
synced=True)
def setup_fs(self):
from pootle_project.models import Project
for i in range(0, 2):
project = Project.objects.get(code="project%s" % i)
self._setup_project_fs(project)
self._setup_project_fs(
Project.objects.get(code="terminology"))
self._setup_project_fs(
Project.objects.get(code="disabled_project0"))
def setup_languages(self):
from .fixtures.models.language import _require_language
_require_language('en', 'English')
def setup_redis(self):
from pootle.core.models import Revision
Revision.initialize(force=True)
def setup_system_users(self):
from django.contrib.auth import get_user_model
from .fixtures.models.user import TEST_USERS, _require_user
users = {
user.username: user
for user
in get_user_model().objects.all()}
for username, user_params in TEST_USERS.items():
user_params["email"] = "%[email protected]" % username
TEST_USERS[username]["user"] = (
users.get(username)
or _require_user(username=username, **user_params))
def setup_site_permissions(self):
from django.contrib.auth import get_user_model
from pootle_app.models import Directory, PermissionSet
User = get_user_model()
nobody = User.objects.get_nobody_user()
default = User.objects.get_default_user()
from django.contrib.auth.models import Permission
view = Permission.objects.get(codename="view")
suggest = Permission.objects.get(codename="suggest")
translate = Permission.objects.get(codename="translate")
criteria = {
'user': nobody,
'directory': Directory.objects.root}
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions.set([view, suggest])
permission_set.save()
criteria['user'] = default
permission_set, created = PermissionSet.objects.get_or_create(**criteria)
if created:
permission_set.positive_permissions.set([view, suggest, translate])
permission_set.save()
def setup_site_root(self):
from pytest_pootle.factories import DirectoryFactory
DirectoryFactory(
name="projects",
parent=DirectoryFactory(parent=None, name=""))
def setup_site_matrix(self):
from pytest_pootle.factories import ProjectDBFactory, LanguageDBFactory
from pootle_format.models import Format
from pootle_language.models import Language
# add 2 languages
for i_ in range(0, 2):
LanguageDBFactory()
source_language = Language.objects.get(code="en")
po = Format.objects.get(name="po")
for i_ in range(0, 2):
# add 2 projects
project = ProjectDBFactory(
source_language=source_language)
project.filetypes.add(po)
def setup_terminology(self):
import pytest_pootle
from pytest_pootle.factories import (
ProjectDBFactory, StoreDBFactory, TranslationProjectFactory)
from pootle_language.models import Language
source_language = Language.objects.get(code="en")
terminology = ProjectDBFactory(code="terminology",
checkstyle="terminology",
fullname="Terminology",
source_language=source_language)
term_file = os.path.join(
os.path.dirname(pytest_pootle.__file__),
*("data", "po", "terminology.po"))
with open(term_file) as f:
term_ttk = getclass(f)(f.read())
for language in Language.objects.all():
tp = TranslationProjectFactory(
project=terminology, language=language)
if language.code not in ["language0", "language1"]:
continue
store = StoreDBFactory(
parent=tp.directory,
translation_project=tp,
name="terminology.po")
store.update(term_ttk)
def setup_disabled_project(self):
from pytest_pootle.factories import (DirectoryFactory,
ProjectDBFactory,
TranslationProjectFactory)
from pootle.core.contextmanagers import keep_data
from pootle_format.models import Format
from pootle_language.models import Language
with keep_data():
source_language = Language.objects.get(code="en")
project = ProjectDBFactory(code="disabled_project0",
fullname="Disabled Project 0",
source_language=source_language)
project.filetypes.add(Format.objects.get(name="po"))
project.disabled = True
project.save()
language = Language.objects.get(code="language0")
tp = TranslationProjectFactory(project=project, language=language)
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_stores(tp, n=(1, 1))
subdir0 = DirectoryFactory(name="subdir0", parent=tp.directory, tp=tp)
self._add_stores(tp, n=(1, 1), parent=subdir0)
def setup_subdirs(self):
from pytest_pootle.factories import DirectoryFactory
from pootle.core.contextmanagers import keep_data
from pootle_translationproject.models import TranslationProject
with keep_data():
for tp in TranslationProject.objects.all():
subdir0 = DirectoryFactory(
name="subdir0", parent=tp.directory, tp=tp)
subdir1 = DirectoryFactory(
name="subdir1", parent=subdir0, tp=tp)
self._add_stores(tp, n=(2, 1), parent=subdir0)
self._add_stores(tp, n=(1, 1), parent=subdir1)
def setup_submissions(self):
from django.contrib.auth import get_user_model
from django.utils import timezone
from pootle.core.contextmanagers import bulk_operations
from pootle_data.models import TPChecksData, TPData
from pootle_score.models import UserTPScore
from pootle_statistics.models import SubmissionTypes
from pootle_store.constants import UNTRANSLATED
from pootle_store.models import Unit, UnitChange
from pootle_translationproject.contextmanagers import update_tp_after
from pootle_translationproject.models import TranslationProject
year_ago = timezone.now() - relativedelta(years=1)
units = Unit.objects.all()
units.update(creation_time=year_ago)
User = get_user_model()
admin = User.objects.get(username="admin")
member = User.objects.get(username="member")
member2 = User.objects.get(username="member2")
UnitChange.objects.bulk_create(
UnitChange(unit_id=unit_id, changed_with=SubmissionTypes.SYSTEM)
for unit_id
in units.filter(state__gt=UNTRANSLATED).values_list("id", flat=True))
tps = TranslationProject.objects.exclude(
language__code="templates").select_related(
"language", "project__source_language").all()
bulk_pootle = bulk_operations(
models=(
get_user_model(),
UserTPScore,
TPData,
TPChecksData))
with bulk_pootle:
for tp in tps:
with update_tp_after(tp):
self._add_subs_to_stores(
tp.stores, admin, member, member2)
def _add_subs_to_stores(self, stores, admin, member, member2):
for store in stores.select_related("data", "parent"):
self._add_subs_to_store(store, admin, member, member2)
def _add_subs_to_store(self, store, admin, member, member2):
from django.utils import timezone
# from pootle_store.contextmanagers import update_store_after
year_ago = timezone.now() - relativedelta(years=1)
units = store.unit_set.select_related("change").all()
for unit in units:
self._add_submissions(
unit, year_ago, admin, member, member2)
def setup_templates(self):
from pootle.core.contextmanagers import keep_data
from pootle.core.signals import update_data
from pootle_project.models import Project
from pootle_translationproject.contextmanagers import update_tp_after
from pytest_pootle.factories import (
LanguageDBFactory, TranslationProjectFactory)
tps = []
with keep_data():
templates = LanguageDBFactory(code="templates")
for project in Project.objects.all():
                # add a templates TP to each project
tp = TranslationProjectFactory(project=project, language=templates)
# As there are no files on the FS we have to currently unobsolete
# the directory
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_template_stores(tp)
tps.append(tp)
for tp in tps:
with update_tp_after(tp):
for store in tp.stores.all():
update_data.send(
store.__class__,
instance=store)
def setup_tps(self):
from pootle.core.contextmanagers import keep_data
from pootle_project.models import Project
from pootle_language.models import Language
from pytest_pootle.factories import TranslationProjectFactory
with keep_data():
for project in Project.objects.select_related("source_language").all():
for language in Language.objects.exclude(code="en"):
# add a TP to the project for each language
tp = TranslationProjectFactory(
project=project, language=language)
# As there are no files on the FS we have to currently
# unobsolete the directory
tp_dir = tp.directory
tp_dir.obsolete = False
tp_dir.save()
self._add_stores(tp)
def _add_template_stores(self, tp, n=(3, 2), parent=None):
from pytest_pootle.factories import StoreDBFactory, UnitDBFactory
for i_ in range(0, n[0]):
# add 3 stores
store = StoreDBFactory(translation_project=tp)
store.filetype = tp.project.filetype_tool.choose_filetype(store.name)
store.save()
            # add 4 empty units to each store
for i_ in range(0, 4):
UnitDBFactory(store=store, target="")
def _add_stores(self, tp, n=(3, 2), parent=None):
from pytest_pootle.factories import StoreDBFactory, UnitDBFactory
from pootle_store.constants import UNTRANSLATED, TRANSLATED, FUZZY, OBSOLETE
for i_ in range(0, n[0]):
# add 3 stores
if parent is None:
store = StoreDBFactory(translation_project=tp)
else:
store = StoreDBFactory(translation_project=tp, parent=parent)
store.filetype = tp.project.filetype_tool.choose_filetype(store.name)
store.save()
# add 8 units to each store
for state in [UNTRANSLATED, TRANSLATED, FUZZY, OBSOLETE]:
for i_ in range(0, n[1]):
UnitDBFactory(store=store, state=state)
def _update_submission_times(self, unit, update_time, last_update=None):
submissions = unit.submission_set.all()
if last_update:
submissions = submissions.exclude(
creation_time__lte=last_update)
submissions.update(creation_time=update_time)
def _add_submissions(self, unit, created, admin, member, member2):
from pootle.core.delegate import review
from pootle_store.constants import UNTRANSLATED, FUZZY, OBSOLETE
from pootle_store.models import Suggestion, Unit, UnitChange
from django.utils import timezone
original_state = unit.state
unit.created = created
first_modified = created + relativedelta(months=unit.index, days=10)
# add suggestion at first_modified
suggestion_review = review.get(Suggestion)
suggestion, created_ = suggestion_review().add(
unit,
"Suggestion for %s" % (unit.target or unit.source),
user=member)
self._update_submission_times(unit, first_modified, created)
# accept the suggestion 7 days later if not untranslated
next_time = first_modified + timedelta(days=7)
if original_state == UNTRANSLATED:
suggestion_review([suggestion], reviewer=admin).reject()
else:
Unit.objects.filter(pk=unit.pk).update(mtime=next_time)
UnitChange.objects.filter(
unit_id=unit.pk).update(
reviewed_on=next_time,
reviewed_by=admin,
submitted_on=next_time)
suggestion_review([suggestion], reviewer=admin).accept()
self._update_submission_times(
unit, next_time, first_modified)
# add another suggestion as different user 7 days later
suggestion2_, created_ = suggestion_review().add(
unit,
"Suggestion 2 for %s" % (unit.target or unit.source),
user=member2)
self._update_submission_times(
unit,
first_modified + timedelta(days=14),
next_time)
# mark FUZZY
if original_state == FUZZY:
unit.markfuzzy()
# mark OBSOLETE
elif original_state == OBSOLETE:
unit.makeobsolete()
elif unit.target:
# Re-edit units with translations, adding some submissions
# of SubmissionTypes.EDIT_TYPES
old_target = unit.target
current_time = timezone.now() - timedelta(days=14)
unit.__class__.objects.filter(id=unit.id).update(
target_f="Updated %s" % old_target,
mtime=current_time)
unit.change.save()
def setup_vfolders(self):
from pytest_pootle.factories import VirtualFolderDBFactory
from django.db import connection
from django.apps import apps
from pootle.core.utils.db import set_mysql_collation_for_column
from pootle_language.models import Language
from pootle_project.models import Project
cursor = connection.cursor()
# VirtualFolder
set_mysql_collation_for_column(
apps,
cursor,
"virtualfolder.VirtualFolder",
"name",
"utf8_bin",
"varchar(70)")
project0 = Project.objects.get(code="project0")
language0 = Language.objects.get(code="language0")
VirtualFolderDBFactory(filter_rules="store0.po")
VirtualFolderDBFactory(filter_rules="store1.po")
vf = VirtualFolderDBFactory(
all_languages=True,
is_public=False,
filter_rules="store0.po")
vf.projects.add(project0)
vf.save()
vf = VirtualFolderDBFactory(
all_languages=True,
is_public=False,
filter_rules="store1.po")
vf.projects.add(project0)
vf.save()
vf = VirtualFolderDBFactory(
filter_rules="subdir0/store4.po")
vf.languages.add(language0)
vf.projects.add(project0)
vf.save()
| gpl-3.0 | -9,190,604,753,078,833,000 | -337,188,366,135,001,400 | 36.440758 | 84 | 0.577553 | false |
alexandrul-ci/robotframework | utest/writer/test_rowsplitter.py | 7 | 5821 | import unittest
from robot.writer.rowsplitter import RowSplitter
from robot.utils.asserts import assert_equal
class TestRowSplitter(unittest.TestCase):
def _test(self, data, expected, cols=3, table_type='settings'):
splitter = RowSplitter(cols=cols)
actual = list(splitter.split(data, table_type))
assert_equal(actual, expected)
def test_escaping_empty_cells_at_eol(self):
self._test(['First', 'second', ''],
[['First', 'second', '${EMPTY}']])
self._test(['First', 'second', '', 'next line'],
[['First', 'second', '${EMPTY}'],
['...', 'next line']])
self._test(['1.1', '1.2', '1.3', '', '2.1', '2.2', '', '3.1', '', ''],
[['1.1', '1.2', '1.3', '${EMPTY}'],
['...', '2.1', '2.2', '${EMPTY}'],
['...', '3.1', '', '${EMPTY}']], cols=4)
def test_splitting_inside_comment(self):
self._test(['Kw', 'Arg', '#Comment in', 'many cells'],
[['Kw', 'Arg', '#Comment in'],
['...', '# many cells']])
self._test(['Kw', 'Arg', '# Comment', 'in', 'very', 'many', 'cells', '!'],
[['Kw', 'Arg', '# Comment'],
['...', '# in', 'very'],
['...', '# many', 'cells'],
['...', '# !']])
self._test(['Kw', 'Arg', '# Comment in', 'many cells'],
[['Kw', 'Arg'],
['...', '# Comment in'],
['...', '# many cells']], cols=2)
def test_no_extra_comment_marker(self):
self._test(['1', '2', '3', '# Comment'],
[['1', '2', '3'],
['...', '# Comment']])
self._test(['1', '2', '# C 1', '# C 2'],
[['1', '2', '# C 1'],
['...', '# C 2']])
def test_splitting_whitespace_rows(self):
data = ['', '', '', '', 'foo', '# Comment']
for cols, expected in [(4, [['', '', '', '${EMPTY}'],
['...', 'foo', '# Comment']]),
(3, [['', '', '${EMPTY}'],
['...', '', 'foo'],
['...', '# Comment']]),
(2, [['', '${EMPTY}'],
['...', '${EMPTY}'],
['...', '${EMPTY}'],
['...', 'foo'],
['...', '# Comment']])]:
self._test(data, expected, cols)
def test_min_indent(self):
self._test(['1', '2', '3', '4'],
[['1', '2', '3'], ['...', '4']])
self._test(['1', '2', '3', '4'],
[['1', '2', '3'], ['', '...', '4']], table_type='keyword')
self._test(['1', '2', '3', '4'],
[['1', '2', '3'], ['', '...', '4']], table_type='test case')
def test_split_else(self):
self._test(['Run Keyword If', 'expression', 'Kw 1', 'ELSE', 'Kw 2'],
[['Run Keyword If', 'expression', 'Kw 1'],
['...', 'ELSE', 'Kw 2']], cols=100)
self._test(['Run Keyword If', 'e1', 'Kw 1', 'ELSE IF', 'e2', 'Kw 2'],
[['Run Keyword If', 'e1', 'Kw 1'],
['...', 'ELSE IF', 'e2', 'Kw 2']], cols=100)
self._test(['1', '2', 'ELSE IF', '3', '4', 'ELSE IF', '5', 'ELSE', '6'],
[['1', '2'],
['...', 'ELSE IF', '3', '4'],
['...', 'ELSE IF', '5'],
['...', 'ELSE', '6']], cols=100)
def test_split_also_and(self):
self._test(['Run Keywords', 'k1', 'AND', 'k2', 'a', 'b', 'AND', 'k3'],
[['Run Keywords', 'k1'],
['...', 'AND', 'k2', 'a', 'b'],
['...', 'AND', 'k3']], cols=100)
self._test(['', '1', 'AND', '2', 'ELSE', '3', 'ELSE IF', '4', 'AND', '5'],
[['', '1'],
['', '...', 'AND', '2'],
['', '...', 'ELSE', '3'],
['', '...', 'ELSE IF', '4'],
['', '...', 'AND', '5']], cols=100)
def test_dont_split_else_or_and_in_first_cell(self):
for data in (['ELSE', '1', '2'],
['ELSE IF', '1', '2'],
['AND', '1', '2']):
for no_split in (data,
[''] + data,
['', '', ''] + data,
['...'] + data,
['', '...'] + data,
['', '', '', '...'] + data):
self._test(no_split, [no_split], cols=100)
def test_split_internal_else_lines(self):
data = ['1', '2', '3', '4', '5', '6', '7', '8']
self._test(data + ['ELSE IF'] + data + ['ELSE'] + data,
[['1', '2', '3', '4'],
['...', '5', '6', '7'],
['...', '8'],
['...', 'ELSE IF', '1', '2'],
['...', '3', '4', '5'],
['...', '6', '7', '8'],
['...', 'ELSE', '1', '2'],
['...', '3', '4', '5'],
['...', '6', '7', '8']],
cols=4)
self._test([''] + data + ['ELSE IF'] + data + ['ELSE'] + data,
[['', '1', '2', '3', '4', '5', '6', '7'],
['', '...', '8'],
['', '...', 'ELSE IF', '1', '2', '3', '4', '5'],
['', '...', '6', '7', '8'],
['', '...', 'ELSE', '1', '2', '3', '4', '5'],
['', '...', '6', '7', '8']],
cols=8)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -3,665,314,434,242,221,600 | 2,429,297,705,720,445,000 | 43.776923 | 82 | 0.289126 | false |
DSMan195276/protura-binutils | gdb/syscalls/arm-linux.py | 46 | 1759 | # Copyright (C) 2013-2015 Free Software Foundation, Inc.
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
import sys
import re
import time
infname = sys.argv[1]
inf = open(infname)
print("""\
<?xml version="1.0"?>
<!-- Copyright (C) 2009-%s Free Software Foundation, Inc.
Copying and distribution of this file, with or without modification,
are permitted in any medium without royalty provided the copyright
notice and this notice are preserved. This file is offered as-is,
without any warranty. -->
<!DOCTYPE feature SYSTEM "gdb-syscalls.dtd">
<!-- This file was generated using the following file:
%s
The file mentioned above belongs to the Linux Kernel.
Some small hand-edits were made. -->
<syscalls_info>""" % (time.strftime("%Y"), infname))
def record(name, number, comment=None):
#nm = 'name="%s"' % name
#s = ' <syscall %-30s number="%d"/>' % (nm, number)
s = ' <syscall name="%s" number="%d"/>' % (name, number)
if comment:
s += ' <!-- %s -->' % comment
print(s)
for line in inf:
m = re.match(r'^#define __NR_(\w+)\s+\(__NR_SYSCALL_BASE\+\s*(\d+)\)',
line)
if m:
record(m.group(1), int(m.group(2)))
continue
m = re.match(r'^\s+/\* (\d+) was sys_(\w+) \*/$', line)
if m:
record(m.group(2), int(m.group(1)), 'removed')
m = re.match(r'^#define __ARM_NR_(\w+)\s+\(__ARM_NR_BASE\+\s*(\d+)\)',
line)
if m:
record('ARM_'+m.group(1), 0x0f0000+int(m.group(2)))
continue
print('</syscalls_info>')
| gpl-2.0 | -810,369,600,646,236,000 | -5,094,651,808,278,628,000 | 28.316667 | 74 | 0.603184 | false |
kose-y/pylearn2 | pylearn2/scripts/papers/dropout/tests/test_dropout.py | 46 | 2057 | """
Unit tests for dropout paper
"""
import os
from pylearn2.scripts.tests.yaml_testing import limited_epoch_train
from pylearn2.testing.skip import skip_if_no_data
from theano import config
from theano.compile import get_default_mode
yaml_file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..'))
save_path = os.path.dirname(os.path.realpath(__file__))
def test_mnist_valid():
"""
Tests mnist_valid.yaml by running it for only one epoch
"""
skip_if_no_data()
mode = get_default_mode()
if hasattr(mode, 'check_py_code'):
old_value = mode.check_py_code
mode.check_py_code = False
try:
if config.mode == "DEBUG_MODE":
yaml_file = 'mnist_valid_fast'
else:
yaml_file = 'mnist_valid'
limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'
% yaml_file))
try:
os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))
os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))
except Exception:
pass
finally:
if hasattr(mode, 'check_py_code'):
mode.check_py_code = old_value
def test_mnist():
"""
Tests mnist.yaml by running it for only one epoch
"""
skip_if_no_data()
mode = get_default_mode()
if hasattr(mode, 'check_py_code'):
old_value = mode.check_py_code
mode.check_py_code = False
try:
if config.mode == "DEBUG_MODE":
yaml_file = 'mnist_fast'
else:
yaml_file = 'mnist'
limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'
% yaml_file))
try:
os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))
os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))
except Exception:
pass
finally:
if hasattr(mode, 'check_py_code'):
mode.check_py_code = old_value
| bsd-3-clause | -2,335,601,229,013,211,600 | -6,018,818,628,739,638,000 | 30.646154 | 73 | 0.54983 | false |
ultimanet/nifty | rg/powerspectrum.py | 1 | 26583 | ## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
## TODO: cythonize
from __future__ import division
import numpy as np
def draw_vector_nd(axes,dgrid,ps,symtype=0,fourier=False,zerocentered=False,kpack=None):
"""
    Draws an n-dimensional field on a regular grid from a given power
spectrum. The grid parameters need to be specified, together with a
couple of global options explained below. The dimensionality of the
field is determined automatically.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
ps : ndarray
The power spectrum as a function of Fourier modes.
symtype : int {0,1,2} : *optional*
Whether the output should be real valued (0), complex-hermitian (1)
or complex without symmetry (2). (default=0)
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
zerocentered : bool : *optional*
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
        simply ordered consecutively.
Returns
-------
field : ndarray
The drawn random field.
"""
if(kpack is None):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier))
klength = nklength(kdict)
else:
kdict = kpack[1][np.fft.ifftshift(kpack[0],axes=shiftaxes(zerocentered,st_to_zero_mode=False))]
klength = kpack[1]
#output is in position space
if(not fourier):
#output is real-valued
if(symtype==0):
vector = drawherm(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.real(np.fft.fftshift(np.fft.ifftn(vector),axes=shiftaxes(zerocentered)))
else:
return np.real(np.fft.ifftn(vector))
#output is complex with hermitian symmetry
elif(symtype==1):
vector = drawwild(klength,kdict,ps,real_corr=2)
if(np.any(zerocentered==True)):
return np.fft.fftshift(np.fft.ifftn(np.real(vector)),axes=shiftaxes(zerocentered))
else:
return np.fft.ifftn(np.real(vector))
#output is complex without symmetry
else:
vector = drawwild(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(np.fft.ifftn(vector),axes=shiftaxes(zerocentered))
else:
return np.fft.ifftn(vector)
#output is in fourier space
else:
#output is real-valued
if(symtype==0):
vector = drawwild(klength,kdict,ps,real_corr=2)
if np.any(zerocentered == True):
return np.real(np.fft.fftshift(vector,axes=shiftaxes(zerocentered)))
else:
return np.real(vector)
#output is complex with hermitian symmetry
elif(symtype==1):
vector = drawherm(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(vector,axes=shiftaxes(zerocentered))
else:
return vector
#output is complex without symmetry
else:
vector = drawwild(klength,kdict,ps)
if(np.any(zerocentered==True)):
return np.fft.fftshift(vector,axes=shiftaxes(zerocentered))
else:
return vector
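## Illustrative usage sketch (an assumption, not part of the original module):
## drawing a real-valued random field on a 64x64 grid with an assumed
## power-law spectrum. `axes`, `dgrid` and `ps` below are example inputs.
#
#   axes = np.array([64, 64])
#   dgrid = np.array([1., 1.])
#   kdict = np.fft.fftshift(nkdict_fast(axes, dgrid, fourier=False))
#   klength = nklength(kdict)
#   ps = 1/(1 + klength)**3                  ## assumed isotropic spectrum
#   field = draw_vector_nd(axes, dgrid, ps)  ## real-valued, position space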
#def calc_ps(field,axes,dgrid,zerocentered=False,fourier=False):
#
# """
# Calculates the power spectrum of a given field assuming that the field
# is statistically homogenous and isotropic.
#
# Parameters
# ----------
# field : ndarray
# The input field from which the power spectrum should be determined.
#
# axes : ndarray
# An array with the length of each axis.
#
# dgrid : ndarray
# An array with the pixel length of each axis.
#
# zerocentered : bool : *optional*
# Whether the output array should be zerocentered, i.e. starting with
# negative Fourier modes going over the zero mode to positive modes,
# or not zerocentered, where zero, positive and negative modes are
# simpy ordered consecutively.
#
# fourier : bool : *optional*
# Whether the output should be in Fourier space or not
# (default=False).
#
# """
#
# ## field absolutes
# if(not fourier):
# foufield = np.fft.fftshift(np.fft.fftn(field))
# elif(np.any(zerocentered==False)):
# foufield = np.fft.fftshift(field, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
# else:
# foufield = field
# fieldabs = np.abs(foufield)**2
#
# kdict = nkdict_fast(axes,dgrid,fourier)
# klength = nklength(kdict)
#
# ## power spectrum
# ps = np.zeros(klength.size)
# rho = np.zeros(klength.size)
# for ii in np.ndindex(kdict.shape):
# position = np.searchsorted(klength,kdict[ii])
# rho[position] += 1
# ps[position] += fieldabs[ii]
# ps = np.divide(ps,rho)
# return ps
def calc_ps_fast(field,axes,dgrid,zerocentered=False,fourier=False,pindex=None,kindex=None,rho=None):
"""
Calculates the power spectrum of a given field faster assuming that the
    field is statistically homogeneous and isotropic.
Parameters
----------
field : ndarray
The input field from which the power spectrum should be determined.
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool : *optional*
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
        simply ordered consecutively.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
pindex : ndarray
Index of the Fourier grid points in a numpy.ndarray ordered
following the zerocentered flag (default=None).
kindex : ndarray
Array of all k-vector lengths (default=None).
rho : ndarray
Degeneracy of the Fourier grid, indicating how many k-vectors in
Fourier space have the same length (default=None).
"""
## field absolutes
if(not fourier):
foufield = np.fft.fftshift(np.fft.fftn(field))
elif(np.any(zerocentered==False)):
foufield = np.fft.fftshift(field, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
foufield = field
fieldabs = np.abs(foufield)**2
if(rho is None):
if(pindex is None):
## kdict
kdict = nkdict_fast(axes,dgrid,fourier)
## klength
if(kindex is None):
klength = nklength(kdict)
else:
klength = kindex
## power spectrum
ps = np.zeros(klength.size)
rho = np.zeros(klength.size)
for ii in np.ndindex(kdict.shape):
position = np.searchsorted(klength,kdict[ii])
ps[position] += fieldabs[ii]
rho[position] += 1
else:
## zerocenter pindex
if(np.any(zerocentered==False)):
pindex = np.fft.fftshift(pindex, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
## power spectrum
ps = np.zeros(np.max(pindex)+1)
rho = np.zeros(ps.size)
for ii in np.ndindex(pindex.shape):
ps[pindex[ii]] += fieldabs[ii]
rho[pindex[ii]] += 1
elif(pindex is None):
## kdict
kdict = nkdict_fast(axes,dgrid,fourier)
## klength
if(kindex is None):
klength = nklength(kdict)
else:
klength = kindex
## power spectrum
ps = np.zeros(klength.size)
for ii in np.ndindex(kdict.shape):
position = np.searchsorted(klength,kdict[ii])
ps[position] += fieldabs[ii]
else:
## zerocenter pindex
if(np.any(zerocentered==False)):
pindex = np.fft.fftshift(pindex, axes=shiftaxes(zerocentered,st_to_zero_mode=True))
## power spectrum
ps = np.zeros(rho.size)
for ii in np.ndindex(pindex.shape):
ps[pindex[ii]] += fieldabs[ii]
ps = np.divide(ps,rho)
return ps
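## Usage sketch (assumption, not from the original code): estimating the
## spectrum of a position-space field by reusing precomputed index arrays.
#
#   zc = np.array([True, True])              ## assumed zero-centering flags
#   pindex, kindex, rho = get_power_indices(axes, dgrid, zc, fourier=False)
#   ps = calc_ps_fast(field, axes, dgrid, zerocentered=zc, fourier=False,
#                     pindex=pindex, kindex=kindex, rho=rho)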
def get_power_index(axes,dgrid,zerocentered,irred=False,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
        simply ordered consecutively.
irred : bool : *optional*
If True, the function returns an array of all k-vector lengths and
their degeneracy factors. If False, just the power index array is
returned.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index or {klength, rho} : scalar or list
Returns either an array of all k-vector lengths and
their degeneracy factors or just the power index array
depending on the flag irred.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast(axes,dgrid,fourier)
klength = nklength(kdict)
## output
if(irred):
rho = np.zeros(klength.shape,dtype=np.int)
for ii in np.ndindex(kdict.shape):
rho[np.searchsorted(klength,kdict[ii])] += 1
return klength,rho
else:
ind = np.empty(axes,dtype=np.int)
for ii in np.ndindex(kdict.shape):
ind[ii] = np.searchsorted(klength,kdict[ii])
return ind
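## Sketch (illustrative): with irred=True only the k-vector lengths and their
## degeneracy factors are returned instead of the per-pixel index array.
#
#   klength, rho = get_power_index(axes, dgrid, np.array([True, True]),
#                                  irred=True)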
def get_power_indices(axes,dgrid,zerocentered,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
        simply ordered consecutively.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index, klength, rho : ndarrays
Returns the power index array, an array of all k-vector lengths and
their degeneracy factors.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast(axes,dgrid,fourier)
klength = nklength(kdict)
## output
ind = np.empty(axes,dtype=np.int)
rho = np.zeros(klength.shape,dtype=np.int)
for ii in np.ndindex(kdict.shape):
ind[ii] = np.searchsorted(klength,kdict[ii])
rho[ind[ii]] += 1
return ind,klength,rho
def get_power_indices2(axes,dgrid,zerocentered,fourier=True):
"""
Returns the index of the Fourier grid points in a numpy
array, ordered following the zerocentered flag.
Parameters
----------
axes : ndarray
An array with the length of each axis.
dgrid : ndarray
An array with the pixel length of each axis.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
        simply ordered consecutively.
fourier : bool : *optional*
Whether the output should be in Fourier space or not
(default=False).
Returns
-------
index, klength, rho : ndarrays
Returns the power index array, an array of all k-vector lengths and
their degeneracy factors.
"""
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict_fast2(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict_fast2(axes,dgrid,fourier)
klength,rho,ind = nkdict_to_indices(kdict)
return ind,klength,rho
def nkdict_to_indices(kdict):
kindex,pindex = np.unique(kdict,return_inverse=True)
pindex = pindex.reshape(kdict.shape)
rho = pindex.flatten()
rho.sort()
rho = np.unique(rho,return_index=True,return_inverse=False)[1]
rho = np.append(rho[1:]-rho[:-1],[np.prod(pindex.shape)-rho[-1]])
return kindex,rho,pindex
def bin_power_indices(pindex,kindex,rho,log=False,nbin=None,binbounds=None):
"""
Returns the (re)binned power indices associated with the Fourier grid.
Parameters
----------
pindex : ndarray
Index of the Fourier grid points in a numpy.ndarray ordered
following the zerocentered flag (default=None).
kindex : ndarray
Array of all k-vector lengths (default=None).
rho : ndarray
Degeneracy of the Fourier grid, indicating how many k-vectors in
Fourier space have the same length (default=None).
log : bool
Flag specifying if the binning is performed on logarithmic scale
(default: False).
nbin : integer
Number of used bins (default: None).
binbounds : {list, array}
Array-like inner boundaries of the used bins (default: None).
Returns
-------
pindex, kindex, rho : ndarrays
The (re)binned power indices.
"""
## boundaries
if(binbounds is not None):
binbounds = np.sort(binbounds)
## equal binning
else:
if(log is None):
log = False
if(log):
k = np.r_[0,np.log(kindex[1:])]
else:
k = kindex
dk = np.max(k[2:]-k[1:-1]) ## minimal dk
if(nbin is None):
nbin = int((k[-1]-0.5*(k[2]+k[1]))/dk-0.5) ## maximal nbin
else:
nbin = min(int(nbin),int((k[-1]-0.5*(k[2]+k[1]))/dk+2.5))
dk = (k[-1]-0.5*(k[2]+k[1]))/(nbin-2.5)
binbounds = np.r_[0.5*(3*k[1]-k[2]),0.5*(k[1]+k[2])+dk*np.arange(nbin-2)]
if(log):
binbounds = np.exp(binbounds)
## reordering
reorder = np.searchsorted(binbounds,kindex)
rho_ = np.zeros(len(binbounds)+1,dtype=rho.dtype)
kindex_ = np.empty(len(binbounds)+1,dtype=kindex.dtype)
for ii in range(len(reorder)):
if(rho_[reorder[ii]]==0):
kindex_[reorder[ii]] = kindex[ii]
rho_[reorder[ii]] += rho[ii]
else:
kindex_[reorder[ii]] = (kindex_[reorder[ii]]*rho_[reorder[ii]]+kindex[ii]*rho[ii])/(rho_[reorder[ii]]+rho[ii])
rho_[reorder[ii]] += rho[ii]
return reorder[pindex],kindex_,rho_
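## Sketch (illustrative): rebinning the raw grid indices onto roughly 20
## logarithmic bins before estimating a spectrum.
#
#   pindex2, kindex2, rho2 = bin_power_indices(pindex, kindex, rho,
#                                              log=True, nbin=20)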
def nhermitianize(field,zerocentered):
"""
Hermitianizes an arbitrary n-dimensional field. Becomes relatively slow
for large n.
Parameters
----------
field : ndarray
The input field that should be hermitianized.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
        simply ordered consecutively.
Returns
-------
hermfield : ndarray
The hermitianized field.
"""
## shift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field, axes=shiftaxes(zerocentered))
# for index in np.ndenumerate(field):
# negind = tuple(-np.array(index[0]))
# field[negind] = np.conjugate(index[1])
# if(field[negind]==field[index[0]]):
# field[index[0]] = np.abs(index[1])*(np.sign(index[1].real)+(np.sign(index[1].real)==0)*np.sign(index[1].imag)).astype(np.int)
subshape = np.array(field.shape,dtype=np.int) ## == axes
maxindex = subshape//2
    subshape[np.argmax(subshape)] = subshape[np.argmax(subshape)]//2+1 ## ~half largest axis
for ii in np.ndindex(tuple(subshape)):
negii = tuple(-np.array(ii))
field[negii] = np.conjugate(field[ii])
for ii in np.ndindex((2,)*maxindex.size):
index = tuple(ii*maxindex)
field[index] = np.abs(field[index])*(np.sign(field[index].real)+(np.sign(field[index].real)==0)*-np.sign(field[index].imag)).astype(np.int) ## minus since overwritten before
## reshift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
return field
def nhermitianize_fast(field,zerocentered,special=False):
"""
Hermitianizes an arbitrary n-dimensional field faster.
Still becomes comparably slow for large n.
Parameters
----------
field : ndarray
The input field that should be hermitianized.
zerocentered : bool
Whether the output array should be zerocentered, i.e. starting with
negative Fourier modes going over the zero mode to positive modes,
or not zerocentered, where zero, positive and negative modes are
        simply ordered consecutively.
special : bool, *optional*
Must be True for random fields drawn from Gaussian or pm1
distributions.
Returns
-------
hermfield : ndarray
The hermitianized field.
"""
## shift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field, axes=shiftaxes(zerocentered))
dummy = np.conjugate(field)
## mirror conjugate field
for ii in range(field.ndim):
dummy = np.swapaxes(dummy,0,ii)
dummy = np.flipud(dummy)
dummy = np.roll(dummy,1,axis=0)
dummy = np.swapaxes(dummy,0,ii)
if(special): ## special normalisation for certain random fields
field = np.sqrt(0.5)*(field+dummy)
maxindex = np.array(field.shape,dtype=np.int)//2
for ii in np.ndindex((2,)*maxindex.size):
index = tuple(ii*maxindex)
field[index] *= np.sqrt(0.5)
else: ## regular case
field = 0.5*(field+dummy)
## reshift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
return field
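## Sketch (assumed 2D shape): hermitianizing a random complex Fourier-space
## field so that its inverse FFT is real up to numerical noise.
#
#   vec = np.random.normal(size=(64, 64)) + 1j*np.random.normal(size=(64, 64))
#   herm = nhermitianize_fast(vec, np.array([False, False]))
#   real_field = np.fft.ifftn(herm)          ## imaginary part ~ 1e-16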
def random_hermitian_pm1(datatype,zerocentered,shape):
"""
Draws a set of hermitianized random, complex pm1 numbers.
"""
field = np.random.randint(4,high=None,size=np.prod(shape,axis=0,dtype=np.int,out=None)).reshape(shape,order='C')
dummy = np.copy(field)
## mirror field
for ii in range(field.ndim):
dummy = np.swapaxes(dummy,0,ii)
dummy = np.flipud(dummy)
dummy = np.roll(dummy,1,axis=0)
dummy = np.swapaxes(dummy,0,ii)
field = (field+dummy+2*(field>dummy)*((field+dummy)%2))%4 ## wicked magic
x = np.array([1+0j,0+1j,-1+0j,0-1j],dtype=datatype)[field]
## (re)shift zerocentered axes
if(np.any(zerocentered==True)):
field = np.fft.fftshift(field,axes=shiftaxes(zerocentered))
return x
#-----------------------------------------------------------------------------
# Auxiliary functions
#-----------------------------------------------------------------------------
def shiftaxes(zerocentered,st_to_zero_mode=False):
"""
Shifts the axes in a special way needed for some functions
"""
axes = []
for ii in range(len(zerocentered)):
if(st_to_zero_mode==False)and(zerocentered[ii]):
axes += [ii]
if(st_to_zero_mode==True)and(not zerocentered[ii]):
axes += [ii]
return axes
def nkdict(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the Fourier grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/axes[i]/dgrid[i] for i in range(len(axes))])
kdict = np.empty(axes)
for ii in np.ndindex(kdict.shape):
kdict[ii] = np.sqrt(np.sum(((ii-axes//2)*dk)**2))
return kdict
def nkdict_fast(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the Fourier grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/dgrid[i]/axes[i] for i in range(len(axes))])
temp_vecs = np.array(np.where(np.ones(axes)),dtype='float').reshape(np.append(len(axes),axes))
temp_vecs = np.rollaxis(temp_vecs,0,len(temp_vecs.shape))
temp_vecs -= axes//2
temp_vecs *= dk
temp_vecs *= temp_vecs
return np.sqrt(np.sum((temp_vecs),axis=-1))
def nkdict_fast2(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/dgrid[i]/axes[i] for i in range(len(axes))])
inds = []
for a in axes:
inds += [slice(0,a)]
cords = np.ogrid[inds]
dists = ((cords[0]-axes[0]//2)*dk[0])**2
for ii in range(1,len(axes)):
dists = dists + ((cords[ii]-axes[ii]//2)*dk[ii])**2
dists = np.sqrt(dists)
return dists
def nklength(kdict):
return np.sort(list(set(kdict.flatten())))
#def drawherm(vector,klength,kdict,ps): ## vector = np.zeros(kdict.shape,dtype=np.complex)
# for ii in np.ndindex(vector.shape):
# if(vector[ii]==np.complex(0.,0.)):
# vector[ii] = np.sqrt(0.5*ps[np.searchsorted(klength,kdict[ii])])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# negii = tuple(-np.array(ii))
# vector[negii] = np.conjugate(vector[ii])
# if(vector[negii]==vector[ii]):
# vector[ii] = np.float(np.sqrt(ps[klength==kdict[ii]]))*np.random.normal(0.,1.)
# return vector
def drawherm(klength,kdict,ps):
"""
Draws a hermitian random field from a Gaussian distribution.
"""
# vector = np.zeros(kdict.shape,dtype='complex')
# for ii in np.ndindex(vector.shape):
# if(vector[ii]==np.complex(0.,0.)):
# vector[ii] = np.sqrt(0.5*ps[np.searchsorted(klength,kdict[ii])])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# negii = tuple(-np.array(ii))
# vector[negii] = np.conjugate(vector[ii])
# if(vector[negii]==vector[ii]):
# vector[ii] = np.float(np.sqrt(ps[np.searchsorted(klength,kdict[ii])]))*np.random.normal(0.,1.)
# return vector
vec = np.random.normal(loc=0,scale=1,size=kdict.size).reshape(kdict.shape)
vec = np.fft.fftn(vec)/np.sqrt(np.prod(kdict.shape))
for ii in np.ndindex(kdict.shape):
vec[ii] *= np.sqrt(ps[np.searchsorted(klength,kdict[ii])])
return vec
#def drawwild(vector,klength,kdict,ps,real_corr=1): ## vector = np.zeros(kdict.shape,dtype=np.complex)
# for ii in np.ndindex(vector.shape):
# vector[ii] = np.sqrt(real_corr*0.5*ps[klength==kdict[ii]])*np.complex(np.random.normal(0.,1.),np.random.normal(0.,1.))
# return vector
def drawwild(klength,kdict,ps,real_corr=1):
"""
Draws a field of arbitrary symmetry from a Gaussian distribution.
"""
vec = np.empty(kdict.size,dtype=np.complex)
vec.real = np.random.normal(loc=0,scale=np.sqrt(real_corr*0.5),size=kdict.size)
vec.imag = np.random.normal(loc=0,scale=np.sqrt(real_corr*0.5),size=kdict.size)
vec = vec.reshape(kdict.shape)
for ii in np.ndindex(kdict.shape):
vec[ii] *= np.sqrt(ps[np.searchsorted(klength,kdict[ii])])
return vec
| gpl-3.0 | -7,850,523,802,813,901,000 | 1,813,976,547,448,679,200 | 33.703655 | 181 | 0.600271 | false |
MackZxh/OCA-Choice | hr/hr_contract_multi_jobs/__openerp__.py | 13 | 1494 | # -*- coding:utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Savoir-faire Linux. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'HR Contract Multi Jobs',
'version': '8.0.1.0.0',
'license': 'AGPL-3',
'category': 'Generic Modules/Human Resources',
'author': 'Savoir-faire Linux, '
'Fekete Mihai (Forest and Biomass Services Romania), '
'Odoo Community Association (OCA)',
'website': 'https://www.savoirfairelinux.com/',
'depends': [
'hr_contract'
],
'data': [
'security/ir.model.access.csv',
'views/hr_contract_view.xml',
],
'test': [],
'demo': [],
'installable': True,
}
| lgpl-3.0 | 9,170,509,122,890,980,000 | 3,308,857,009,521,425,000 | 36.35 | 78 | 0.574967 | false |
stackforge/tacker | tacker/tests/unit/conductor/conductorrpc/test_vnf_pkgm_rpc.py | 2 | 4155 | # Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from tacker.common.rpc import BackingOffClient
from tacker.conductor.conductorrpc import vnf_pkgm_rpc
from tacker.objects import vnf_package
from tacker.tests import base
from tacker.tests.unit.conductor import fakes
class VnfPackageRPCTestCase(base.BaseTestCase):
def setUp(self):
super(VnfPackageRPCTestCase, self).setUp()
self.context = self.fake_admin_context()
self.rpc_api = vnf_pkgm_rpc.VNFPackageRPCAPI()
self.cctxt_mock = mock.MagicMock()
def test_upload_vnf_package_content(self):
@mock.patch.object(BackingOffClient, 'prepare')
def _test(prepare_mock):
prepare_mock.return_value = self.cctxt_mock
vnf_package_obj = vnf_package.VnfPackage(
self.context, **fakes.VNF_UPLOAD_VNF_PACKAGE_CONTENT)
self.rpc_api.upload_vnf_package_content(self.context,
vnf_package_obj, cast=True)
prepare_mock.assert_called()
self.cctxt_mock.cast.assert_called_once_with(
self.context, 'upload_vnf_package_content',
vnf_package=vnf_package_obj)
_test()
def test_upload_vnf_package_from_uri(self):
fake_addressInformation = "http://test_csar.zip"
@mock.patch.object(BackingOffClient, 'prepare')
def _test(prepare_mock):
prepare_mock.return_value = self.cctxt_mock
vnf_package_obj = vnf_package.VnfPackage(self.context,
**fakes.VNF_DATA)
self.rpc_api.upload_vnf_package_from_uri(self.context,
vnf_package_obj,
fake_addressInformation,
cast=True)
prepare_mock.assert_called()
self.cctxt_mock.cast.assert_called_once_with(
self.context, 'upload_vnf_package_from_uri',
vnf_package=vnf_package_obj,
address_information=fake_addressInformation,
password=None, user_name=None)
_test()
def test_delete_vnf_package(self):
@mock.patch.object(BackingOffClient, 'prepare')
def _test(prepare_mock):
prepare_mock.return_value = self.cctxt_mock
vnf_package_obj = vnf_package.VnfPackage(self.context,
**fakes.VNF_DATA)
self.rpc_api.delete_vnf_package(self.context,
vnf_package_obj, cast=True)
prepare_mock.assert_called()
self.cctxt_mock.cast.assert_called_once_with(
self.context, 'delete_vnf_package',
vnf_package=vnf_package_obj)
_test()
def test_get_vnf_package_vnfd(self):
@mock.patch.object(BackingOffClient, 'prepare')
def _test(prepare_mock):
prepare_mock.return_value = self.cctxt_mock
vnf_package_obj = vnf_package.VnfPackage(self.context,
**fakes.VNF_DATA)
self.rpc_api.get_vnf_package_vnfd(self.context,
vnf_package_obj, cast=False)
prepare_mock.assert_called()
self.cctxt_mock.call.assert_called_once_with(
self.context, 'get_vnf_package_vnfd',
vnf_package=vnf_package_obj)
_test()
| apache-2.0 | 2,262,684,924,705,268,000 | -7,132,986,498,575,124,000 | 42.28125 | 78 | 0.581227 | false |
fnordahl/nova | nova/exception.py | 1 | 56858 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
import webob.exc
from webob import util as woutil
from nova.i18n import _, _LE
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code, title="", explanation=""):
self.code = code
# There is a strict rule about constructing status line for HTTP:
# '...Status-Line, consisting of the protocol version followed by a
# numeric status code and its associated textual phrase, with each
# element separated by SP characters'
# (http://www.faqs.org/rfcs/rfc2616.html)
# 'code' and 'title' can not be empty because they correspond
# to numeric status code and its associated text
if title:
self.title = title
else:
try:
self.title = woutil.status_reasons[self.code]
except KeyError:
msg = _LE("Improper or unknown HTTP status code used: %d")
LOG.error(msg, code)
self.title = woutil.status_generic_reasons[self.code // 100]
self.explanation = explanation
super(ConvertedException, self).__init__()
def _cleanse_dict(original):
"""Strip all admin_password, new_pass, rescue_pass keys from a dict."""
return {k: v for k, v in six.iteritems(original) if "_pass" not in k}
def wrap_exception(notifier=None, get_notifier=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It also optionally sends the exception to the notification
system.
"""
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
except Exception as e:
with excutils.save_and_reraise_exception():
if notifier or get_notifier:
payload = dict(exception=e)
call_dict = safe_utils.getcallargs(f, context,
*args, **kw)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
event_type = f.__name__
(notifier or get_notifier()).error(context,
event_type,
payload)
return functools.wraps(f)(wrapped)
return inner
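# Illustrative usage sketch (not part of this module): decorating a manager
# method so unhandled exceptions are logged and sent to the notifier. The
# notifier factory shown here is an assumption about the caller, not a
# guaranteed API.
#
#     @wrap_exception(get_notifier=lambda: rpc.get_notifier(service='compute'))
#     def attach_volume(self, context, instance, volume_id):
#         ...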
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value)) # noqa
if CONF.fatal_exception_format_errors:
six.reraise(*exc_info)
else:
# at least get the core message out if something happened
message = self.msg_fmt
self.message = message
super(NovaException, self).__init__(message)
def format_message(self):
# NOTE(mrodden): use the first argument to the python Exception object
# which should be our full NovaException message, (see __init__)
return self.args[0]
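# Minimal sketch (hypothetical subclass): a msg_fmt plus matching keyword
# arguments is all a concrete exception needs, as the classes below illustrate.
#
#     class WidgetNotFound(NotFound):
#         msg_fmt = _("Widget %(widget_id)s could not be found.")
#
#     raise WidgetNotFound(widget_id='w-123')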
class EncryptionFailure(NovaException):
msg_fmt = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
msg_fmt = _("Failed to decrypt text: %(reason)s")
class RevokeCertFailure(NovaException):
msg_fmt = _("Failed to revoke certificate for %(project_id)s")
class VirtualInterfaceCreateException(NovaException):
msg_fmt = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
msg_fmt = _("Creation of virtual interface with "
"unique mac address failed")
class VirtualInterfacePlugException(NovaException):
msg_fmt = _("Virtual interface plugin failed")
class GlanceConnectionFailed(NovaException):
msg_fmt = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class CinderConnectionFailed(NovaException):
msg_fmt = _("Connection to cinder host failed: %(reason)s")
class Forbidden(NovaException):
ec2_code = 'AuthFailure'
msg_fmt = _("Not authorized.")
code = 403
class AdminRequired(Forbidden):
msg_fmt = _("User does not have admin privileges")
class PolicyNotAuthorized(Forbidden):
msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
class VolumeLimitExceeded(Forbidden):
msg_fmt = _("Volume resource quota exceeded")
class ImageNotActive(NovaException):
# NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
# but it still seems like the most appropriate option.
ec2_code = 'IncorrectState'
msg_fmt = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
msg_fmt = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
msg_fmt = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class InvalidBDMImage(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"failed to get image %(id)s.")
class InvalidBDMBootSequence(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"Boot sequence for the instance "
"and image/block device mapping "
"combination is not valid.")
class InvalidBDMLocalsLimit(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"You specified more local devices than the "
"limit allows")
class InvalidBDMEphemeralSize(InvalidBDM):
msg_fmt = _("Ephemeral disks requested are larger than "
"the instance type allows.")
class InvalidBDMSwapSize(InvalidBDM):
msg_fmt = _("Swap drive requested is larger than instance type allows.")
class InvalidBDMFormat(InvalidBDM):
msg_fmt = _("Block Device Mapping is Invalid: "
"%(details)s")
class InvalidBDMForLegacy(InvalidBDM):
msg_fmt = _("Block Device Mapping cannot "
"be converted to legacy format. ")
class InvalidBDMVolumeNotBootable(InvalidBDM):
msg_fmt = _("Block Device %(id)s is not bootable.")
class InvalidAttribute(Invalid):
msg_fmt = _("Attribute not supported: %(attr)s")
class ValidationError(Invalid):
msg_fmt = "%(detail)s"
class VolumeUnattached(Invalid):
ec2_code = 'IncorrectState'
msg_fmt = _("Volume %(volume_id)s is not attached to anything")
class VolumeNotCreated(NovaException):
msg_fmt = _("Volume %(volume_id)s did not finish being created"
" even after we waited %(seconds)s seconds or %(attempts)s"
" attempts. And its status is %(volume_status)s.")
class VolumeEncryptionNotSupported(Invalid):
msg_fmt = _("Volume encryption is not supported for %(volume_type)s "
"volume %(volume_id)s")
class InvalidKeypair(Invalid):
ec2_code = 'InvalidKeyPair.Format'
msg_fmt = _("Keypair data is invalid: %(reason)s")
class InvalidRequest(Invalid):
msg_fmt = _("The request is invalid.")
class InvalidInput(Invalid):
msg_fmt = _("Invalid input received: %(reason)s")
class InvalidVolume(Invalid):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("Invalid volume: %(reason)s")
class InvalidVolumeAccessMode(Invalid):
msg_fmt = _("Invalid volume access mode: %(access_mode)s")
class InvalidMetadata(Invalid):
msg_fmt = _("Invalid metadata: %(reason)s")
class InvalidMetadataSize(Invalid):
msg_fmt = _("Invalid metadata size: %(reason)s")
class InvalidPortRange(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
msg_fmt = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
class InvalidAPIVersionString(Invalid):
msg_fmt = _("API Version String %(version)s is of invalid format. Must "
"be of format MajorNum.MinorNum.")
class VersionNotFoundForAPIMethod(Invalid):
msg_fmt = _("API version %(version)s is not supported on this method.")
class InvalidGlobalAPIVersion(Invalid):
msg_fmt = _("Version %(req_ver)s is not supported by the API. Minimum "
"is %(min_ver)s and maximum is %(max_ver)s.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
ec2_code = 'InvalidParameterValue'
msg_fmt = _("%(err)s")
class InvalidAggregateAction(Invalid):
msg_fmt = _("Unacceptable parameters.")
code = 400
class InvalidAggregateActionAdd(InvalidAggregateAction):
msg_fmt = _("Cannot add host to aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionDelete(InvalidAggregateAction):
msg_fmt = _("Cannot remove host from aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdate(InvalidAggregateAction):
msg_fmt = _("Cannot update aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidAggregateActionUpdateMeta(InvalidAggregateAction):
msg_fmt = _("Cannot update metadata of aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
msg_fmt = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
msg_fmt = _("Sort key supplied was not valid.")
class InvalidStrTime(Invalid):
msg_fmt = _("Invalid datetime string: %(reason)s")
class InstanceInvalidState(Invalid):
msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
msg_fmt = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
msg_fmt = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
msg_fmt = _("Failed to suspend instance: %(reason)s")
class InstanceResumeFailure(Invalid):
msg_fmt = _("Failed to resume instance: %(reason)s")
class InstancePowerOnFailure(Invalid):
msg_fmt = _("Failed to power on instance: %(reason)s")
class InstancePowerOffFailure(Invalid):
msg_fmt = _("Failed to power off instance: %(reason)s")
class InstanceRebootFailure(Invalid):
msg_fmt = _("Failed to reboot instance: %(reason)s")
class InstanceTerminationFailure(Invalid):
msg_fmt = _("Failed to terminate instance: %(reason)s")
class InstanceDeployFailure(Invalid):
msg_fmt = _("Failed to deploy instance: %(reason)s")
class MultiplePortsNotApplicable(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class InvalidFixedIpAndMaxCountRequest(Invalid):
msg_fmt = _("Failed to launch instances: %(reason)s")
class ServiceUnavailable(Invalid):
msg_fmt = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
msg_fmt = _("Insufficient compute resources: %(reason)s.")
class HypervisorUnavailable(NovaException):
msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
class ComputeServiceUnavailable(ServiceUnavailable):
msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
class ComputeServiceInUse(NovaException):
msg_fmt = _("Compute service of %(host)s is still in use.")
class UnableToMigrateToSelf(Invalid):
msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
msg_fmt = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
msg_fmt = _("The instance requires a newer hypervisor version than "
"has been provided.")
class ServiceTooOld(Invalid):
msg_fmt = _("This service is older (v%(thisver)i) than the minimum "
"(v%(minver)i) version of the rest of the deployment. "
"Unable to continue.")
class DestinationDiskExists(Invalid):
msg_fmt = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
msg_fmt = _("The supplied device path (%(path)s) is in use.")
code = 409
class DeviceIsBusy(Invalid):
msg_fmt = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
msg_fmt = _("Unacceptable CPU info: %(reason)s")
class InvalidIpAddressError(Invalid):
msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
msg_fmt = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
class InvalidDiskInfo(Invalid):
msg_fmt = _("Disk info file is invalid: %(reason)s")
class DiskInfoReadWriteFail(Invalid):
msg_fmt = _("Failed to read or write disk info file: %(reason)s")
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
msg_fmt = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
msg_fmt = _("Invalid ID received %(id)s.")
class ConstraintNotMet(NovaException):
msg_fmt = _("Constraint not met.")
code = 412
class NotFound(NovaException):
msg_fmt = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
msg_fmt = _("No agent-build associated with id %(id)s.")
class AgentBuildExists(NovaException):
msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
"architecture %(architecture)s exists.")
class VolumeNotFound(NotFound):
ec2_code = 'InvalidVolume.NotFound'
msg_fmt = _("Volume %(volume_id)s could not be found.")
class BDMNotFound(NotFound):
msg_fmt = _("No Block Device Mapping with id %(id)s.")
class VolumeBDMNotFound(NotFound):
msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.")
class VolumeBDMPathNotFound(VolumeBDMNotFound):
msg_fmt = _("No volume Block Device Mapping at path: %(path)s")
class SnapshotNotFound(NotFound):
ec2_code = 'InvalidSnapshot.NotFound'
msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
class DiskNotFound(NotFound):
msg_fmt = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
class ImageNotFound(NotFound):
msg_fmt = _("Image %(image_id)s could not be found.")
class PreserveEphemeralNotSupported(Invalid):
msg_fmt = _("The current driver does not support "
"preserving ephemeral partitions.")
# NOTE(jruzicka): ImageNotFound is not a valid EC2 error code.
class ImageNotFoundEC2(ImageNotFound):
msg_fmt = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
msg_fmt = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
msg_fmt = _("Cannot find SR to read/write VDI.")
class InstanceMappingNotFound(NotFound):
msg_fmt = _("Instance %(uuid)s has no mapping to a cell.")
class NetworkDuplicated(Invalid):
msg_fmt = _("Network %(network_id)s is duplicated.")
class NetworkDhcpReleaseFailed(NovaException):
msg_fmt = _("Failed to release IP %(address)s with MAC %(mac_address)s")
class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
class NetworkSetHostFailed(NovaException):
msg_fmt = _("Network set host failed for network %(network_id)s.")
class NetworkNotCreated(Invalid):
msg_fmt = _("%(req)s is required to create a network.")
class LabelTooLong(Invalid):
msg_fmt = _("Maximum allowed length for 'label' is 255.")
class InvalidIntValue(Invalid):
msg_fmt = _("%(key)s must be an integer.")
class InvalidCidr(Invalid):
msg_fmt = _("%(cidr)s is not a valid ip network.")
class InvalidAddress(Invalid):
msg_fmt = _("%(address)s is not a valid ip address.")
class AddressOutOfRange(Invalid):
msg_fmt = _("%(address)s is not within %(cidr)s.")
class DuplicateVlan(NovaException):
msg_fmt = _("Detected existing vlan with id %(vlan)d")
code = 409
class CidrConflict(NovaException):
msg_fmt = _('Requested cidr (%(cidr)s) conflicts '
'with existing cidr (%(other)s)')
code = 409
class NetworkHasProject(NetworkInUse):
msg_fmt = _('Network must be disassociated from project '
'%(project_id)s before it can be deleted.')
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
msg_fmt = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
msg_fmt = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
msg_fmt = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
msg_fmt = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
msg_fmt = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
msg_fmt = _("No networks defined.")
class NoMoreNetworks(NovaException):
msg_fmt = _("No more available networks.")
class NetworkNotFoundForProject(NetworkNotFound):
msg_fmt = _("Either network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class NetworkAmbiguous(Invalid):
msg_fmt = _("More than one possible network found. Specify "
"network ID(s) to select which one(s) to connect to.")
class NetworkRequiresSubnet(Invalid):
msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
" instances on.")
class ExternalNetworkAttachForbidden(Forbidden):
msg_fmt = _("It is not allowed to create an interface on "
"external network %(network_uuid)s")
class NetworkMissingPhysicalNetwork(NovaException):
msg_fmt = _("Physical network is missing for network %(network_uuid)s")
class VifDetailsMissingVhostuserSockPath(Invalid):
msg_fmt = _("vhostuser_sock_path not present in vif_details"
" for vif %(vif_id)s")
class VifDetailsMissingMacvtapParameters(Invalid):
msg_fmt = _("Parameters %(missing_params)s not present in"
" vif_details for vif %(vif_id)s. Check your Neutron"
" configuration to validate that the macvtap parameters are"
" correct.")
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(Invalid):
msg_fmt = _("Port %(port_id)s is still in use.")
class PortRequiresFixedIP(Invalid):
msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.")
class PortNotUsable(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(Invalid):
msg_fmt = _("No free port available for instance %(instance)s.")
class PortBindingFailed(Invalid):
msg_fmt = _("Binding failed for port %(port_id)s, please check neutron "
"logs for more information.")
class FixedIpExists(NovaException):
msg_fmt = _("Fixed ip %(address)s already exists.")
class FixedIpNotFound(NotFound):
msg_fmt = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
msg_fmt = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
msg_fmt = _("Network host %(host)s has zero fixed ips "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAssociateFailed(NovaException):
msg_fmt = _("Fixed IP associate failed for network: %(net)s.")
class FixedIpAlreadyInUse(NovaException):
msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
msg_fmt = _("More than one instance is associated with fixed ip address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
msg_fmt = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
ec2_code = 'UnsupportedOperation'
msg_fmt = _("No fixed IP addresses available for network: %(net)s")
class NoFixedIpsDefined(NotFound):
msg_fmt = _("Zero fixed ips could be found.")
class FloatingIpExists(NovaException):
msg_fmt = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
msg_fmt = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
msg_fmt = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
msg_fmt = _("Floating ip pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
msg_fmt = _("Zero floating ips available.")
safe = True
class FloatingIpAssociated(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
msg_fmt = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
msg_fmt = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Interface %(interface)s not found.")
class FloatingIpAllocateFailed(NovaException):
msg_fmt = _("Floating IP allocate failed.")
class FloatingIpAssociateFailed(NovaException):
msg_fmt = _("Floating IP %(address)s association has failed.")
class FloatingIpBadRequest(Invalid):
ec2_code = "UnsupportedOperation"
msg_fmt = _("The floating IP request failed with a BadRequest")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
ec2_code = "UnsupportedOperation"
msg_fmt = _("Cannot disassociate auto assigned floating ip")
class KeypairNotFound(NotFound):
ec2_code = 'InvalidKeyPair.NotFound'
msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")
class ServiceNotFound(NotFound):
msg_fmt = _("Service %(service_id)s could not be found.")
class ServiceBinaryExists(NovaException):
msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")
class ServiceTopicExists(NovaException):
msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")
class HostNotFound(NotFound):
msg_fmt = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
msg_fmt = _("Compute host %(host)s could not be found.")
class ComputeHostNotCreated(HostNotFound):
msg_fmt = _("Compute host %(name)s needs to be created first"
" before updating.")
class HostBinaryNotFound(NotFound):
msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
msg_fmt = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
msg_fmt = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class InvalidQuotaMethodUsage(Invalid):
msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s")
class QuotaNotFound(NotFound):
msg_fmt = _("Quota could not be found")
class QuotaExists(NovaException):
msg_fmt = _("Quota exists for project %(project_id)s, "
"resource %(resource)s")
class QuotaResourceUnknown(QuotaNotFound):
msg_fmt = _("Unknown quota resources %(unknown)s.")
class ProjectUserQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
"could not be found.")
class ProjectQuotaNotFound(QuotaNotFound):
msg_fmt = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
msg_fmt = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
msg_fmt = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
msg_fmt = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
msg_fmt = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
msg_fmt = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
msg_fmt = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExists(Invalid):
ec2_code = 'InvalidGroup.Duplicate'
msg_fmt = _("Security group %(security_group_name)s already exists "
"for project %(project_id)s.")
class SecurityGroupExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
msg_fmt = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
msg_fmt = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
msg_fmt = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class SecurityGroupRuleExists(Invalid):
ec2_code = 'InvalidPermission.Duplicate'
msg_fmt = _("Rule already exists in group: %(rule)s")
class NoUniqueMatch(NovaException):
msg_fmt = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
msg_fmt = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
msg_fmt = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class ConsolePoolNotFound(NotFound):
msg_fmt = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolExists(NovaException):
msg_fmt = _("Console pool with host %(host)s, console_type "
"%(console_type)s and compute_host %(compute_host)s "
"already exists.")
class ConsolePoolNotFoundForHostType(NotFound):
msg_fmt = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
msg_fmt = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
msg_fmt = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
msg_fmt = _("Invalid console type %(console_type)s")
class ConsoleTypeUnavailable(Invalid):
msg_fmt = _("Unavailable console type %(console_type)s.")
class ConsolePortRangeExhausted(NovaException):
msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
"exhausted.")
class FlavorNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s could not be found.")
class FlavorNotFoundByName(FlavorNotFound):
msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
class FlavorAccessNotFound(NotFound):
msg_fmt = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class FlavorExtraSpecUpdateCreateFailed(NovaException):
msg_fmt = _("Flavor %(id)d extra spec cannot be updated or created "
"after %(retries)d retries.")
class CellNotFound(NotFound):
msg_fmt = _("Cell %(cell_name)s doesn't exist.")
class CellExists(NovaException):
msg_fmt = _("Cell with name %(name)s already exists.")
class CellRoutingInconsistency(NovaException):
msg_fmt = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
msg_fmt = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
msg_fmt = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
msg_fmt = _("No cells available matching scheduling criteria.")
class CellsUpdateUnsupported(NovaException):
msg_fmt = _("Cannot update cells configuration file.")
class InstanceUnknownCell(NotFound):
msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")
class FlavorExtraSpecsNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class ComputeHostMetricNotFound(NotFound):
msg_fmt = _("Metric %(name)s could not be found on the compute "
"host node %(host)s.%(node)s.")
class FileNotFound(NotFound):
msg_fmt = _("File %(file_path)s could not be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
msg_fmt = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
msg_fmt = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
class InstanceTagNotFound(NotFound):
msg_fmt = _("Instance %(instance_id)s has no tag '%(tag)s'")
class RotationRequiredForBackup(NovaException):
msg_fmt = _("Rotation param is required for backup image_type")
class KeyPairExists(NovaException):
ec2_code = 'InvalidKeyPair.Duplicate'
msg_fmt = _("Key pair '%(key_name)s' already exists.")
class InstanceExists(NovaException):
msg_fmt = _("Instance %(name)s already exists.")
class FlavorExists(NovaException):
msg_fmt = _("Flavor with name %(name)s already exists.")
class FlavorIdExists(NovaException):
msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
class FlavorAccessExists(NovaException):
msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
msg_fmt = _("%(path)s is not on local storage: %(reason)s")
class StorageError(NovaException):
msg_fmt = _("Storage error: %(reason)s")
class MigrationError(NovaException):
msg_fmt = _("Migration error: %(reason)s")
class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
class MalformedRequestBody(NovaException):
msg_fmt = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
msg_fmt = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
msg_fmt = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
msg_fmt = _("Resize error: %(reason)s")
class CannotResizeDisk(NovaException):
msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
class FlavorMemoryTooSmall(NovaException):
msg_fmt = _("Flavor's memory is too small for requested image.")
class FlavorDiskTooSmall(NovaException):
msg_fmt = _("The created instance's disk would be too small.")
class FlavorDiskSmallerThanImage(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is too small for requested image. Flavor disk "
"is %(flavor_size)i bytes, image is %(image_size)i bytes.")
class FlavorDiskSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Flavor's disk is smaller than the minimum size specified in "
"image metadata. Flavor disk is %(flavor_size)i bytes, "
"minimum size is %(image_min_disk)i bytes.")
class VolumeSmallerThanMinDisk(FlavorDiskTooSmall):
msg_fmt = _("Volume is smaller than the minimum size specified in image "
"metadata. Volume size is %(volume_size)i bytes, minimum "
"size is %(image_min_disk)i bytes.")
class InsufficientFreeMemory(NovaException):
msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
class NoValidHost(NovaException):
msg_fmt = _("No valid host was found. %(reason)s")
class MaxRetriesExceeded(NoValidHost):
msg_fmt = _("Exceeded maximum number of retries. %(reason)s")
class QuotaError(NovaException):
ec2_code = 'ResourceLimitExceeded'
msg_fmt = _("Quota exceeded: code=%(code)s")
# NOTE(cyeoh): 413 should only be used for the ec2 API
# The error status code for out of quota for the nova api should be
# 403 Forbidden.
code = 413
headers = {'Retry-After': 0}
safe = True
class TooManyInstances(QuotaError):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)s of %(allowed)s %(overs)s")
class FloatingIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of floating ips exceeded")
class FixedIpLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of fixed ips exceeded")
class MetadataLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file path too long")
class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
ec2_code = 'SecurityGroupLimitExceeded'
msg_fmt = _("Maximum number of security groups or rules exceeded")
class PortLimitExceeded(QuotaError):
msg_fmt = _("Maximum number of ports exceeded")
class AggregateError(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(NovaException):
msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class FlavorCreateFailed(NovaException):
msg_fmt = _("Unable to create flavor")
class InstancePasswordSetFailed(NovaException):
msg_fmt = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class InvalidAssociation(NotFound):
ec2_code = 'InvalidAssociationID.NotFound'
msg_fmt = _("Invalid association.")
class MarkerNotFound(NotFound):
msg_fmt = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
msg_fmt = _("Invalid id: %(instance_id)s (expecting \"i-...\")")
ec2_code = 'InvalidInstanceID.Malformed'
class InvalidVolumeIDMalformed(Invalid):
msg_fmt = _("Invalid id: %(volume_id)s (expecting \"i-...\")")
ec2_code = 'InvalidVolumeID.Malformed'
class CouldNotFetchImage(NovaException):
msg_fmt = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
msg_fmt = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
msg_fmt = _("Instance %(instance_uuid)s is locked")
class ConfigDriveInvalidValue(Invalid):
msg_fmt = _("Invalid value for Config Drive option: %(option)s")
class ConfigDriveMountFailed(NovaException):
msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
msg_fmt = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
msg_fmt = _("Failed to attach network adapter device to "
"%(instance_uuid)s")
class InterfaceDetachFailed(Invalid):
msg_fmt = _("Failed to detach network adapter device from "
"%(instance_uuid)s")
class InstanceUserDataTooLarge(NovaException):
msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
msg_fmt = _("User data needs to be valid base 64.")
class InstanceUpdateConflict(NovaException):
msg_fmt = _("Conflict updating instance %(instance_uuid)s. "
"Expected: %(expected)s. Actual: %(actual)s")
class UnknownInstanceUpdateConflict(InstanceUpdateConflict):
msg_fmt = _("Conflict updating instance %(instance_uuid)s, but we were "
"unable to determine the cause")
class UnexpectedTaskStateError(InstanceUpdateConflict):
pass
class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
pass
class InstanceActionNotFound(NovaException):
msg_fmt = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
msg_fmt = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
msg_fmt = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
class ServiceGroupUnavailable(NovaException):
msg_fmt = _("The service from servicegroup driver %(driver)s is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
msg_fmt = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
class BuildAbortException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
class RescheduledException(NovaException):
msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
"%(reason)s")
class ShadowTableExists(NovaException):
msg_fmt = _("Shadow table with name %(name)s already exists.")
class InstanceFaultRollback(NovaException):
def __init__(self, inner_exception=None):
message = _("Instance rollback performed due to: %s")
self.inner_exception = inner_exception
super(InstanceFaultRollback, self).__init__(message % inner_exception)
class OrphanedObjectError(NovaException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class ObjectActionError(NovaException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class CoreAPIMissing(NovaException):
msg_fmt = _("Core API extensions are missing: %(missing_apis)s")
class AgentError(NovaException):
msg_fmt = _('Error during following call to agent: %(method)s')
class AgentTimeout(AgentError):
msg_fmt = _('Unable to contact guest agent. '
'The following call timed out: %(method)s')
class AgentNotImplemented(AgentError):
msg_fmt = _('Agent does not support the call: %(method)s')
class InstanceGroupNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s could not be found.")
class InstanceGroupIdExists(NovaException):
msg_fmt = _("Instance group %(group_uuid)s already exists.")
class InstanceGroupMemberNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no member with "
"id %(instance_id)s.")
class InstanceGroupPolicyNotFound(NotFound):
msg_fmt = _("Instance group %(group_uuid)s has no policy %(policy)s.")
class InstanceGroupSaveException(NovaException):
msg_fmt = _("%(field)s should not be part of the updates.")
class PluginRetriesExceeded(NovaException):
msg_fmt = _("Number of retries to plugin (%(num_retries)d) exceeded.")
class ImageDownloadModuleError(NovaException):
msg_fmt = _("There was an error with the download module %(module)s. "
"%(reason)s")
class ImageDownloadModuleMetaDataError(ImageDownloadModuleError):
msg_fmt = _("The metadata for this location will not work with this "
"module %(module)s. %(reason)s.")
class ImageDownloadModuleNotImplementedError(ImageDownloadModuleError):
msg_fmt = _("The method %(method_name)s is not implemented.")
class ImageDownloadModuleConfigurationError(ImageDownloadModuleError):
msg_fmt = _("The module %(module)s is misconfigured: %(reason)s.")
class ResourceMonitorError(NovaException):
msg_fmt = _("Error when creating resource monitor: %(monitor)s")
class PciDeviceWrongAddressFormat(NovaException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
class PciDeviceInvalidAddressField(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI address %(address)s has an invalid %(field)s.")
class PciDeviceInvalidDeviceName(NovaException):
msg_fmt = _("Invalid PCI Whitelist: "
"The PCI whitelist can specify devname or address,"
" but not both")
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
class PciDeviceNotFound(NotFound):
msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
class PciDeviceInvalidStatus(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is %(status)s "
"instead of %(hopestatus)s")
class PciDeviceInvalidOwner(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
"instead of %(hopeowner)s")
class PciDeviceRequestFailed(NovaException):
msg_fmt = _(
"PCI device request (%requests)s failed")
class PciDevicePoolEmpty(NovaException):
msg_fmt = _(
"Attempt to consume PCI device %(compute_node_id)s:%(address)s "
"from empty pool")
class PciInvalidAlias(Invalid):
msg_fmt = _("Invalid PCI alias definition: %(reason)s")
class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
class MissingParameter(NovaException):
ec2_code = 'MissingParameter'
msg_fmt = _("Not enough parameters: %(reason)s")
code = 400
class PciConfigInvalidWhitelist(Invalid):
msg_fmt = _("Invalid PCI devices Whitelist config %(reason)s")
# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
ec2_code = 'InternalError'
msg_fmt = "%(err)s"
class PciDevicePrepareFailed(NovaException):
msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
"%(instance_uuid)s: %(reason)s")
class PciDeviceDetachFailed(NovaException):
msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
class PciDeviceUnsupportedHypervisor(NovaException):
msg_fmt = _("%(type)s hypervisor does not support PCI devices")
class KeyManagerError(NovaException):
msg_fmt = _("Key manager error: %(reason)s")
class VolumesNotRemoved(Invalid):
msg_fmt = _("Failed to remove volume(s): (%(reason)s)")
class InvalidVideoMode(Invalid):
msg_fmt = _("Provided video model (%(model)s) is not supported.")
class RngDeviceNotExist(Invalid):
msg_fmt = _("The provided RNG device path: (%(path)s) is not "
"present on the host.")
class RequestedVRamTooHigh(NovaException):
msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
"than the maximum allowed by flavor %(max_vram)d.")
class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
class NoLiveMigrationForConfigDriveInLibVirt(NovaException):
msg_fmt = _("Live migration of instances with config drives is not "
"supported in libvirt unless libvirt instance path and "
"drive data is shared across compute nodes.")
class LiveMigrationWithOldNovaNotSafe(NovaException):
msg_fmt = _("Host %(server)s is running an old version of Nova, "
"live migrations involving that version may cause data loss. "
"Upgrade Nova on %(server)s and try again.")
class UnshelveException(NovaException):
msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
class ImageVCPULimitsRangeExceeded(Invalid):
msg_fmt = _("Image vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPUTopologyRangeExceeded(Invalid):
msg_fmt = _("Image vCPU topology %(sockets)d:%(cores)d:%(threads)d "
"exceeds permitted %(maxsockets)d:%(maxcores)d:%(maxthreads)d")
class ImageVCPULimitsRangeImpossible(Invalid):
msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"are impossible to satisfy for vcpus count %(vcpus)d")
class InvalidArchitectureName(Invalid):
msg_fmt = _("Architecture name '%(arch)s' is not recognised")
class ImageNUMATopologyIncomplete(Invalid):
msg_fmt = _("CPU and memory allocation must be provided for all "
"NUMA nodes")
class ImageNUMATopologyForbidden(Forbidden):
msg_fmt = _("Image property '%(name)s' is not permitted to override "
"NUMA configuration set against the flavor")
class ImageNUMATopologyAsymmetric(Invalid):
msg_fmt = _("Asymmetric NUMA topologies require explicit assignment "
"of CPUs and memory to nodes in image or flavor")
class ImageNUMATopologyCPUOutOfRange(Invalid):
msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d")
class ImageNUMATopologyCPUDuplicates(Invalid):
msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes")
class ImageNUMATopologyCPUsUnassigned(Invalid):
msg_fmt = _("CPU number %(cpuset)s is not assigned to any node")
class ImageNUMATopologyMemoryOutOfRange(Invalid):
msg_fmt = _("%(memsize)d MB of memory assigned, but expected "
"%(memtotal)d MB")
class InvalidHostname(Invalid):
msg_fmt = _("Invalid characters in hostname '%(hostname)s'")
class NumaTopologyNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a NUMA topology")
class MigrationContextNotFound(NotFound):
msg_fmt = _("Instance %(instance_uuid)s does not specify a migration "
"context.")
class SocketPortRangeExhaustedException(NovaException):
msg_fmt = _("Not able to acquire a free port for %(host)s")
class SocketPortInUseException(NovaException):
msg_fmt = _("Not able to bind %(host)s:%(port)d, %(error)s")
class ImageSerialPortNumberInvalid(Invalid):
msg_fmt = _("Number of serial ports '%(num_ports)s' specified in "
"'%(property)s' isn't valid.")
class ImageSerialPortNumberExceedFlavorValue(Invalid):
msg_fmt = _("Forbidden to exceed flavor value of number of serial "
"ports passed in image meta.")
class InvalidImageConfigDrive(Invalid):
msg_fmt = _("Image's config drive option '%(config_drive)s' is invalid")
class InvalidHypervisorVirtType(Invalid):
msg_fmt = _("Hypervisor virtualization type '%(hv_type)s' is not "
"recognised")
class InvalidVirtualMachineMode(Invalid):
msg_fmt = _("Virtual machine mode '%(vmmode)s' is not recognised")
class InvalidToken(Invalid):
msg_fmt = _("The token '%(token)s' is invalid or has expired")
class InvalidConnectionInfo(Invalid):
msg_fmt = _("Invalid Connection Info")
class InstanceQuiesceNotSupported(Invalid):
msg_fmt = _('Quiescing is not supported in instance %(instance_id)s')
class QemuGuestAgentNotEnabled(Invalid):
msg_fmt = _('QEMU guest agent is not enabled')
class SetAdminPasswdNotSupported(Invalid):
msg_fmt = _('Set admin password is not supported')
class MemoryPageSizeInvalid(Invalid):
msg_fmt = _("Invalid memory page size '%(pagesize)s'")
class MemoryPageSizeForbidden(Invalid):
msg_fmt = _("Page size %(pagesize)s forbidden against '%(against)s'")
class MemoryPageSizeNotSupported(Invalid):
msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
class CPUPinningNotSupported(Invalid):
msg_fmt = _("CPU pinning is not supported by the host: "
"%(reason)s")
class CPUPinningInvalid(Invalid):
msg_fmt = _("Cannot pin/unpin cpus %(requested)s from the following "
"pinned set %(pinned)s")
class CPUPinningUnknown(Invalid):
msg_fmt = _("CPU set to pin/unpin %(requested)s must be a subset of "
"known CPU set %(cpuset)s")
class ImageCPUPinningForbidden(Forbidden):
msg_fmt = _("Image property 'hw_cpu_policy' is not permitted to override "
"CPU pinning policy set against the flavor")
class UnsupportedPolicyException(Invalid):
msg_fmt = _("ServerGroup policy is not supported: %(reason)s")
class CellMappingNotFound(NotFound):
msg_fmt = _("Cell %(uuid)s has no mapping.")
class NUMATopologyUnsupported(Invalid):
msg_fmt = _("Host does not support guests with NUMA topology set")
class MemoryPagesUnsupported(Invalid):
msg_fmt = _("Host does not support guests with custom memory page sizes")
class EnumFieldInvalid(Invalid):
msg_fmt = _('%(typename)s in %(fieldname)s is not an instance of Enum')
class EnumFieldUnset(Invalid):
msg_fmt = _('%(fieldname)s missing field type')
class InvalidImageFormat(Invalid):
msg_fmt = _("Invalid image format '%(format)s'")
class UnsupportedImageModel(Invalid):
msg_fmt = _("Image model '%(image)s' is not supported")
class HostMappingNotFound(Invalid):
msg_fmt = _("Host '%(name)s' is not mapped to any cell")
| apache-2.0 | 4,202,397,522,976,552,000 | -4,229,402,772,299,271,700 | 28.09826 | 79 | 0.672852 | false |
dpmatthews/rose | metomi/rose/apps/rose_prune.py | 4 | 11898 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright (C) 2012-2019 British Crown (Met Office) & Contributors.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
"""Builtin application: rose_prune: suite housekeeping application."""
import os
from random import shuffle
from metomi.rose.app_run import BuiltinApp, ConfigValueError
from metomi.rose.date import RoseDateTimeOperator
from metomi.rose.env import env_var_process, UnboundEnvironmentVariableError
from metomi.rose.fs_util import FileSystemEvent
from metomi.rose.host_select import HostSelector
from metomi.rose.popen import RosePopenError
import shlex
class RosePruneApp(BuiltinApp):
"""Prune files and directories generated by suite tasks."""
SCHEME = "rose_prune"
SECTION = "prune"
def run(self, app_runner, conf_tree, opts, args, uuid, work_files):
"""Suite housekeeping application.
This application is designed to work under "rose task-run" in a cycling
suite.
"""
suite_name = os.getenv("ROSE_SUITE_NAME")
if not suite_name:
return
# Tar-gzip job logs on suite host
# Prune job logs on remote hosts and suite host
prune_remote_logs_cycles = self._get_conf(
app_runner, conf_tree, "prune-remote-logs-at")
prune_server_logs_cycles = self._get_conf(
app_runner, conf_tree, "prune-server-logs-at")
archive_logs_cycles = self._get_conf(
app_runner, conf_tree, "archive-logs-at")
if (prune_remote_logs_cycles or
prune_server_logs_cycles or
archive_logs_cycles):
tmp_prune_remote_logs_cycles = []
for cycle in prune_remote_logs_cycles:
if cycle not in archive_logs_cycles:
tmp_prune_remote_logs_cycles.append(cycle)
prune_remote_logs_cycles = tmp_prune_remote_logs_cycles
tmp_prune_server_logs_cycles = []
for cycle in prune_server_logs_cycles:
if cycle not in archive_logs_cycles:
tmp_prune_server_logs_cycles.append(cycle)
prune_server_logs_cycles = tmp_prune_server_logs_cycles
if prune_remote_logs_cycles:
app_runner.suite_engine_proc.job_logs_pull_remote(
suite_name, prune_remote_logs_cycles,
prune_remote_mode=True)
if prune_server_logs_cycles:
app_runner.suite_engine_proc.job_logs_remove_on_server(
suite_name, prune_server_logs_cycles)
if archive_logs_cycles:
app_runner.suite_engine_proc.job_logs_archive(
suite_name, archive_logs_cycles)
# Prune other directories
globs, cycle_set = self._get_prune_globs(app_runner, conf_tree)
if not globs:
return
suite_engine_proc = app_runner.suite_engine_proc
hosts = suite_engine_proc.get_suite_jobs_auths(
suite_name, [(cycle, None) for cycle in cycle_set])
# A shuffle here should allow the load for doing "rm -rf" to be shared
# between job hosts who share a file system.
shuffle(hosts)
suite_dir_rel = suite_engine_proc.get_suite_dir_rel(suite_name)
form_dict = {"d": suite_dir_rel, "g": " ".join(globs)}
sh_cmd_head = r"set -e; cd %(d)s; " % form_dict
sh_cmd = (
r"set +e; ls -d %(g)s; " +
r"set -e; rm -fr %(g)s") % form_dict
cwd = os.getcwd()
host_selector = HostSelector(
app_runner.event_handler, app_runner.popen)
for host in hosts + [host_selector.get_local_host()]:
sdir = None
try:
if host_selector.is_local_host(host):
sdir = suite_engine_proc.get_suite_dir(suite_name)
app_runner.fs_util.chdir(sdir)
out = app_runner.popen.run_ok(
"bash", "-O", "extglob", "-c", sh_cmd)[0]
else:
cmd = app_runner.popen.get_cmd(
"ssh", host,
"bash -O extglob -c '" + sh_cmd_head + sh_cmd + "'")
out = app_runner.popen.run_ok(*cmd)[0]
except RosePopenError as exc:
app_runner.handle_event(exc)
else:
if sdir is None:
event = FileSystemEvent(FileSystemEvent.CHDIR,
host + ":" + suite_dir_rel)
app_runner.handle_event(event)
for line in sorted(out.splitlines()):
if not host_selector.is_local_host(host):
line = host + ":" + line
event = FileSystemEvent(
FileSystemEvent.DELETE, line.decode())
app_runner.handle_event(event)
finally:
if sdir:
app_runner.fs_util.chdir(cwd)
return
def _get_conf(self, app_runner, conf_tree, key, max_args=0):
"""Get a list of cycles from a configuration setting.
key -- An option key in self.SECTION to locate the setting.
max_args -- Maximum number of extra arguments for an item in the list.
The value of the setting is expected to be split by shlex.split into a
list of items. If max_args == 0, an item should be a string
representing a cycle or a cycle offset. If max_args > 0, the cycle
or cycle offset string can, optionally, have arguments. The arguments
are delimited by colons ":".
E.g.:
prune-remote-logs-at=-PT6H -PT12H
prune-server-logs-at=-P7D
prune-datac-at=-PT6H:foo/* -PT12H:'bar/* baz/*' -P1D
prune-work-at=-PT6H:t1*:*.tar -PT12H:t1*: -PT12H:*.gz -P1D
If max_args == 0, return a list of cycles.
If max_args > 0, return a list of (cycle, [arg, ...])
"""
items_str = conf_tree.node.get_value([self.SECTION, key])
if items_str is None:
return []
try:
items_str = env_var_process(items_str)
except UnboundEnvironmentVariableError as exc:
raise ConfigValueError([self.SECTION, key], items_str, exc)
items = []
ref_point_str = os.getenv(
RoseDateTimeOperator.TASK_CYCLE_TIME_ENV)
try:
ref_point = None
ref_fmt = None
for item_str in shlex.split(items_str):
args = item_str.split(":", max_args)
when = args.pop(0)
cycle = when
if ref_point_str is not None:
if self._get_cycling_mode() == "integer":
# Integer cycling
if "P" in when: # "when" is an offset
cycle = str(int(ref_point_str) +
int(when.replace("P", "")))
else: # "when" is a cycle point
cycle = str(when)
else:
# Date-time cycling
if ref_fmt is None:
ref_point, ref_fmt = (
app_runner.date_time_oper.date_parse(
ref_point_str))
try:
time_point = app_runner.date_time_oper.date_parse(
when)[0]
except ValueError:
time_point = app_runner.date_time_oper.date_shift(
ref_point, when)
cycle = app_runner.date_time_oper.date_format(
ref_fmt, time_point)
if max_args:
items.append((cycle, args))
else:
items.append(cycle)
except ValueError as exc:
raise ConfigValueError([self.SECTION, key], items_str, exc)
return items
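# Illustrative sketch (not part of the original source): with the task cycle
# point 20150101T0000Z and a setting such as
#   prune-work-at=-PT6H:t1*.tar -P1D
# a call like self._get_conf(app_runner, conf_tree, "prune-work-at",
# max_args=1) would be expected to yield something like
#   [("20141231T1800Z", ["t1*.tar"]), ("20141231T0000Z", [])]
# The exact cycle strings depend on the suite's cycling mode and date format.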
@classmethod
def _get_cycling_mode(cls):
"""Return task cycling mode."""
return os.getenv("ROSE_CYCLING_MODE")
def _get_prune_globs(self, app_runner, conf_tree):
"""Return (globs, cycles).
where:
* globs is for matching items to prune.
* cycles is a set of relevant cycles.
"""
globs = []
nodes = conf_tree.node.get_value([self.SECTION])
if nodes is None:
return [], set()
cycle_formats = {}
for key, node in nodes.items():
if node.is_ignored():
continue
if key.startswith("cycle-format{") and key.endswith("}"):
fmt = key[len("cycle-format{"):-1]
try:
cycle_formats[fmt] = env_var_process(node.value)
# Check formats are valid
if self._get_cycling_mode() == "integer":
cycle_formats[fmt] % 0
else:
app_runner.date_time_oper.date_format(
cycle_formats[fmt])
except (UnboundEnvironmentVariableError, ValueError) as exc:
raise ConfigValueError(
[self.SECTION, key], node.value, exc)
cycle_set = set()
for key, node in sorted(nodes.items()):
if node.is_ignored():
continue
if key == "prune-datac-at": # backward compat
head = "share/cycle"
elif key == "prune-work-at": # backward compat
head = "work"
elif key.startswith("prune{") and key.endswith("}"):
head = key[len("prune{"):-1].strip() # remove "prune{" and "}"
else:
continue
for cycle, cycle_args in self._get_conf(
app_runner, conf_tree, key, max_args=1):
cycle_set.add(cycle)
if cycle_args:
cycle_strs = {"cycle": cycle}
for cycle_key, cycle_format in cycle_formats.items():
if self._get_cycling_mode() == "integer":
cycle_strs[cycle_key] = cycle_format % int(cycle)
else: # date time cycling
cycle_point = (
app_runner.date_time_oper.date_parse(cycle)[0])
cycle_strs[cycle_key] = (
app_runner.date_time_oper.date_format(
cycle_format, cycle_point))
for tail_glob in shlex.split(cycle_args.pop()):
glob_ = tail_glob % cycle_strs
if glob_ == tail_glob: # no substitution
glob_ = os.path.join(cycle, tail_glob)
globs.append(os.path.join(head, glob_))
else:
globs.append(os.path.join(head, cycle))
return globs, cycle_set
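# Illustrative sketch (not part of the original source): a hypothetical
# setting prune{share/cycle}=-P1D:foo* would make this method add
# "share/cycle/<cycle>/foo*" to globs (with <cycle> resolved one day behind
# the current cycle point) and add that cycle to cycle_set.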
| gpl-3.0 | 1,453,670,310,399,064,000 | -2,420,313,045,492,657,000 | 42.742647 | 79 | 0.51723 | false |
alistairlow/tensorflow | tensorflow/python/summary/text_summary.py | 19 | 2872 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements text_summary in TensorFlow, with TensorBoard support.
The text_summary is a wrapper around the generic tensor_summary that takes a
string-type tensor and emits a TensorSummary op with SummaryMetadata that
notes that this summary is textual data for the TensorBoard text plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.ops.summary_ops import tensor_summary
PLUGIN_NAME = "text"
def text_summary(name, tensor, collections=None):
"""Summarizes textual data.
Text data summarized via this plugin will be visible in the Text Dashboard
in TensorBoard. The standard TensorBoard Text Dashboard will render markdown
in the strings, and will automatically organize 1d and 2d tensors into tables.
If a tensor with more than 2 dimensions is provided, a 2d subarray will be
displayed along with a warning message. (Note that this behavior is not
intrinsic to the text summary api, but rather to the default TensorBoard text
plugin.)
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
tensor: a string-type Tensor to summarize.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
A TensorSummary op that is configured so that TensorBoard will recognize
that it contains textual data. The TensorSummary is a scalar `Tensor` of
type `string` which contains `Summary` protobufs.
Raises:
ValueError: If tensor has the wrong type.
"""
if tensor.dtype != dtypes.string:
raise ValueError("Expected tensor %s to have dtype string, got %s" %
(tensor.name, tensor.dtype))
summary_metadata = summary_pb2.SummaryMetadata(
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=PLUGIN_NAME))
t_summary = tensor_summary(
name=name,
tensor=tensor,
summary_metadata=summary_metadata,
collections=collections)
return t_summary
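# Illustrative usage sketch (not part of the original module), assuming the
# TF 1.x graph-mode API; the names and log directory below are hypothetical:
#
# import tensorflow as tf
# greeting = tf.constant("Hello **TensorBoard**!")
# summary_op = text_summary("greeting", greeting)
# with tf.Session() as sess:
#     writer = tf.summary.FileWriter("/tmp/text_logs", sess.graph)
#     writer.add_summary(sess.run(summary_op), global_step=0)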
| apache-2.0 | -6,929,712,773,762,184,000 | 1,529,433,764,951,730,700 | 39.450704 | 80 | 0.730153 | false |
radicalbit/ambari | ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py | 4 | 6865 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.core.system import System
import os
config = Script.get_config()
#RPM versioning support
rpm_version = default("/configurations/cluster-env/rpm_version", None)
#hadoop params
if rpm_version:
mapreduce_libs_path = "/usr/bigtop/current/hadoop-mapreduce-client/*"
hadoop_libexec_dir = "/usr/bigtop/current/hadoop-client/libexec"
hadoop_lib_home = "/usr/bigtop/current/hadoop-client/lib"
hadoop_bin = "/usr/bigtop/current/hadoop-client/sbin"
hadoop_home = '/usr/bigtop/current/hadoop-client'
else:
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
hadoop_lib_home = "/usr/lib/hadoop/lib"
hadoop_bin = "/usr/lib/hadoop/sbin"
hadoop_home = '/usr'
hadoop_conf_dir = "/etc/hadoop/conf"
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#users and groups
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
#hosts
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
has_namenode = not len(namenode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_slaves = not len(slave_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_zk_host = not len(zk_hosts) == 0
has_ganglia_server = not len(ganglia_server_hosts) == 0
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
ganglia_server_host = ganglia_server_hosts[0]
#hadoop params
if has_namenode:
hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
#db params
server_db_name = config['hostLevelParams']['db_name']
db_driver_filename = config['hostLevelParams']['db_driver_filename']
oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
ambari_server_resources = config['hostLevelParams']['jdk_location']
oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
rca_enabled = config['configurations']['hadoop-env']['rca_enabled']
else:
rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
rca_prefix = ""
else:
rca_prefix = rca_disabled_prefix
#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']
if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.get_instance().os_family != "suse":
# deprecated rhel jsvc_path
jsvc_path = "/usr/libexec/bigtop-utils"
else:
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#log4j.properties
yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
#log4j.properties
if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
log4j_props = config['configurations']['hdfs-log4j']['content']
if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
log4j_props += config['configurations']['yarn-log4j']['content']
else:
log4j_props = None
| apache-2.0 | 5,238,976,066,763,688,000 | -8,166,904,280,991,525,000 | 42.449367 | 115 | 0.740131 | false |
google/ion | ion/dev/doxygen_filter.py | 1 | 8299 | #!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Doxygen pre-filter script for ion.
This filter processes code and adds Doxygen-compatible markup in various places
to enable Doxygen to read the docs more fully. Unlike some other Doxygen
filters, it is designed to work with Doxygen's newer markdown syntax.
In order to ensure proper syntax coloring of indented code blocks, make sure
there is a blank (commented) line both above and below the block. For example:
// Comment comment comment.
//
// int CodeBlock() {
// Goes here;
// }
//
// More comment.
"""
import re
import sys
class DoxygenFormatter(object):
"""Transforms lines of a source file to make them doxygen-friendly."""
ANYWHERE = 'anywhere'
COMMENT = 'comment'
def __init__(self, outfile):
# The file-like object to which we will write lines.
self.out = outfile
# A buffer for storing empty lines which we can use later if we need to
# retroactively insert markup without causing line number offset problems.
self.empty_line_buffer = []
# Whether we are currently inside an indented code block.
self.in_code_block = False
self.CompileExpressions()
def CompileExpressions(self):
"""Pre-compiles frequently used regexps for improved performance.
The regexps are arranged as a list of 3-tuples, where the second value is
the replacement string (which may include backreferences) and the third
value is one of the context constants ANYWHERE or COMMENT. This is a list
of tuples instead of a dictionary because order matters: earlier regexps
will be applied first, and the resulting text (not the original) will be
what is seen by subsequent regexps.
"""
self.comment_regex = re.compile(r'^\s*//')
self.substitutions = [
# Remove copyright lines.
(re.compile(r'^\s*//\s*[Cc]opyright.*Google.*'), r'', self.ANYWHERE),
# Remove any comment lines that consist of only punctuation (banners).
# We only allow a maximum of two spaces before the punctuation so we
# don't accidentally get rid of code examples with bare braces and
# whatnot.
(re.compile(r'(^\s*)//\s{0,2}[-=#/]+$'), r'\1//\n', self.ANYWHERE),
# If we find something that looks like a list item that is indented four
# or more spaces, pull it back to the left so doxygen's Markdown engine
# doesn't treat it like a code block.
(re.compile(r'(^\s*)//\s{4,}([-\d*].*)'), r'\1 \2', self.COMMENT),
(re.compile(r'TODO'), r'@todo ', self.COMMENT),
# Replace leading 'Note:' or 'Note that' in a comment with @note
(re.compile(r'(\/\/\s+)Note(?:\:| that)', re.I), r'\1@note',
self.COMMENT),
# Replace leading 'Warning:' in a comment with @warning
(re.compile(r'(\/\/\s+)Warning:', re.I), r'\1@warning', self.COMMENT),
# Replace leading 'Deprecated' in a comment with @deprecated
(re.compile(r'(\/\/\s+)Deprecated[^\w\s]*', re.I), r'\1@deprecated',
self.COMMENT),
# Replace pipe-delimited parameter names with backtick-delimiters
(re.compile(r'\|(\w+)\|'), r'`\1`', self.COMMENT),
# Convert standalone comment lines to Doxygen style.
(re.compile(r'(^\s*)//(?=[^/])'), r'\1///', self.ANYWHERE),
# Strip trailing comments from preprocessor directives.
(re.compile(r'(^#.*)//.*'), r'\1', self.ANYWHERE),
# Convert remaining trailing comments to doxygen style, unless they are
# documenting the end of a block.
(re.compile(r'([^} ]\s+)//(?=[^/])'), r'\1///<', self.ANYWHERE),
]
def Transform(self, line):
"""Performs the regexp transformations defined by self.substitutions.
Args:
line: The line to transform.
Returns:
The resulting line.
"""
for (regex, repl, where) in self.substitutions:
if where is self.COMMENT and not self.comment_regex.match(line):
return line
line = regex.sub(repl, line)
return line
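# Illustrative example (not part of the original filter): given the
# substitutions compiled above, a line such as
#   '// Note: uses |buf| internally.'
# is returned by Transform() as
#   '/// @note uses `buf` internally.'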
def AppendToBufferedLine(self, text):
"""Appends text to the last buffered empty line.
Empty lines are buffered rather than being written out directly. This lets
us retroactively rewrite buffered lines to include markup that affects the
following line, while avoiding the line number offset that would result from
inserting a line that wasn't in the original source.
Args:
text: The text to append to the line.
Returns:
True if there was an available empty line to which text could be
appended, and False otherwise.
"""
if self.empty_line_buffer:
last_line = self.empty_line_buffer.pop().rstrip()
last_line += text + '\n'
self.empty_line_buffer.append(last_line)
return True
else:
return False
def ConvertCodeBlock(self, line):
"""Converts any code block that may begin or end on this line.
Doxygen has (at least) two kinds of code blocks. Any block indented at
least four spaces gets formatted as code, but (for some reason) no syntax
highlighting is applied. Any block surrounded by "~~~" on both sides is
also treated as code, but these are syntax highlighted intelligently
depending on the file type. We typically write code blocks in the former
style, but we'd like them to be highlighted, so this function converts them
to the latter style by adding in the ~~~ lines.
To make this a bit more complicated, we would really prefer not to insert
new lines into the file, since that will make the line numbers shown in
doxygen not match the line numbers in the actual source code. For this
reason, we only perform the conversion if at least one "blank" line (empty
comment line) appears before the start of the code block. If we get down to
the bottom of the block and there's no blank line after it, we will be
forced to add a line, since we can't go back and undo what we already did.
Args:
line: The line to process.
Returns:
The converted line.
"""
if not self.in_code_block and re.match(r'\s*///\s{4,}', line):
if self.AppendToBufferedLine(' ~~~'):
# If this fails, we'll just leave it un-highlighted.
self.in_code_block = True
elif self.in_code_block and not re.match(r'\s*///\s{4,}', line):
if not self.AppendToBufferedLine(' ~~~'):
# This is bad. We don't have a buffered line to use to end the code
# block, so we'll have to insert one. This will cause the line
# numbers to stop matching the original source, unfortunately.
line = '/// ~~~\n' + line
self.in_code_block = False
return line
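  # Example of the conversion performed above (illustrative input/output only;
  # the sample code inside the comment is an assumption):
  #
  #   ///                          becomes   /// ~~~
  #   ///     int x = Foo();                 ///     int x = Foo();
  #   ///                                    /// ~~~
  #
  # The buffered blank comment lines are rewritten in place, so the total line
  # count is preserved whenever a blank comment line exists on both sides.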
def ProcessLine(self, line):
"""Processes a line.
If the line is an empty line inside a comment, we buffer it for possible
rewriting later on. Otherwise, we transform it using our regexps and
write it (as well as any buffered blank lines) out to the output.
Args:
line: The line to process.
"""
line = self.Transform(line)
if line.strip() == '///':
# We may repurpose this empty line later, so don't write it out yet.
self.empty_line_buffer.append(line)
else:
line = self.ConvertCodeBlock(line)
# Flush the line buffer and write this line as well.
for buffered_line in self.empty_line_buffer:
self.out.write(buffered_line)
self.empty_line_buffer = []
self.out.write(line)
def main(argv):
sourcefile = argv[1]
with open(sourcefile, 'r') as infile:
formatter = DoxygenFormatter(sys.stdout)
for line in infile:
formatter.ProcessLine(line)
if __name__ == '__main__':
main(sys.argv)
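# Example usage (illustrative; the file names shown here are assumptions):
#
#   $ python doxygen_filter.py my_header.h > my_header_filtered.h
#
# Doxygen can also apply the script on the fly through its standard
# INPUT_FILTER option, e.g. in the Doxyfile:
#
#   INPUT_FILTER = "python doxygen_filter.py"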
| apache-2.0 | 5,432,935,695,032,352,000 | -6,842,194,362,456,261,000 | 35.884444 | 80 | 0.662369 | false |
FlannelFox/FlannelFox | tests/flannelfox/torrenttools/test_torrentQueue.py | 1 | 1999 | # -*- coding: utf-8 -*-
import unittest
from unittest.mock import patch
import os
from flannelfox.torrenttools.TorrentQueue import Queue
from flannelfox.torrenttools import Torrents
class TestTorrentQueue(unittest.TestCase):
testDatabaseFile = 'ff.db'
def removeDatabase(self):
try:
os.remove(self.testDatabaseFile)
except Exception:
pass
@patch.object(Queue, 'databaseTorrentBlacklisted')
@patch.object(Queue, 'databaseTorrentExists')
def test_Queue(self, mockDatabaseTorrentExists, mockDatabaseTorrentBlacklisted):
self.removeDatabase()
torrentQueue = Queue()
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = False
# Ensure len returns a valid answer
self.assertEqual(len(torrentQueue), 0)
# Make sure appending an item works
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e01.720p.junk.here'))
self.assertEqual(len(torrentQueue), 1)
# Make sure appending a duplicate item does not work
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e01.720p.junk.here'))
self.assertEqual(len(torrentQueue), 1)
# Add a different item and make sure it works
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here2'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = True
mockDatabaseTorrentExists.return_value = False
# Check if Blacklisted torrent gets blocked
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here3'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = True
# Check if Existing Torrent in Database gets blocked
torrentQueue.append(Torrents.TV(torrentTitle='some.show.s01e02.720p.junk.here3'))
self.assertEqual(len(torrentQueue), 2)
mockDatabaseTorrentBlacklisted.return_value = False
mockDatabaseTorrentExists.return_value = False
if __name__ == '__main__':
unittest.main()
| mit | 572,720,309,520,632,300 | -3,751,697,230,937,025,000 | 29.753846 | 83 | 0.78039 | false |
vhosouza/invesalius3 | invesalius/gui/task_exporter.py | 1 | 15556 | #--------------------------------------------------------------------------
# Software: InVesalius - 3D Reconstruction Software for Medical Images
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: [email protected]
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2 of the
# License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#--------------------------------------------------------------------------
import os
import pathlib
import sys
import wx
try:
import wx.lib.agw.hyperlink as hl
except ImportError:
import wx.lib.hyperlink as hl
import wx.lib.platebtn as pbtn
from pubsub import pub as Publisher
import invesalius.constants as const
import invesalius.gui.dialogs as dlg
import invesalius.project as proj
import invesalius.session as ses
from invesalius import inv_paths
BTN_MASK = wx.NewId()
BTN_PICTURE = wx.NewId()
BTN_SURFACE = wx.NewId()
BTN_REPORT = wx.NewId()
BTN_REQUEST_RP = wx.NewId()
WILDCARD_SAVE_3D = "Inventor (*.iv)|*.iv|"\
"PLY (*.ply)|*.ply|"\
"Renderman (*.rib)|*.rib|"\
"STL (*.stl)|*.stl|"\
"STL ASCII (*.stl)|*.stl|"\
"VRML (*.vrml)|*.vrml|"\
"VTK PolyData (*.vtp)|*.vtp|"\
"Wavefront (*.obj)|*.obj|"\
"X3D (*.x3d)|*.x3d"
INDEX_TO_TYPE_3D = {0: const.FILETYPE_IV,
1: const.FILETYPE_PLY,
2: const.FILETYPE_RIB,
3: const.FILETYPE_STL,
4: const.FILETYPE_STL_ASCII,
5: const.FILETYPE_VRML,
6: const.FILETYPE_VTP,
7: const.FILETYPE_OBJ,
8: const.FILETYPE_X3D}
INDEX_TO_EXTENSION = {0: "iv",
1: "ply",
2: "rib",
3: "stl",
4: "stl",
5: "vrml",
6: "vtp",
7: "obj",
8: "x3d"}
WILDCARD_SAVE_2D = "BMP (*.bmp)|*.bmp|"\
"JPEG (*.jpg)|*.jpg|"\
"PNG (*.png)|*.png|"\
"PostScript (*.ps)|*.ps|"\
"Povray (*.pov)|*.pov|"\
"TIFF (*.tiff)|*.tiff"
INDEX_TO_TYPE_2D = {0: const.FILETYPE_BMP,
1: const.FILETYPE_JPG,
2: const.FILETYPE_PNG,
3: const.FILETYPE_PS,
4: const.FILETYPE_POV,
5: const.FILETYPE_OBJ}
WILDCARD_SAVE_MASK = "VTK ImageData (*.vti)|*.vti"
class TaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
inner_panel = InnerTaskPanel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(inner_panel, 1, wx.EXPAND | wx.GROW | wx.BOTTOM | wx.RIGHT |
wx.LEFT, 7)
sizer.Fit(self)
self.SetSizer(sizer)
self.Update()
self.SetAutoLayout(1)
class InnerTaskPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
        background_colour = wx.Colour(255,255,255)
        self.SetBackgroundColour(background_colour)
self.SetAutoLayout(1)
# Counter for projects loaded in current GUI
# Fixed hyperlink items
tooltip = wx.ToolTip(_("Export InVesalius screen to an image file"))
link_export_picture = hl.HyperLinkCtrl(self, -1,
_("Export picture..."))
link_export_picture.SetUnderlines(False, False, False)
link_export_picture.SetBold(True)
link_export_picture.SetColours("BLACK", "BLACK", "BLACK")
link_export_picture.SetBackgroundColour(self.GetBackgroundColour())
link_export_picture.SetToolTip(tooltip)
link_export_picture.AutoBrowse(False)
link_export_picture.UpdateLink()
link_export_picture.Bind(hl.EVT_HYPERLINK_LEFT,
self.OnLinkExportPicture)
tooltip = wx.ToolTip(_("Export 3D surface"))
link_export_surface = hl.HyperLinkCtrl(self, -1,_("Export 3D surface..."))
link_export_surface.SetUnderlines(False, False, False)
link_export_surface.SetBold(True)
link_export_surface.SetColours("BLACK", "BLACK", "BLACK")
link_export_surface.SetBackgroundColour(self.GetBackgroundColour())
link_export_surface.SetToolTip(tooltip)
link_export_surface.AutoBrowse(False)
link_export_surface.UpdateLink()
link_export_surface.Bind(hl.EVT_HYPERLINK_LEFT,
self.OnLinkExportSurface)
#tooltip = wx.ToolTip(_("Export 3D mask (voxels)"))
#link_export_mask = hl.HyperLinkCtrl(self, -1,_("Export mask..."))
#link_export_mask.SetUnderlines(False, False, False)
#link_export_mask.SetColours("BLACK", "BLACK", "BLACK")
#link_export_mask.SetToolTip(tooltip)
#link_export_mask.AutoBrowse(False)
#link_export_mask.UpdateLink()
#link_export_mask.Bind(hl.EVT_HYPERLINK_LEFT,
# self.OnLinkExportMask)
#tooltip = wx.ToolTip("Request rapid prototyping services")
#link_request_rp = hl.HyperLinkCtrl(self,-1,"Request rapid prototyping...")
#link_request_rp.SetUnderlines(False, False, False)
#link_request_rp.SetColours("BLACK", "BLACK", "BLACK")
#link_request_rp.SetToolTip(tooltip)
#link_request_rp.AutoBrowse(False)
#link_request_rp.UpdateLink()
#link_request_rp.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLinkRequestRP)
#tooltip = wx.ToolTip("Open report tool...")
#link_report = hl.HyperLinkCtrl(self,-1,"Open report tool...")
#link_report.SetUnderlines(False, False, False)
#link_report.SetColours("BLACK", "BLACK", "BLACK")
#link_report.SetToolTip(tooltip)
#link_report.AutoBrowse(False)
#link_report.UpdateLink()
#link_report.Bind(hl.EVT_HYPERLINK_LEFT, self.OnLinkReport)
# Image(s) for buttons
if sys.platform == 'darwin':
BMP_EXPORT_SURFACE = wx.Bitmap(\
os.path.join(inv_paths.ICON_DIR, "surface_export_original.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
BMP_TAKE_PICTURE = wx.Bitmap(\
os.path.join(inv_paths.ICON_DIR, "tool_photo_original.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
#BMP_EXPORT_MASK = wx.Bitmap("../icons/mask.png",
# wx.BITMAP_TYPE_PNG)
else:
BMP_EXPORT_SURFACE = wx.Bitmap(os.path.join(inv_paths.ICON_DIR, "surface_export.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
BMP_TAKE_PICTURE = wx.Bitmap(os.path.join(inv_paths.ICON_DIR, "tool_photo.png"),
wx.BITMAP_TYPE_PNG).ConvertToImage()\
.Rescale(25, 25).ConvertToBitmap()
#BMP_EXPORT_MASK = wx.Bitmap("../icons/mask_small.png",
# wx.BITMAP_TYPE_PNG)
# Buttons related to hyperlinks
button_style = pbtn.PB_STYLE_SQUARE | pbtn.PB_STYLE_DEFAULT
button_picture = pbtn.PlateButton(self, BTN_PICTURE, "",
BMP_TAKE_PICTURE,
style=button_style)
button_picture.SetBackgroundColour(self.GetBackgroundColour())
self.button_picture = button_picture
button_surface = pbtn.PlateButton(self, BTN_SURFACE, "",
BMP_EXPORT_SURFACE,
style=button_style)
button_surface.SetBackgroundColour(self.GetBackgroundColour())
#button_mask = pbtn.PlateButton(self, BTN_MASK, "",
# BMP_EXPORT_MASK,
# style=button_style)
#button_request_rp = pbtn.PlateButton(self, BTN_REQUEST_RP, "",
# BMP_IMPORT, style=button_style)
#button_report = pbtn.PlateButton(self, BTN_REPORT, "",
# BMP_IMPORT,
# style=button_style)
        # When using PlateButton, it is necessary to bind events from the parent window
self.Bind(wx.EVT_BUTTON, self.OnButton)
# Tags and grid sizer for fixed items
flag_link = wx.EXPAND|wx.GROW|wx.LEFT|wx.TOP
flag_button = wx.EXPAND | wx.GROW
fixed_sizer = wx.FlexGridSizer(rows=2, cols=2, hgap=2, vgap=0)
fixed_sizer.AddGrowableCol(0, 1)
fixed_sizer.AddMany([ (link_export_picture, 1, flag_link, 3),
(button_picture, 0, flag_button),
(link_export_surface, 1, flag_link, 3),
(button_surface, 0, flag_button),])
#(link_export_mask, 1, flag_link, 3),
#(button_mask, 0, flag_button)])
#(link_report, 0, flag_link, 3),
#(button_report, 0, flag_button),
#(link_request_rp, 1, flag_link, 3),
#(button_request_rp, 0, flag_button)])
# Add line sizers into main sizer
main_sizer = wx.BoxSizer(wx.VERTICAL)
main_sizer.Add(fixed_sizer, 0, wx.GROW|wx.EXPAND)
# Update main sizer and panel layout
self.SetSizer(main_sizer)
self.Fit()
self.sizer = main_sizer
self.__init_menu()
def __init_menu(self):
menu = wx.Menu()
self.id_to_name = {const.AXIAL:_("Axial slice"),
const.CORONAL:_("Coronal slice"),
const.SAGITAL:_("Sagittal slice"),
const.VOLUME:_("Volume")}
for id in self.id_to_name:
item = wx.MenuItem(menu, id, self.id_to_name[id])
menu.Append(item)
self.menu_picture = menu
menu.Bind(wx.EVT_MENU, self.OnMenuPicture)
def OnMenuPicture(self, evt):
id = evt.GetId()
value = dlg.ExportPicture(self.id_to_name[id])
if value:
filename, filetype = value
Publisher.sendMessage('Export picture to file',
orientation=id, filename=filename, filetype=filetype)
def OnLinkExportPicture(self, evt=None):
self.button_picture.PopupMenu(self.menu_picture)
def OnLinkExportMask(self, evt=None):
project = proj.Project()
if sys.platform == 'win32':
project_name = project.name
else:
project_name = project.name+".vti"
dlg = wx.FileDialog(None,
"Save mask as...", # title
"", # last used directory
project_name, # filename
WILDCARD_SAVE_MASK,
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(0) # default is VTI
if dlg.ShowModal() == wx.ID_OK:
filename = dlg.GetPath()
extension = "vti"
if sys.platform != 'win32':
if filename.split(".")[-1] != extension:
filename = filename + "."+ extension
filetype = const.FILETYPE_IMAGEDATA
Publisher.sendMessage('Export mask to file',
filename=filename,
filetype=filetype)
def OnLinkExportSurface(self, evt=None):
"OnLinkExportSurface"
project = proj.Project()
n_surface = 0
for index in project.surface_dict:
if project.surface_dict[index].is_shown:
n_surface += 1
if n_surface:
if sys.platform == 'win32':
project_name = pathlib.Path(project.name).stem
else:
project_name = pathlib.Path(project.name).stem + ".stl"
session = ses.Session()
last_directory = session.get('paths', 'last_directory_3d_surface', '')
dlg = wx.FileDialog(None,
_("Save 3D surface as..."), # title
last_directory, # last used directory
project_name, # filename
WILDCARD_SAVE_3D,
wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(3) # default is STL
if dlg.ShowModal() == wx.ID_OK:
filetype_index = dlg.GetFilterIndex()
filetype = INDEX_TO_TYPE_3D[filetype_index]
filename = dlg.GetPath()
extension = INDEX_TO_EXTENSION[filetype_index]
if sys.platform != 'win32':
if filename.split(".")[-1] != extension:
filename = filename + "."+ extension
if filename:
session['paths']['last_directory_3d_surface'] = os.path.split(filename)[0]
session.WriteSessionFile()
Publisher.sendMessage('Export surface to file',
filename=filename, filetype=filetype)
if not os.path.exists(filename):
dlg = wx.MessageDialog(None,
_("It was not possible to save the surface."),
_("Error saving surface"),
wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
else:
dlg = wx.MessageDialog(None,
_("You need to create a surface and make it ") +
_("visible before exporting it."),
'InVesalius 3',
wx.OK | wx.ICON_INFORMATION)
try:
dlg.ShowModal()
finally:
dlg.Destroy()
def OnLinkRequestRP(self, evt=None):
pass
def OnLinkReport(self, evt=None):
pass
def OnButton(self, evt):
id = evt.GetId()
if id == BTN_PICTURE:
self.OnLinkExportPicture()
elif id == BTN_SURFACE:
self.OnLinkExportSurface()
elif id == BTN_REPORT:
self.OnLinkReport()
elif id == BTN_REQUEST_RP:
self.OnLinkRequestRP()
else:# id == BTN_MASK:
self.OnLinkExportMask()
| gpl-2.0 | -2,973,712,967,444,846,000 | -3,562,088,498,740,124,000 | 39.300518 | 98 | 0.509771 | false |
bmya/tkobr-addons | tko_web_sessions_management/main.py | 1 | 11671 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp.osv import fields, osv, orm
import pytz
from datetime import date, datetime, time, timedelta
from dateutil.relativedelta import *
from openerp.addons.base.ir.ir_cron import _intervalTypes
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.http import request
from openerp.tools.translate import _
from openerp import http
import werkzeug.contrib.sessions
from openerp.http import Response
# from openerp import pooler
_logger = logging.getLogger(__name__)
class Home_tkobr(openerp.addons.web.controllers.main.Home):
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
openerp.addons.web.controllers.main.ensure_db()
multi_ok = True
calendar_set = 0
calendar_ok = False
calendar_group = ''
unsuccessful_message = ''
now = datetime.now()
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = openerp.SUPERUSER_ID
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
uid = False
if 'login' in request.params and 'password' in request.params:
uid = request.session.authenticate(request.session.db, request.params[
'login'], request.params['password'])
if uid is not False:
user = request.registry.get('res.users').browse(
request.cr, request.uid, uid, request.context)
                if uid != SUPERUSER_ID:
# check for multiple sessions block
sessions = request.registry.get('ir.sessions').search(
request.cr, request.uid, [
('user_id', '=', uid), ('logged_in', '=', True)], context=request.context)
if sessions and user.multiple_sessions_block:
multi_ok = False
if multi_ok:
# check calendars
calendar_obj = request.registry.get(
'resource.calendar')
attendance_obj = request.registry.get(
'resource.calendar.attendance')
# GET USER LOCAL TIME
if user.tz:
tz = pytz.timezone(user.tz)
else:
tz = pytz.timezone('GMT')
tzoffset = tz.utcoffset(now)
now = now + tzoffset
if user.login_calendar_id:
calendar_set += 1
# check user calendar
attendances = attendance_obj.search(request.cr,
request.uid, [('calendar_id', '=', user.login_calendar_id.id),
('dayofweek', '=', str(now.weekday())),
('hour_from', '<=', now.hour + now.minute / 60.0),
('hour_to', '>=', now.hour + now.minute / 60.0)],
context=request.context)
if attendances:
calendar_ok = True
else:
unsuccessful_message = "unsuccessful login from '%s', user time out of allowed calendar defined in user" % request.params[
'login']
else:
# check user groups calendar
for group in user.groups_id:
if group.login_calendar_id:
calendar_set += 1
attendances = attendance_obj.search(request.cr,
request.uid, [('calendar_id', '=', group.login_calendar_id.id),
('dayofweek', '=', str(now.weekday())),
('hour_from', '<=', now.hour + now.minute / 60.0),
('hour_to', '>=', now.hour + now.minute / 60.0)],
context=request.context)
if attendances:
calendar_ok = True
else:
calendar_group = group.name
if sessions and group.multiple_sessions_block and multi_ok:
multi_ok = False
unsuccessful_message = "unsuccessful login from '%s', multisessions block defined in group '%s'" % (
request.params['login'], group.name)
break
if calendar_set > 0 and calendar_ok == False:
unsuccessful_message = "unsuccessful login from '%s', user time out of allowed calendar defined in group '%s'" % (
request.params['login'], calendar_group)
else:
unsuccessful_message = "unsuccessful login from '%s', multisessions block defined in user" % request.params[
'login']
else:
unsuccessful_message = "unsuccessful login from '%s', wrong username or password" % request.params[
'login']
if not unsuccessful_message or uid is SUPERUSER_ID:
self.save_session(
request.cr,
uid,
user.tz,
request.httprequest.session.sid,
context=request.context)
return http.redirect_with_hash(redirect)
user = request.registry.get('res.users').browse(
request.cr, SUPERUSER_ID, SUPERUSER_ID, request.context)
self.save_session(
request.cr,
uid,
user.tz,
request.httprequest.session.sid,
unsuccessful_message,
request.context)
_logger.error(unsuccessful_message)
request.uid = old_uid
values['error'] = 'Login failed due to one of the following reasons:'
values['reason1'] = '- Wrong login/password'
values['reason2'] = '- User not allowed to have multiple logins'
            values['reason3'] = '- User not allowed to login at this specific time or day'
return request.render('web.login', values)
def save_session(
self,
cr,
uid,
tz,
sid,
unsuccessful_message='',
context=None):
now = fields.datetime.now()
session_obj = request.registry.get('ir.sessions')
cr = request.registry.cursor()
# for GeoIP
geo_ip_resolver = None
ip_location = ""
try:
import GeoIP
geo_ip_resolver = GeoIP.open(
'/usr/share/GeoIP/GeoIP.dat',
GeoIP.GEOIP_STANDARD)
except ImportError:
geo_ip_resolver = False
if geo_ip_resolver:
ip_location = (str(geo_ip_resolver.country_name_by_addr(
request.httprequest.remote_addr)) or "")
# autocommit: our single update request will be performed atomically.
# (In this way, there is no opportunity to have two transactions
# interleaving their cr.execute()..cr.commit() calls and have one
# of them rolled back due to a concurrent access.)
cr.autocommit(True)
user = request.registry.get('res.users').browse(
cr, request.uid, uid, request.context)
ip = request.httprequest.headers.environ['REMOTE_ADDR']
logged_in = True
if unsuccessful_message:
uid = SUPERUSER_ID
logged_in = False
sessions = False
else:
sessions = session_obj.search(cr, uid, [('session_id', '=', sid),
('ip', '=', ip),
('user_id', '=', uid),
('logged_in', '=', True)],
context=context)
if not sessions:
values = {
'user_id': uid,
'logged_in': logged_in,
'session_id': sid,
'session_seconds': user.session_default_seconds,
'multiple_sessions_block': user.multiple_sessions_block,
'date_login': now,
'expiration_date': datetime.strftime(
(datetime.strptime(
now,
DEFAULT_SERVER_DATETIME_FORMAT) +
relativedelta(
seconds=user.session_default_seconds)),
DEFAULT_SERVER_DATETIME_FORMAT),
'ip': ip,
'ip_location': ip_location,
'remote_tz': tz or 'GMT',
'unsuccessful_message': unsuccessful_message,
}
session_obj.create(cr, uid, values, context=context)
cr.commit()
cr.close()
return True
@http.route('/web/session/logout', type='http', auth="none")
def logout(self, redirect='/web'):
request.session.logout(keep_db=True, logout_type='ul')
return werkzeug.utils.redirect(redirect, 303)
| agpl-3.0 | 2,543,243,164,534,203,400 | -4,287,039,769,622,199,300 | 46.060484 | 154 | 0.476223 | false |
jlaine/django-coconuts | tests/test_render.py | 1 | 4789 | #
# django-coconuts
# Copyright (c) 2008-2019, Jeremy Lainé
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
from PIL import Image
from tests import BaseTest
class RenderFileTest(BaseTest):
files = ['test.jpg', 'test.mp4', 'test.png', 'test.txt', 'test_portrait.jpg', 'test_portrait.mp4', 'test_rotated.jpg', 'test_rotated.mp4']
fixtures = ['test_users.json']
def assertImage(self, response, content_type, expected_size):
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], content_type)
self.assertTrue('Expires' in response)
self.assertTrue('Last-Modified' in response)
# check size
fp = io.BytesIO(b''.join(response.streaming_content))
img = Image.open(fp)
self.assertEqual(img.size, expected_size)
def test_as_anonymous(self):
"""
Anonymous user cannot render a file.
"""
# no size
response = self.client.get('/images/render/test.jpg')
self.assertEqual(response.status_code, 401)
# bad size
response = self.client.get('/images/render/test.jpg?size=123')
self.assertEqual(response.status_code, 401)
# good size, bad type
response = self.client.get('/images/render/test.txt?size=1024')
self.assertEqual(response.status_code, 401)
# good size, good path
response = self.client.get('/images/render/test.jpg?size=1024')
self.assertEqual(response.status_code, 401)
# good size, good path
response = self.client.get('/images/render/test.png?size=1024')
self.assertEqual(response.status_code, 401)
def test_as_user_bad(self):
"""
Authenticated user can render a file.
"""
self.client.login(username="test_user_1", password="test")
# no size
response = self.client.get('/images/render/test.jpg')
self.assertEqual(response.status_code, 400)
# bad size
response = self.client.get('/images/render/test.jpg?size=123')
self.assertEqual(response.status_code, 400)
# good size, bad path
response = self.client.get('/images/render/notfound.jpg?size=1024')
self.assertEqual(response.status_code, 404)
# good size, bad type
response = self.client.get('/images/render/test.txt?size=1024')
self.assertEqual(response.status_code, 400)
def test_as_user_good(self):
self.client.login(username="test_user_1", password="test")
response = self.client.get('/images/render/test.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (1024, 682))
response = self.client.get('/images/render/test_portrait.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (512, 768))
response = self.client.get('/images/render/test_portrait.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (432, 768))
response = self.client.get('/images/render/test_rotated.jpg?size=1024')
self.assertImage(response, 'image/jpeg', (512, 768))
response = self.client.get('/images/render/test_rotated.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (432, 768))
response = self.client.get('/images/render/test.png?size=1024')
self.assertImage(response, 'image/png', (24, 24))
response = self.client.get('/images/render/test.mp4?size=1024')
self.assertImage(response, 'image/jpeg', (1024, 576))
| bsd-2-clause | -951,290,314,768,015,400 | 5,768,237,473,872,232,000 | 39.235294 | 142 | 0.676901 | false |
ESS-LLP/erpnext-healthcare | erpnext/config/stock.py | 5 | 6499 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"label": _("Stock Transactions"),
"items": [
{
"type": "doctype",
"name": "Stock Entry",
},
{
"type": "doctype",
"name": "Delivery Note",
},
{
"type": "doctype",
"name": "Purchase Receipt",
},
{
"type": "doctype",
"name": "Material Request",
},
]
},
{
"label": _("Stock Reports"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Stock Ledger",
"doctype": "Stock Ledger Entry",
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Balance",
"doctype": "Stock Ledger Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Projected Qty",
"doctype": "Item",
},
{
"type": "page",
"name": "stock-balance",
"label": _("Stock Summary")
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Ageing",
"doctype": "Item",
},
{
"type": "report",
"is_query_report": True,
"name": "Item Price Stock",
"doctype": "Item",
}
]
},
{
"label": _("Items and Pricing"),
"items": [
{
"type": "doctype",
"name": "Item",
},
{
"type": "doctype",
"name": "Item Alternative",
},
{
"type": "doctype",
"name": "Product Bundle",
},
{
"type": "doctype",
"name": "Price List",
},
{
"type": "doctype",
"name": "Item Group",
"icon": "fa fa-sitemap",
"label": _("Item Group"),
"link": "Tree/Item Group",
},
{
"type": "doctype",
"name": "Item Price",
},
{
"type": "doctype",
"name": "Shipping Rule",
},
{
"type": "doctype",
"name": "Pricing Rule",
},
{
"type": "doctype",
"name": "Item Variant Settings",
},
]
},
{
"label": _("Serial No and Batch"),
"items": [
{
"type": "doctype",
"name": "Serial No",
},
{
"type": "doctype",
"name": "Batch",
},
{
"type": "doctype",
"name": "Installation Note",
},
{
"type": "report",
"name": "Serial No Service Contract Expiry",
"doctype": "Serial No"
},
{
"type": "report",
"name": "Serial No Status",
"doctype": "Serial No"
},
{
"type": "report",
"name": "Serial No Warranty Expiry",
"doctype": "Serial No"
},
]
},
{
"label": _("Fulfilment"),
"items": [
{
"type": "doctype",
"name": "Delivery Trip",
"description": _("Delivery Trip service tours to customers.")
}
]
},
{
"label": _("Tools"),
"icon": "fa fa-wrench",
"items": [
{
"type": "doctype",
"name": "Stock Reconciliation",
},
{
"type": "doctype",
"name": "Packing Slip",
},
{
"type": "doctype",
"name": "Quality Inspection",
},
{
"type": "doctype",
"name": "Quality Inspection Template",
},
{
"type": "doctype",
"name": "Landed Cost Voucher",
}
]
},
{
"label": _("Setup"),
"icon": "fa fa-cog",
"items": [
{
"type": "doctype",
"name": "Stock Settings",
},
{
"type": "doctype",
"name": "Warehouse",
},
{
"type": "doctype",
"name": "UOM",
"label": _("Unit of Measure") + " (UOM)",
},
{
"type": "doctype",
"name": "Item Attribute",
},
{
"type": "doctype",
"name": "Brand",
},
{
"type": "doctype",
"name": "Item Variant Settings",
},
]
},
{
"label": _("Analytics"),
"icon": "fa fa-table",
"items": [
{
"type": "report",
"is_query_report": False,
"name": "Item-wise Price List Rate",
"doctype": "Item Price",
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Analytics",
"doctype": "Stock Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Delivery Note Trends",
"doctype": "Delivery Note"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Receipt Trends",
"doctype": "Purchase Receipt"
},
]
},
{
"label": _("Reports"),
"icon": "fa fa-list",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Ordered Items To Be Delivered",
"doctype": "Delivery Note"
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Order Items To Be Received",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"name": "Item Shortage Report",
"route": "Report/Bin/Item Shortage Report",
"doctype": "Purchase Receipt"
},
{
"type": "report",
"is_query_report": True,
"name": "Requested Items To Be Transferred",
"doctype": "Material Request"
},
{
"type": "report",
"is_query_report": True,
"name": "Batch-Wise Balance History",
"doctype": "Batch"
},
{
"type": "report",
"is_query_report": True,
"name": "Batch Item Expiry Status",
"doctype": "Stock Ledger Entry"
},
{
"type": "report",
"is_query_report": True,
"name": "Item Prices",
"doctype": "Price List"
},
{
"type": "report",
"is_query_report": True,
"name": "Itemwise Recommended Reorder Level",
"doctype": "Item"
},
{
"type": "report",
"is_query_report": True,
"name": "Item Variant Details",
"doctype": "Item"
}
]
},
{
"label": _("Help"),
"icon": "fa fa-facetime-video",
"items": [
{
"type": "help",
"label": _("Items and Pricing"),
"youtube_id": "qXaEwld4_Ps"
},
{
"type": "help",
"label": _("Item Variants"),
"youtube_id": "OGBETlCzU5o"
},
{
"type": "help",
"label": _("Opening Stock Balance"),
"youtube_id": "0yPgrtfeCTs"
},
{
"type": "help",
"label": _("Making Stock Entries"),
"youtube_id": "Njt107hlY3I"
},
{
"type": "help",
"label": _("Serialized Inventory"),
"youtube_id": "gvOVlEwFDAk"
},
{
"type": "help",
"label": _("Batch Inventory"),
"youtube_id": "J0QKl7ABPKM"
},
{
"type": "help",
"label": _("Managing Subcontracting"),
"youtube_id": "ThiMCC2DtKo"
},
]
}
]
| gpl-3.0 | -5,292,158,681,575,236,000 | -3,260,669,747,007,224,000 | 18.002924 | 66 | 0.458994 | false |
neurotechuoft/MindType | Code/V1/src/deprecated/pyqtgraph/flowchart/library/Data.py | 35 | 12746 | # -*- coding: utf-8 -*-
from ..Node import Node
from ...Qt import QtGui, QtCore
import numpy as np
from .common import *
from ...SRTTransform import SRTTransform
from ...Point import Point
from ...widgets.TreeWidget import TreeWidget
from ...graphicsItems.LinearRegionItem import LinearRegionItem
from . import functions
class ColumnSelectNode(Node):
"""Select named columns from a record array or MetaArray."""
nodeName = "ColumnSelect"
def __init__(self, name):
Node.__init__(self, name, terminals={'In': {'io': 'in'}})
self.columns = set()
self.columnList = QtGui.QListWidget()
self.axis = 0
self.columnList.itemChanged.connect(self.itemChanged)
def process(self, In, display=True):
if display:
self.updateList(In)
out = {}
if hasattr(In, 'implements') and In.implements('MetaArray'):
for c in self.columns:
out[c] = In[self.axis:c]
elif isinstance(In, np.ndarray) and In.dtype.fields is not None:
for c in self.columns:
out[c] = In[c]
else:
self.In.setValueAcceptable(False)
raise Exception("Input must be MetaArray or ndarray with named fields")
return out
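    # Illustrative input (the field names used here are assumptions): process()
    # accepts either a MetaArray with named columns or a numpy structured
    # array, e.g.
    #   data = np.zeros(100, dtype=[('time', float), ('voltage', float)])
    # Checking 'voltage' in the control list then exposes data['voltage'] on an
    # output terminal of the same name.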
def ctrlWidget(self):
return self.columnList
def updateList(self, data):
if hasattr(data, 'implements') and data.implements('MetaArray'):
cols = data.listColumns()
for ax in cols: ## find first axis with columns
if len(cols[ax]) > 0:
self.axis = ax
cols = set(cols[ax])
break
else:
cols = list(data.dtype.fields.keys())
rem = set()
for c in self.columns:
if c not in cols:
self.removeTerminal(c)
rem.add(c)
self.columns -= rem
self.columnList.blockSignals(True)
self.columnList.clear()
for c in cols:
item = QtGui.QListWidgetItem(c)
item.setFlags(QtCore.Qt.ItemIsEnabled|QtCore.Qt.ItemIsUserCheckable)
if c in self.columns:
item.setCheckState(QtCore.Qt.Checked)
else:
item.setCheckState(QtCore.Qt.Unchecked)
self.columnList.addItem(item)
self.columnList.blockSignals(False)
def itemChanged(self, item):
col = str(item.text())
if item.checkState() == QtCore.Qt.Checked:
if col not in self.columns:
self.columns.add(col)
self.addOutput(col)
else:
if col in self.columns:
self.columns.remove(col)
self.removeTerminal(col)
self.update()
def saveState(self):
state = Node.saveState(self)
state['columns'] = list(self.columns)
return state
def restoreState(self, state):
Node.restoreState(self, state)
self.columns = set(state.get('columns', []))
for c in self.columns:
self.addOutput(c)
class RegionSelectNode(CtrlNode):
"""Returns a slice from a 1-D array. Connect the 'widget' output to a plot to display a region-selection widget."""
nodeName = "RegionSelect"
uiTemplate = [
('start', 'spin', {'value': 0, 'step': 0.1}),
('stop', 'spin', {'value': 0.1, 'step': 0.1}),
('display', 'check', {'value': True}),
('movable', 'check', {'value': True}),
]
def __init__(self, name):
self.items = {}
CtrlNode.__init__(self, name, terminals={
'data': {'io': 'in'},
'selected': {'io': 'out'},
'region': {'io': 'out'},
'widget': {'io': 'out', 'multi': True}
})
self.ctrls['display'].toggled.connect(self.displayToggled)
self.ctrls['movable'].toggled.connect(self.movableToggled)
def displayToggled(self, b):
for item in self.items.values():
item.setVisible(b)
def movableToggled(self, b):
for item in self.items.values():
item.setMovable(b)
def process(self, data=None, display=True):
#print "process.."
s = self.stateGroup.state()
region = [s['start'], s['stop']]
if display:
conn = self['widget'].connections()
for c in conn:
plot = c.node().getPlot()
if plot is None:
continue
if c in self.items:
item = self.items[c]
item.setRegion(region)
#print " set rgn:", c, region
#item.setXVals(events)
else:
item = LinearRegionItem(values=region)
self.items[c] = item
#item.connect(item, QtCore.SIGNAL('regionChanged'), self.rgnChanged)
item.sigRegionChanged.connect(self.rgnChanged)
item.setVisible(s['display'])
item.setMovable(s['movable'])
#print " new rgn:", c, region
#self.items[c].setYRange([0., 0.2], relative=True)
if self['selected'].isConnected():
if data is None:
sliced = None
elif (hasattr(data, 'implements') and data.implements('MetaArray')):
sliced = data[0:s['start']:s['stop']]
else:
mask = (data['time'] >= s['start']) * (data['time'] < s['stop'])
sliced = data[mask]
else:
sliced = None
return {'selected': sliced, 'widget': self.items, 'region': region}
def rgnChanged(self, item):
region = item.getRegion()
self.stateGroup.setState({'start': region[0], 'stop': region[1]})
self.update()
class EvalNode(Node):
"""Return the output of a string evaluated/executed by the python interpreter.
The string may be either an expression or a python script, and inputs are accessed as the name of the terminal.
For expressions, a single value may be evaluated for a single output, or a dict for multiple outputs.
For a script, the text will be executed as the body of a function."""
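    # Illustrative snippets for the editable text (the terminal names used here
    # are assumptions):
    #   expression:   {'output': args['input'] * 2}
    #   script body:  tmp = args['input'] ** 2
    #                 return {'output': tmp}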
nodeName = 'PythonEval'
def __init__(self, name):
Node.__init__(self, name,
terminals = {
'input': {'io': 'in', 'renamable': True, 'multiable': True},
'output': {'io': 'out', 'renamable': True, 'multiable': True},
},
allowAddInput=True, allowAddOutput=True)
self.ui = QtGui.QWidget()
self.layout = QtGui.QGridLayout()
#self.addInBtn = QtGui.QPushButton('+Input')
#self.addOutBtn = QtGui.QPushButton('+Output')
self.text = QtGui.QTextEdit()
self.text.setTabStopWidth(30)
self.text.setPlainText("# Access inputs as args['input_name']\nreturn {'output': None} ## one key per output terminal")
#self.layout.addWidget(self.addInBtn, 0, 0)
#self.layout.addWidget(self.addOutBtn, 0, 1)
self.layout.addWidget(self.text, 1, 0, 1, 2)
self.ui.setLayout(self.layout)
#QtCore.QObject.connect(self.addInBtn, QtCore.SIGNAL('clicked()'), self.addInput)
#self.addInBtn.clicked.connect(self.addInput)
#QtCore.QObject.connect(self.addOutBtn, QtCore.SIGNAL('clicked()'), self.addOutput)
#self.addOutBtn.clicked.connect(self.addOutput)
self.text.focusOutEvent = self.focusOutEvent
self.lastText = None
def ctrlWidget(self):
return self.ui
#def addInput(self):
#Node.addInput(self, 'input', renamable=True)
#def addOutput(self):
#Node.addOutput(self, 'output', renamable=True)
def focusOutEvent(self, ev):
text = str(self.text.toPlainText())
if text != self.lastText:
self.lastText = text
self.update()
return QtGui.QTextEdit.focusOutEvent(self.text, ev)
def process(self, display=True, **args):
l = locals()
l.update(args)
## try eval first, then exec
try:
text = str(self.text.toPlainText()).replace('\n', ' ')
output = eval(text, globals(), l)
except SyntaxError:
fn = "def fn(**args):\n"
run = "\noutput=fn(**args)\n"
text = fn + "\n".join([" "+l for l in str(self.text.toPlainText()).split('\n')]) + run
exec(text)
except:
print("Error processing node: %s" % self.name())
raise
return output
def saveState(self):
state = Node.saveState(self)
state['text'] = str(self.text.toPlainText())
#state['terminals'] = self.saveTerminals()
return state
def restoreState(self, state):
Node.restoreState(self, state)
self.text.clear()
self.text.insertPlainText(state['text'])
self.restoreTerminals(state['terminals'])
self.update()
class ColumnJoinNode(Node):
"""Concatenates record arrays and/or adds new columns"""
nodeName = 'ColumnJoin'
def __init__(self, name):
Node.__init__(self, name, terminals = {
'output': {'io': 'out'},
})
#self.items = []
self.ui = QtGui.QWidget()
self.layout = QtGui.QGridLayout()
self.ui.setLayout(self.layout)
self.tree = TreeWidget()
self.addInBtn = QtGui.QPushButton('+ Input')
self.remInBtn = QtGui.QPushButton('- Input')
self.layout.addWidget(self.tree, 0, 0, 1, 2)
self.layout.addWidget(self.addInBtn, 1, 0)
self.layout.addWidget(self.remInBtn, 1, 1)
self.addInBtn.clicked.connect(self.addInput)
self.remInBtn.clicked.connect(self.remInput)
self.tree.sigItemMoved.connect(self.update)
def ctrlWidget(self):
return self.ui
def addInput(self):
#print "ColumnJoinNode.addInput called."
term = Node.addInput(self, 'input', renamable=True, removable=True, multiable=True)
#print "Node.addInput returned. term:", term
item = QtGui.QTreeWidgetItem([term.name()])
item.term = term
term.joinItem = item
#self.items.append((term, item))
self.tree.addTopLevelItem(item)
def remInput(self):
sel = self.tree.currentItem()
term = sel.term
term.joinItem = None
sel.term = None
self.tree.removeTopLevelItem(sel)
self.removeTerminal(term)
self.update()
def process(self, display=True, **args):
order = self.order()
vals = []
for name in order:
if name not in args:
continue
val = args[name]
if isinstance(val, np.ndarray) and len(val.dtype) > 0:
vals.append(val)
else:
vals.append((name, None, val))
return {'output': functions.concatenateColumns(vals)}
def order(self):
return [str(self.tree.topLevelItem(i).text(0)) for i in range(self.tree.topLevelItemCount())]
def saveState(self):
state = Node.saveState(self)
state['order'] = self.order()
return state
def restoreState(self, state):
Node.restoreState(self, state)
inputs = self.inputs()
## Node.restoreState should have created all of the terminals we need
## However: to maintain support for some older flowchart files, we need
## to manually add any terminals that were not taken care of.
for name in [n for n in state['order'] if n not in inputs]:
Node.addInput(self, name, renamable=True, removable=True, multiable=True)
inputs = self.inputs()
order = [name for name in state['order'] if name in inputs]
for name in inputs:
if name not in order:
order.append(name)
self.tree.clear()
for name in order:
term = self[name]
item = QtGui.QTreeWidgetItem([name])
item.term = term
term.joinItem = item
#self.items.append((term, item))
self.tree.addTopLevelItem(item)
def terminalRenamed(self, term, oldName):
Node.terminalRenamed(self, term, oldName)
item = term.joinItem
item.setText(0, term.name())
self.update()
| agpl-3.0 | -1,495,139,846,264,242,000 | -1,773,919,885,200,813,800 | 34.803371 | 127 | 0.5488 | false |
loveshell/volatility | volatility/plugins/mac/compressed_swap.py | 11 | 11167 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Golden G. Richard III
@license: GNU General Public License 2.0
@contact: [email protected]
@organization: Arcane Alloy, LLC
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.mac.common as common
from struct import pack
import WKdm
class mac_compressed_swap(common.AbstractMacCommand):
""" Prints Mac OS X VM compressor stats and dumps all compressed pages """
def __init__(self, config, *args, **kwargs):
common.AbstractMacCommand.__init__(self, config, *args, **kwargs)
if config:
self._config.add_option('SKIP-WRITING', short_option = 't',
help = 'Skip writing decompressed pages, just print stats and test decompression',
action = 'store_true', default = False)
# defined in osfmk/vm/vm_compressor.h; proper decompression relies on these
self.C_SEG_BUFSIZE = (1024 * 256)
self.C_SEG_ALLOCSIZE = (self.C_SEG_BUFSIZE + 4096)
self.C_SEG_SLOT_ARRAYS = 6
self.C_SEG_SLOT_ARRAY_SIZE = 64
# defined in osfmk/vm/vm_compressor_pager.c; proper slot lookup relies on these
self.COMPRESSOR_SLOTS_CHUNK_SIZE = 512
self.COMPRESSOR_SLOTS_PER_CHUNK = 128 # (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t)), compressor_slot_t is a 32-bit int
# WKdm decompression in Python
self.wkdm=WKdm.WKdm()
# buffer for decompression
self.dest = [0] * self.wkdm.PAGE_SIZE_IN_BYTES
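        # Illustrative contract, inferred from its use in calculate() below:
        # WKdm_decompress() takes the compressed slot data as an array of
        # 32-bit ints, fills self.dest in place, and returns the number of
        # 32-bit words produced (a value <= 0 indicates a failed decompression).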
def calculate(self):
common.set_plugin_members(self)
com_obj_addr = self.addr_space.profile.get_symbol("_compressor_object_store")
if not com_obj_addr:
debug.error("The given memory sample does not utilize compressed swap.")
# from osfmk/vm/vm_object.h. compressor_object is the high level VM object.
compressor_object = obj.Object("vm_object", offset = com_obj_addr, vm = self.addr_space)
# from osfmk/vm/vm_compressor.c. c_segments is an array of c_segu objects, which track and store compressed pages.
# c_segment_count is current size of c_segments array.
c_segment_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_c_segment_count"),
vm = self.addr_space)
c_segments_ptr = obj.Object("Pointer", offset = self.addr_space.profile.get_symbol("_c_segments"),
vm = self.addr_space)
c_segments = obj.Object("Array", targetType = "c_segu", count = c_segment_count,
offset = c_segments_ptr, vm = self.addr_space)
c_segments_available = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_c_segments_available"),
vm = self.addr_space)
c_segments_busy = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_c_segments_busy"),
vm = self.addr_space)
c_segment_compressed_bytes = obj.Object("long long",
offset = self.addr_space.profile.get_symbol("_c_segment_compressed_bytes"),
vm = self.addr_space)
# This is probably a boring stat. Omit.
#c_segments_limit = obj.Object("unsigned int",
# offset = self.addr_space.profile.get_symbol("_c_segments_limit"),
# vm = self.addr_space)
#yield ("c_segments_limit", c_segments_limit, "")
# from osfmk/vm/vm_compressor.h
compressor_bytes_used = obj.Object("long long",
offset = self.addr_space.profile.get_symbol("_compressor_bytes_used"),
vm = self.addr_space)
yield ("Compressor memory used", compressor_bytes_used, "bytes")
# from osfmk/vm/vm_page.h
vm_page_active_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_vm_page_active_count"),
vm = self.addr_space)
vm_page_inactive_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_vm_page_inactive_count"),
vm = self.addr_space)
vm_page_free_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_vm_page_free_count"),
vm = self.addr_space)
vm_page_speculative_count = obj.Object("unsigned int",
offset = self.addr_space.profile.get_symbol("_vm_page_speculative_count"),
vm = self.addr_space)
available_uncompressed = vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count
yield ("Available uncompressed memory", available_uncompressed, "pages")
available_memory = available_uncompressed + compressor_object.resident_page_count
yield ("Available memory", available_memory, "pages")
yield ("Segments available", c_segments_available, "segments")
yield ("Segments busy", c_segments_busy, "segments")
yield ("Current segment count", c_segment_count, "segments")
for i in range(c_segment_count):
if not c_segments[i].c_seg.is_valid():
yield("Segment " + str(i) + " is invalid", "SKIPPING", "")
continue
if c_segments[i].c_seg.c_ondisk == 1:
yield("Segment " + str(i) + " is swapped out", "SKIPPING", "")
continue
if c_segments[i].c_seg.c_bytes_used < 1 or c_segments[i].c_seg.c_bytes_used > self.C_SEG_ALLOCSIZE:
yield("Segment " + str(i) + " size is invalid", "SKIPPING", "")
continue
yield ("Segment " + str(i), c_segments[i].c_seg.c_bytes_used, "bytes used")
yield ("Segment " + str(i), c_segments[i].c_seg.c_bytes_unused, "bytes unused")
# walk over the two dimensional slot array (max C_SEG_SLOT_ARRAYS x C_SEG_SLOT_ARRAY SIZE elements)
# At least in 10.9, the OS X kernel zeroes an entire c_segment when it's allocated, but doesn't
# zero the C_SEG_SLOT_ARRAY_SIZE buffer when a new c_slots row is allocated, which means that
# the last valid slot needs to be tracked via the c_nextslot variable. Otherwise, garbage slots
# are encountered, which may look valid because of the limited number of bits allocated to fields
# in a struct c_slot.
j1 = 0
j2 = 0
c_nextslot = c_segments[i].c_seg.c_nextslot
yield ("Last valid slot", str((c_nextslot-1) / self.C_SEG_SLOT_ARRAY_SIZE) + ", " + str((c_nextslot-1) % self.C_SEG_SLOT_ARRAY_SIZE) , "")
while (j1 < self.C_SEG_SLOT_ARRAYS and j1 * self.C_SEG_SLOT_ARRAY_SIZE + j2 < c_nextslot):
cslot_array = c_segments[i].c_seg.c_slots[j1]
if cslot_array.is_valid():
cslots = obj.Object("Array", offset = cslot_array, targetType = "c_slot",
count = self.C_SEG_SLOT_ARRAY_SIZE, vm = self.addr_space)
while (j2 < self.C_SEG_SLOT_ARRAY_SIZE and j1 * self.C_SEG_SLOT_ARRAY_SIZE + j2 < c_nextslot):
cslot=cslots[j2]
(csize, compressed, status) = (4096 / 4, False, "UNCOMPRESSED") if (cslot.c_size == 4095) else (cslot.c_size / 4, True, "COMPRESSED")
if csize > 0:
yield (" Slot " + str(j1) + ", " + str(j2) + " offset", str(cslot.c_offset * 4), "bytes")
yield (" Slot " + str(j1) + ", " + str(j2) + " size", str(csize * 4), "bytes " + status)
cslot_data = obj.Object("Array", offset = c_segments[i].c_seg.c_store.c_buffer+cslot.c_offset * 4, targetType = "int",
count = csize, vm = self.addr_space)
yield (" Processing page at slot "+ str(j1) + ", " + str(j2),"", "")
if compressed:
# Try to decompress slot and optionally write result to file.
# Compressed data is fed to WKdm as an array of 32-bit ints.
decompressed = self.wkdm.WKdm_decompress(cslot_data, self.dest)
if decompressed > 0:
if not self._config.SKIP_WRITING:
f = open(str(i)+"-"+str(j1) + "-" + str(j2) + "-decompressed.out", 'wb')
for k in range(decompressed):
f.write(pack('<i', self.dest[k]))
f.close()
else:
yield (" Decompression failed on slot " + str(j1) + ", " + str(j2),"","SKIPPING")
elif not self._config.SKIP_WRITING:
f = open(str(i)+"-"+str(j1) + "-" + str(j2) + "-uncompressed.out", 'wb')
for k in range(0,csize):
f.write(pack('<i', cslot_data[k]))
f.close()
j2 += 1
j2=0
else:
yield(" Slot array " + str(j1) + " is invalid", "", "SKIPPING")
j1 += 1
def render_text(self, outfd, data):
for k, v1, v2 in data:
outfd.write("{0:<36} : {1:>12} {2}\n".format(k, v1, v2))
| gpl-2.0 | -9,028,021,403,184,146,000 | -7,489,088,383,398,062,000 | 52.6875 | 157 | 0.523686 | false |
limemadness/selenium_training | test_countries_sort.py | 1 | 2050 | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@pytest.fixture
#def driver(request):
# wd = webdriver.Firefox(firefox_binary="c:\\Program Files (x86)\\Mozilla Firefox\\firefox.exe")
# print(wd.capabilities)
# request.addfinalizer(wd.quit)
# return wd
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(10)
request.addfinalizer(wd.quit)
return wd
def test_countries_sort(driver):
driver.get("http://localhost/litecart/admin/")
driver.find_element_by_name("username").click()
driver.find_element_by_name("username").send_keys("admin")
driver.find_element_by_name("password").click()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_xpath("//div[2]/button").click()
driver.get("http://localhost/litecart/admin/?app=countries&doc=countries")
#get country data
countries = driver.find_elements_by_css_selector("#content tr.row")
countries_timezone_url = []
country_name = []
#verify alphabetical order of country names
for country in countries:
country_name.append(country.find_element_by_css_selector("td:nth-child(5)").text)
assert sorted(country_name) == country_name
#get countries with multiple timezones
for country in countries:
if int(country.find_element_by_css_selector("td:nth-child(6)").text) > 0:
countries_timezone_url.append(country.find_element_by_css_selector("td:nth-child(5) a").get_attribute("href"))
#verify alphabetical order of timezones
for country_timezone_url in countries_timezone_url:
driver.get(country_timezone_url)
timezone_list = driver.find_elements_by_css_selector("#table-zones td:nth-child(2)")
del timezone_list[-1:]
timezones = []
for timezone in timezone_list:
timezones.append(timezone.text)
print(timezones)
assert sorted(timezones) == timezones
| apache-2.0 | 3,440,379,570,406,150,700 | 1,686,998,212,447,642,600 | 40 | 122 | 0.699024 | false |
ashwini0529/Oreo | flask_user/tests/tst_app.py | 2 | 8125 | import os
import datetime
from flask import Flask, render_template_string, request
from flask.ext.babel import Babel
from flask.ext.mail import Mail
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.user import login_required, SQLAlchemyAdapter, UserManager, UserMixin
from flask.ext.user import roles_required, confirm_email_required
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///tst_app.sqlite')
CSRF_ENABLED = True
# Flask-Mail settings
    MAIL_USERNAME = os.getenv('MAIL_USERNAME', '[email protected]')
    MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'password')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <[email protected]>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = os.getenv('MAIL_USE_SSL', True)
# Flask-User settings
USER_APP_NAME = "AppName" # Used by email templates
USER_ENABLE_USERNAME = True
USER_ENABLE_EMAIL = True
USER_ENABLE_CONFIRM_EMAIL = True
USER_ENABLE_INVITATION = True
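    # Illustrative override (the values shown are assumptions): any of the
    # os.getenv() based settings above can be supplied from the environment
    # before running the app, e.g.
    #   export MAIL_SERVER=smtp.example.com
    #   export MAIL_PORT=587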
def create_app(test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
babel = Babel(app) # Initialize Flask-Babel
mail = Mail(app) # Initialize Flask-Mail
# Define the User data model. Make sure to add flask.ext.user UserMixin!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50), nullable=True, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# User email information
email = db.Column(db.String(255), nullable=True, unique=True)
confirmed_at = db.Column(db.DateTime())
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
roles = db.relationship('Role', secondary='user_roles',
backref=db.backref('users', lazy='dynamic'))
# Define UserEmail DataModel.
class UserEmail(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# User email information
email = db.Column(db.String(255), nullable=True, unique=True)
confirmed_at = db.Column(db.DateTime())
is_primary = db.Column(db.Boolean(), nullable=False, default=False)
# Relationship
user = db.relationship('User', uselist=False)
class UserInvitation(db.Model):
__tablename__ = 'user_invite'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), nullable=False)
# save the user of the invitee
invited_by_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# token used for registration page to identify user registering
token = db.Column(db.String(100), nullable=False, server_default='')
# Define the Role data model
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
# Define the UserRoles data model
class UserRoles(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))
# Reset all the database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User, UserInvitationClass=UserInvitation)
user_manager = UserManager(db_adapter, app)
# Create regular 'member' user
if not User.query.filter(User.username=='member').first():
user = User(username='member', email='[email protected]', active=True,
password=user_manager.hash_password('Password1'), confirmed_at=datetime.datetime.utcnow())
db.session.add(user)
db.session.commit()
# Create 'user007' user with 'secret' and 'agent' roles
if not User.query.filter(User.username=='user007').first():
user1 = User(username='user007', email='[email protected]', active=True,
password=user_manager.hash_password('Password1'))
user1.roles.append(Role(name='secret'))
user1.roles.append(Role(name='agent'))
db.session.add(user1)
db.session.commit()
# The '/' page is accessible to anyone
@app.route('/')
def home_page():
# if current_user.is_authenticated():
# return user_profile_page()
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Home Page{%endtrans%}</h2>
<p><a href="{{ url_for('user.login') }}">{%trans%}Sign in{%endtrans%}</a></p>
{% endblock %}
""")
# The '/profile' page requires a logged-in user
@app.route('/user/profile')
@login_required # Use of @login_required decorator
@confirm_email_required
def user_profile_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Profile Page{%endtrans%}</h2>
<p> {%trans%}Hello{%endtrans%}
{{ current_user.username or current_user.email }},</p>
<p> <a href="{{ url_for('user.change_username') }}">
{%trans%}Change username{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.change_password') }}">
{%trans%}Change password{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.invite') }}">
{%trans%}Invite User{%endtrans%}</a></p>
<p> <a href="{{ url_for('user.logout') }}?next={{ url_for('user.login') }}">
{%trans%}Sign out{%endtrans%}</a></p>
{% endblock %}
""")
# The '/special' page requires a user that has the 'special' AND ('sauce' OR 'agent') role.
@app.route('/special')
@roles_required('secret', ['sauce', 'agent']) # Use of @roles_required decorator
def special_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>{%trans%}Special Page{%endtrans%}</h2>
{% endblock %}
""")
# For testing only
app.db = db
app.UserEmailClass = UserEmail
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5555, debug=True)
| bsd-2-clause | 6,832,744,790,859,446,000 | 8,808,356,389,013,926,000 | 41.539267 | 106 | 0.596062 | false |
dabiboo/youtube-dl | youtube_dl/extractor/trutube.py | 147 | 1354 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import xpath_text
class TruTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-',
'md5': 'c5b6e301b0a2040b074746cbeaa26ca1',
'info_dict': {
'id': '14880',
'ext': 'flv',
'title': 'Ramses II - Proven To Be A Red Headed Caucasoid',
'thumbnail': 're:^http:.*\.jpg$',
}
}, {
'url': 'https://trutube.tv/nuevo/player/embed.php?v=14880',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
config = self._download_xml(
'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id,
video_id, transform_source=lambda s: s.strip())
# filehd is always 404
video_url = xpath_text(config, './file', 'video URL', fatal=True)
title = xpath_text(config, './title', 'title').strip()
thumbnail = xpath_text(config, './image', ' thumbnail')
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
}
| unlicense | 3,415,109,169,734,612,000 | 4,541,770,382,724,303,000 | 32.85 | 103 | 0.545052 | false |
SebastienBocquet/ConvertibleUAV | Tools/MAVLink/mavlink/pymavlink/mavwp.py | 5 | 13222 | '''
module for loading/saving waypoints
'''
import mavutil, time, copy
import logging
try:
from google.protobuf import text_format
import mission_pb2
HAVE_PROTOBUF = True
except ImportError:
HAVE_PROTOBUF = False
class MAVWPError(Exception):
'''MAVLink WP error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class MAVWPLoader(object):
'''MAVLink waypoint loader'''
def __init__(self, target_system=0, target_component=0):
self.wpoints = []
self.target_system = target_system
self.target_component = target_component
self.last_change = time.time()
def count(self):
'''return number of waypoints'''
return len(self.wpoints)
def wp(self, i):
'''return a waypoint'''
return self.wpoints[i]
def add(self, w, comment=''):
'''add a waypoint'''
w = copy.copy(w)
if comment:
w.comment = comment
w.seq = self.count()
self.wpoints.append(w)
self.last_change = time.time()
def set(self, w, idx):
'''set a waypoint'''
w.seq = idx
if w.seq == self.count():
return self.add(w)
if self.count() <= idx:
raise MAVWPError('adding waypoint at idx=%u past end of list (count=%u)' % (idx, self.count()))
self.wpoints[idx] = w
self.last_change = time.time()
def remove(self, w):
'''remove a waypoint'''
self.wpoints.remove(w)
self.last_change = time.time()
def clear(self):
'''clear waypoint list'''
self.wpoints = []
self.last_change = time.time()
def _read_waypoints_v100(self, file):
'''read a version 100 waypoint'''
cmdmap = {
2 : mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
3 : mavutil.mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH,
4 : mavutil.mavlink.MAV_CMD_NAV_LAND,
24: mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
26: mavutil.mavlink.MAV_CMD_NAV_LAND,
25: mavutil.mavlink.MAV_CMD_NAV_WAYPOINT ,
27: mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM
}
comment = ''
for line in file:
if line.startswith('#'):
comment = line[1:].lstrip()
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 13:
raise MAVWPError("invalid waypoint line with %u values" % len(a))
if mavutil.mavlink10():
fn = mavutil.mavlink.MAVLink_mission_item_message
else:
fn = mavutil.mavlink.MAVLink_waypoint_message
w = fn(self.target_system, self.target_component,
int(a[0]), # seq
int(a[1]), # frame
int(a[2]), # action
int(a[7]), # current
int(a[12]), # autocontinue
float(a[5]), # param1,
float(a[6]), # param2,
float(a[3]), # param3
float(a[4]), # param4
float(a[9]), # x, latitude
float(a[8]), # y, longitude
float(a[10]) # z
)
if not w.command in cmdmap:
raise MAVWPError("Unknown v100 waypoint action %u" % w.command)
w.command = cmdmap[w.command]
self.add(w, comment)
comment = ''
def _read_waypoints_v110(self, file):
'''read a version 110 waypoint'''
comment = ''
for line in file:
if line.startswith('#'):
comment = line[1:].lstrip()
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 12:
raise MAVWPError("invalid waypoint line with %u values" % len(a))
if mavutil.mavlink10():
fn = mavutil.mavlink.MAVLink_mission_item_message
else:
fn = mavutil.mavlink.MAVLink_waypoint_message
w = fn(self.target_system, self.target_component,
int(a[0]), # seq
int(a[2]), # frame
int(a[3]), # command
int(a[1]), # current
int(a[11]), # autocontinue
float(a[4]), # param1,
float(a[5]), # param2,
float(a[6]), # param3
float(a[7]), # param4
float(a[8]), # x (latitude)
float(a[9]), # y (longitude)
float(a[10]) # z (altitude)
)
if w.command == 0 and w.seq == 0 and self.count() == 0:
# special handling for Mission Planner created home wp
w.command = mavutil.mavlink.MAV_CMD_NAV_WAYPOINT
self.add(w, comment)
comment = ''
def _read_waypoints_pb_110(self, file):
if not HAVE_PROTOBUF:
raise MAVWPError(
'Cannot read mission file in protobuf format without protobuf '
'library. Try "easy_install protobuf".')
explicit_seq = False
warned_seq = False
mission = mission_pb2.Mission()
text_format.Merge(file.read(), mission)
defaults = mission_pb2.Waypoint()
# Set defaults (may be overriden in file).
defaults.current = False
defaults.autocontinue = True
defaults.param1 = 0.0
defaults.param2 = 0.0
defaults.param3 = 0.0
defaults.param4 = 0.0
defaults.x = 0.0
defaults.y = 0.0
defaults.z = 0.0
# Use defaults specified in mission file, if there are any.
if mission.defaults:
defaults.MergeFrom(mission.defaults)
for seq, waypoint in enumerate(mission.waypoint):
# Consecutive sequence numbers are automatically assigned
# UNLESS the mission file specifies sequence numbers of
# its own.
if waypoint.seq:
explicit_seq = True
else:
if explicit_seq and not warned_seq:
logging.warn(
'Waypoint file %s: mixes explicit and implicit '
'sequence numbers' % (file,))
warned_seq = True
# The first command has current=True, the rest have current=False.
if seq > 0:
current = defaults.current
else:
current = True
w = mavutil.mavlink.MAVLink_mission_item_message(
self.target_system, self.target_component,
waypoint.seq or seq,
waypoint.frame,
waypoint.command,
waypoint.current or current,
waypoint.autocontinue or defaults.autocontinue,
waypoint.param1 or defaults.param1,
waypoint.param2 or defaults.param2,
waypoint.param3 or defaults.param3,
waypoint.param4 or defaults.param4,
waypoint.x or defaults.x,
waypoint.y or defaults.y,
waypoint.z or defaults.z)
self.add(w)
def load(self, filename):
'''load waypoints from a file.
returns number of waypoints loaded'''
f = open(filename, mode='r')
version_line = f.readline().strip()
if version_line == "QGC WPL 100":
readfn = self._read_waypoints_v100
elif version_line == "QGC WPL 110":
readfn = self._read_waypoints_v110
elif version_line == "QGC WPL PB 110":
readfn = self._read_waypoints_pb_110
else:
f.close()
raise MAVWPError("Unsupported waypoint format '%s'" % version_line)
self.clear()
readfn(f)
f.close()
return len(self.wpoints)
def save_as_pb(self, filename):
mission = mission_pb2.Mission()
for w in self.wpoints:
waypoint = mission.waypoint.add()
waypoint.command = w.command
waypoint.frame = w.frame
waypoint.seq = w.seq
waypoint.current = w.current
waypoint.autocontinue = w.autocontinue
waypoint.param1 = w.param1
waypoint.param2 = w.param2
waypoint.param3 = w.param3
waypoint.param4 = w.param4
waypoint.x = w.x
waypoint.y = w.y
waypoint.z = w.z
with open(filename, 'w') as f:
f.write('QGC WPL PB 110\n')
f.write(text_format.MessageToString(mission))
def save(self, filename):
'''save waypoints to a file'''
f = open(filename, mode='w')
f.write("QGC WPL 110\n")
for w in self.wpoints:
if getattr(w, 'comment', None):
f.write("# %s\n" % w.comment)
f.write("%u\t%u\t%u\t%u\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%u\n" % (
w.seq, w.current, w.frame, w.command,
w.param1, w.param2, w.param3, w.param4,
w.x, w.y, w.z, w.autocontinue))
f.close()
def polygon(self, done=None):
'''return a polygon for the waypoints'''
points = []
if done is None:
done = set()
idx = 0
# find first point not done yet
while idx < self.count():
if not idx in done:
break
idx += 1
while idx < self.count():
w = self.wp(idx)
if idx in done:
if w.x != 0 or w.y != 0:
points.append((w.x, w.y))
break
done.add(idx)
if w.command == mavutil.mavlink.MAV_CMD_DO_JUMP:
idx = int(w.param1)
w = self.wp(idx)
if w.x != 0 or w.y != 0:
points.append((w.x, w.y))
continue
idx += 1
if (w.x != 0 or w.y != 0) and w.command in [mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM,
mavutil.mavlink.MAV_CMD_NAV_LOITER_TURNS,
mavutil.mavlink.MAV_CMD_NAV_LOITER_TIME,
mavutil.mavlink.MAV_CMD_NAV_LAND,
mavutil.mavlink.MAV_CMD_NAV_TAKEOFF]:
points.append((w.x, w.y))
return points
def polygon_list(self):
'''return a list of polygons for the waypoints'''
done = set()
ret = []
while len(done) != self.count():
p = self.polygon(done)
if len(p) > 0:
ret.append(p)
return ret
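# Illustrative usage of MAVWPLoader (not part of the original module; the file
# name and system/component ids below are assumptions):
#
#   wp = MAVWPLoader(target_system=1, target_component=1)
#   n = wp.load('mission.txt')      # accepts QGC WPL 100/110 or protobuf text
#   print('loaded %u waypoints' % n)
#   wp.save('mission_out.txt')      # writes back in QGC WPL 110 format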
class MAVFenceError(Exception):
'''MAVLink fence error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class MAVFenceLoader(object):
'''MAVLink geo-fence loader'''
def __init__(self, target_system=0, target_component=0):
self.points = []
self.target_system = target_system
self.target_component = target_component
self.last_change = time.time()
def count(self):
'''return number of points'''
return len(self.points)
def point(self, i):
'''return a point'''
return self.points[i]
def add(self, p):
'''add a point'''
self.points.append(p)
self.last_change = time.time()
def clear(self):
'''clear point list'''
self.points = []
self.last_change = time.time()
def load(self, filename):
'''load points from a file.
returns number of points loaded'''
f = open(filename, mode='r')
self.clear()
for line in f:
if line.startswith('#'):
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 2:
raise MAVFenceError("invalid fence point line: %s" % line)
p = mavutil.mavlink.MAVLink_fence_point_message(self.target_system, self.target_component,
self.count(), 0, float(a[0]), float(a[1]))
self.add(p)
f.close()
for i in range(self.count()):
self.points[i].count = self.count()
return len(self.points)
def save(self, filename):
'''save fence points to a file'''
f = open(filename, mode='w')
for p in self.points:
f.write("%f\t%f\n" % (p.lat, p.lng))
f.close()
def polygon(self):
'''return a polygon for the fence'''
points = []
for fp in self.points[1:]:
points.append((fp.lat, fp.lng))
return points
| gpl-3.0 | 8,573,963,178,603,970,000 | 6,826,207,816,456,404,000 | 34.258667 | 107 | 0.492286 | false |
Beyond-Imagination/BlubBlub | ChatbotServer/ChatbotEnv/Lib/site-packages/konlpy/corpus.py | 1 | 1849 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import os
from . import utils
class CorpusLoader():
"""Loader for corpora.
For a complete list of corpora available in KoNLPy,
refer to :ref:`corpora`.
.. code-block:: python
>>> from konlpy.corpus import kolaw
>>> fids = kolaw.fileids()
>>> fobj = kolaw.open(fids[0])
>>> print fobj.read(140)
대한민국헌법
유구한 역사와 전통에 빛나는 우리 대한국민은 3·1운동으로 건립된 대한민국임시정부의 법통과 불의에 항거한 4·19민주이념을 계승하고, 조국의 민주개혁과 평화적 통일의 사명에 입각하여 정의·인도와 동포애로써 민족의 단결을 공고히 하고, 모든 사회적 폐습과 불의를 타파하며, 자율과 조화를 바 바
"""
def abspath(self, filename=None):
"""Absolute path of corpus file.
If ``filename`` is *None*, returns absolute path of corpus.
:param filename: Name of a particular file in the corpus.
"""
basedir = '%s/data/corpus/%s' % (utils.installpath, self.name)
if filename:
return '%s/%s' % (basedir, filename)
else:
return '%s/' % basedir
def fileids(self):
"""List of file IDs in the corpus."""
return os.listdir(self.abspath())
def open(self, filename):
"""Method to open a file in the corpus.
Returns a file object.
:param filename: Name of a particular file in the corpus.
"""
return utils.load_txt(self.abspath(filename))
def __init__(self, name=None):
if not name:
raise Exception("You need to input the name of the corpus")
else:
self.name = name
kolaw = CorpusLoader('kolaw')
kobill = CorpusLoader('kobill')
| gpl-3.0 | 8,070,391,668,151,934,000 | 9,205,287,063,832,514,000 | 27.035088 | 171 | 0.58761 | false |
ErickMurillo/aprocacaho | organizacion/admin.py | 1 | 3456 | from django.contrib import admin
from .models import *
# Register your models here.
#organizacion
class InlineEscuelaCampo(admin.TabularInline):
model = EscuelaCampo
extra = 1
class OrganizacionAdmin(admin.ModelAdmin):
inlines = [InlineEscuelaCampo]
list_display = ('id','nombre','siglas')
list_display_links = ('id','nombre','siglas')
#encuesta organizacion
class InlineAspectosJuridicos(admin.TabularInline):
model = AspectosJuridicos
max_num = 1
can_delete = False
class InlineListaMiembros(admin.TabularInline):
model = ListaMiembros
extra = 1
class InlineDocumentacion(admin.TabularInline):
model = Documentacion
extra = 1
max_num = 7
class InlineProduccionComercializacion(admin.TabularInline):
model = ProduccionComercializacion
extra = 1
class InlineNivelCumplimiento(admin.TabularInline):
model = NivelCumplimiento
extra = 1
max_num = 7
# class InlineDatosProductivos(admin.TabularInline):
# model = DatosProductivos
# extra = 1
# max_num = 4
#
# class InlineDatosProductivosTabla(admin.TabularInline):
# model = DatosProductivosTabla
# extra = 1
# max_num = 2
class InlineInfraestructura(admin.TabularInline):
model = Infraestructura
extra = 1
class InlineTransporte(admin.TabularInline):
model = Transporte
max_num = 1
can_delete = False
# class InlineComercializacion(admin.TabularInline):
# model = Comercializacion
# extra = 1
# max_num = 3
#
# class InlineCacaoComercializado(admin.TabularInline):
# model = CacaoComercializado
# max_num = 1
# can_delete = False
class InlineCertificacionOrg(admin.TabularInline):
model = CertificacionOrg
max_num = 1
can_delete = False
class InlineDestinoProdCorriente(admin.TabularInline):
model = DestinoProdCorriente
extra = 1
max_num = 4
class InlineDestinoProdFermentado(admin.TabularInline):
model = DestinoProdFermentado
extra = 1
max_num = 4
class InlineFinanciamiento(admin.TabularInline):
model = Financiamiento
max_num = 1
can_delete = False
class InlineFinanciamientoProductores(admin.TabularInline):
model = FinanciamientoProductores
extra = 1
max_num = 5
class InlineInfoFinanciamiento(admin.TabularInline):
model = InfoFinanciamiento
extra = 1
max_num = 4
class EncuestaOrganicacionAdmin(admin.ModelAdmin):
# def get_queryset(self, request):
# if request.user.is_superuser:
# return EncuestaOrganicacion.objects.all()
# return EncuestaOrganicacion.objects.filter(usuario=request.user)
def save_model(self, request, obj, form, change):
obj.usuario = request.user
obj.save()
inlines = [InlineAspectosJuridicos,InlineListaMiembros,InlineDocumentacion,
InlineNivelCumplimiento,InlineProduccionComercializacion,
InlineInfraestructura,InlineTransporte,
InlineCertificacionOrg,InlineDestinoProdCorriente,InlineDestinoProdFermentado,
InlineFinanciamiento,InlineFinanciamientoProductores,InlineInfoFinanciamiento]
list_display = ('id','organizacion','fecha')
list_display_links = ('id','organizacion')
class Media:
css = {
'all': ('css/admin.css',)
}
js = ('js/admin_org.js',)
admin.site.register(Organizacion,OrganizacionAdmin)
admin.site.register(EncuestaOrganicacion,EncuestaOrganicacionAdmin)
| mit | 58,939,343,735,298,770 | 8,625,154,643,396,912,000 | 26.648 | 94 | 0.712095 | false |
laurenrevere/osf.io | osf_tests/conftest.py | 6 | 2359 | import logging
import pytest
from faker import Factory
from framework.django.handlers import handlers as django_handlers
from framework.flask import rm_handlers
from website import settings
from website.app import init_app
from website.project.signals import contributor_added
from website.project.views.contributor import notify_added_contributor
# Silence some 3rd-party logging and some "loud" internal loggers
SILENT_LOGGERS = [
'api.caching.tasks',
'factory.generate',
'factory.containers',
'framework.analytics',
'framework.auth.core',
'website.app',
'website.archiver.tasks',
'website.mails',
'website.notifications.listeners',
'website.search.elastic_search',
'website.search_migration.migrate',
'website.util.paths',
'requests_oauthlib.oauth2_session',
'raven.base.Client',
'raven.contrib.django.client.DjangoClient',
'transitions.core',
'MARKDOWN',
]
for logger_name in SILENT_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.CRITICAL)
# NOTE: autouse so that ADDONS_REQUESTED gets set on website.settings
@pytest.fixture(autouse=True, scope='session')
def app():
try:
test_app = init_app(routes=True, set_backends=False)
except AssertionError: # Routes have already been set up
test_app = init_app(routes=False, set_backends=False)
rm_handlers(test_app, django_handlers)
test_app.testing = True
return test_app
@pytest.yield_fixture()
def request_context(app):
context = app.test_request_context(headers={
'Remote-Addr': '146.9.219.56',
'User-Agent': 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:0.9.4.1) Gecko/20020518 Netscape6/6.2.3'
})
context.push()
yield context
context.pop()
DISCONNECTED_SIGNALS = {
# disconnect notify_add_contributor so that add_contributor does not send "fake" emails in tests
contributor_added: [notify_added_contributor]
}
@pytest.fixture(autouse=True)
def disconnected_signals():
for signal in DISCONNECTED_SIGNALS:
for receiver in DISCONNECTED_SIGNALS[signal]:
signal.disconnect(receiver)
@pytest.fixture(autouse=True)
def patched_settings():
"""Patch settings for tests"""
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
settings.BCRYPT_LOG_ROUNDS = 1
@pytest.fixture()
def fake():
return Factory.create()
| apache-2.0 | 4,720,256,677,959,105,000 | 4,087,798,156,439,946,000 | 28.860759 | 107 | 0.71471 | false |
linktlh/Toontown-journey | toontown/toon/InventoryBase.py | 3 | 11970 | from pandac.PandaModules import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase.ToontownBattleGlobals import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
class InventoryBase(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('InventoryBase')
def __init__(self, toon, invStr = None):
self._createStack = str(StackTrace().compact())
self.toon = toon
if invStr == None:
self.inventory = []
for track in xrange(0, len(Tracks)):
level = []
for thisLevel in xrange(0, len(Levels[track])):
level.append(0)
self.inventory.append(level)
else:
self.inventory = self.makeFromNetString(invStr)
self.calcTotalProps()
return
def unload(self):
del self.toon
def __str__(self):
retStr = 'totalProps: %d\n' % self.totalProps
for track in xrange(0, len(Tracks)):
retStr += Tracks[track] + ' = ' + str(self.inventory[track]) + '\n'
return retStr
def updateInvString(self, invString):
inventory = self.makeFromNetString(invString)
self.updateInventory(inventory)
return None
def updateInventory(self, inv):
self.inventory = inv
self.calcTotalProps()
def makeNetString(self):
dataList = self.inventory
datagram = PyDatagram()
for track in xrange(0, len(Tracks)):
for level in xrange(0, len(Levels[track])):
datagram.addUint8(dataList[track][level])
dgi = PyDatagramIterator(datagram)
return dgi.getRemainingBytes()
def makeFromNetString(self, netString):
dataList = []
dg = PyDatagram(netString)
dgi = PyDatagramIterator(dg)
for track in xrange(0, len(Tracks)):
subList = []
for level in xrange(0, len(Levels[track])):
if dgi.getRemainingSize() > 0:
value = dgi.getUint8()
else:
value = 0
subList.append(value)
dataList.append(subList)
return dataList
def makeFromNetStringForceSize(self, netString, numTracks, numLevels):
dataList = []
dg = PyDatagram(netString)
dgi = PyDatagramIterator(dg)
for track in xrange(0, numTracks):
subList = []
for level in xrange(0, numLevels):
if dgi.getRemainingSize() > 0:
value = dgi.getUint8()
else:
value = 0
subList.append(value)
dataList.append(subList)
return dataList
def addItem(self, track, level):
return self.addItems(track, level, 1)
def addItems(self, track, level, amount):
if isinstance(track, str):
track = Tracks.index(track)
max = self.getMax(track, level)
if (not hasattr(self.toon, 'experience')) or (not hasattr(self.toon.experience, 'getExpLevel')):
return 0
if not (self.toon.experience.getExpLevel(track) >= level and self.toon.hasTrackAccess(track)):
return 0
if self.numItem(track, level) > max - amount:
return -1
if not (self.totalProps + amount <= self.toon.getMaxCarry() or level > LAST_REGULAR_GAG_LEVEL):
return -2
self.inventory[track][level] += amount
self.totalProps += amount
return self.inventory[track][level]
def addItemWithList(self, track, levelList):
for level in levelList:
self.addItem(track, level)
def numItem(self, track, level):
if isinstance(track, str):
track = Tracks.index(track)
if track > len(Tracks) - 1 or level > len(Levels) - 1:
self.notify.warning("%s is using a gag that doesn't exist %s %s!" % (self.toon.doId, track, level))
return -1
return self.inventory[track][level]
def useItem(self, track, level):
if type(track) == type(''):
track = Tracks.index(track)
if self.numItem(track, level) > 0:
self.inventory[track][level] -= 1
self.calcTotalProps()
return 1
elif self.numItem(track, level) == -1:
return -1
def setItem(self, track, level, amount):
if type(track) == type(''):
track = Tracks.index(track)
max = self.getMax(track, level)
curAmount = self.numItem(track, level)
if self.toon.experience.getExpLevel(track) >= level:
if amount <= max:
if self.totalProps - curAmount + amount <= self.toon.getMaxCarry():
self.inventory[track][level] = amount
self.totalProps = self.totalProps - curAmount + amount
return self.inventory[track][level]
else:
return -2
else:
return -1
else:
return 0
def getMax(self, track, level):
if type(track) == type(''):
track = Tracks.index(track)
maxList = CarryLimits[track]
if self.toon.experience:
return maxList[self.toon.experience.getExpLevel(track)][level]
else:
return 0
def getTrackAndLevel(self, propName):
for track in xrange(0, len(Tracks)):
if AvProps[track].count(propName):
                return (track, AvProps[track].index(propName))
return (-1, -1)
def calcTotalProps(self):
self.totalProps = 0
for track in xrange(0, len(Tracks)):
for level in xrange(0, len(Levels[track])):
if level <= LAST_REGULAR_GAG_LEVEL:
self.totalProps += self.numItem(track, level)
return None
def countPropsInList(self, invList):
totalProps = 0
for track in xrange(len(Tracks)):
for level in xrange(len(Levels[track])):
if level <= LAST_REGULAR_GAG_LEVEL:
totalProps += invList[track][level]
return totalProps
def setToMin(self, newInventory):
for track in xrange(len(Tracks)):
for level in xrange(len(Levels[track])):
self.inventory[track][level] = min(self.inventory[track][level], newInventory[track][level])
self.calcTotalProps()
return None
def validateItemsBasedOnExp(self, newInventory, allowUber = 0):
if type(newInventory) == type('String'):
tempInv = self.makeFromNetString(newInventory)
else:
tempInv = newInventory
for track in xrange(len(Tracks)):
for level in xrange(len(Levels[track])):
if tempInv[track][level] > self.getMax(track, level):
return 0
if tempInv[track][level] > 0 and not self.toon.hasTrackAccess(track):
commentStr = "Player %s trying to purchase gag they don't have track access to. track: %s level: %s" % (self.toon.doId, track, level)
dislId = self.toon.DISLid
if simbase.config.GetBool('want-ban-gagtrack', False):
#simbase.air.banManager.ban(self.toon.doId, dislId, commentStr)
pass
return 0
if level > LAST_REGULAR_GAG_LEVEL and tempInv[track][level] > self.inventory[track][level] or allowUber:
return 0
return 1
def validateItemsBasedOnAccess(self, newInventory):
return 1
def getMinCostOfPurchase(self, newInventory):
return self.countPropsInList(newInventory) - self.totalProps
def validatePurchase(self, newInventory, currentMoney, newMoney):
if newMoney > currentMoney:
self.notify.warning('Somebody lied about their money! Rejecting purchase.')
return 0
newItemTotal = self.countPropsInList(newInventory)
oldItemTotal = self.totalProps
if newItemTotal > oldItemTotal + currentMoney:
self.notify.warning('Somebody overspent! Rejecting purchase.')
return 0
if newItemTotal - oldItemTotal > currentMoney - newMoney:
self.notify.warning('Too many items based on money spent! Rejecting purchase.')
return 0
if newItemTotal > self.toon.getMaxCarry():
self.notify.warning('Cannot carry %s items! Rejecting purchase.' % newItemTotal)
return 0
if not self.validateItemsBasedOnExp(newInventory):
self.notify.warning('Somebody is trying to buy forbidden items! ' + 'Rejecting purchase.')
return 0
if not self.validateItemsBasedOnAccess(newInventory):
simbase.air.writeServerEvent('suspicious', self.toon.doId, 'non-paid av trying to purchase paid gags')
return 0
self.updateInventory(newInventory)
return 1
def maxOutInv(self, filterUberGags = 0, filterPaidGags = 0):
unpaid = self.toon.getGameAccess() != ToontownGlobals.AccessFull
for track in xrange(len(Tracks)):
if self.toon.hasTrackAccess(track):
for level in xrange(len(Levels[track])):
if level <= LAST_REGULAR_GAG_LEVEL or not filterUberGags:
if not filterPaidGags or not (unpaid and gagIsPaidOnly(track, level)):
self.addItem(track, level)
addedAnything = 1
while addedAnything:
addedAnything = 0
result = 0
for track in xrange(len(Tracks)):
if self.toon.hasTrackAccess(track):
level = len(Levels[track]) - 1
if level > LAST_REGULAR_GAG_LEVEL and filterUberGags:
level = LAST_REGULAR_GAG_LEVEL
if not filterPaidGags or not (unpaid and gagIsPaidOnly(track, level)):
result = self.addItem(track, level)
level -= 1
while result <= 0 and level >= 0:
if not filterPaidGags or not (unpaid and gagIsPaidOnly(track, level)):
result = self.addItem(track, level)
level -= 1
if result > 0:
addedAnything = 1
self.calcTotalProps()
return None
def NPCMaxOutInv(self, targetTrack=-1, maxLevelIndex=5):
result = 0
for level in xrange(maxLevelIndex, -1, -1):
anySpotsAvailable = 1
while anySpotsAvailable == 1:
anySpotsAvailable = 0
trackResults = []
for track in xrange(len(Tracks)):
if targetTrack != -1 and targetTrack != track:
continue
result = self.addItem(track, level)
trackResults.append(result)
if result == -2:
break
for res in trackResults:
if res > 0:
anySpotsAvailable = 1
if result == -2:
break
self.calcTotalProps()
return None
def zeroInv(self, killUber = 0):
for track in xrange(len(Tracks)):
for level in xrange(UBER_GAG_LEVEL_INDEX):
self.inventory[track][level] = 0
if killUber:
self.inventory[track][UBER_GAG_LEVEL_INDEX] = 0
if self.inventory[track][UBER_GAG_LEVEL_INDEX] > 1:
self.inventory[track][UBER_GAG_LEVEL_INDEX] = 1
self.calcTotalProps()
return None
def _garbageInfo(self):
return self._createStack
| apache-2.0 | 3,769,430,143,037,805,600 | -5,412,757,066,043,752,000 | 37 | 153 | 0.568588 | false |
srio/shadow3-scripts | transfocator_id30b.py | 1 | 25823 | import numpy
import xraylib
"""
transfocator_id30b : transfocator for id13b:
It can:
1) guess the lens configuration (number of lenses for each type) for a given photon energy
and target image size. Use transfocator_compute_configuration() for this task
2) for a given transfocator configuration, compute the main optical parameters
(image size, focal distance, focal position and divergence).
Use transfocator_compute_parameters() for this task
3) Performs full ray tracing. Use id30b_ray_tracing() for this task
Note that for the optimization and parameters calculations the transfocator configuration is
given in keywords. For ray tracing calculations many parameters of the transfocator are hard coded
with the values of id30b
See main program for examples.
Dependencies:
Numpy
xraylib (to compute refracion indices)
Shadow (for ray tracing only)
matplotlib (for some plots of ray=tracing)
Side effects:
When running ray tracing some files are created.
MODIFICATION HISTORY:
2015-03-25 [email protected], written
"""
__author__ = "Manuel Sanchez del Rio"
__contact__ = "[email protected]"
__copyright__ = "ESRF, 2015"
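# Minimal usage sketch for tasks 1) and 2) described above (photon energy and
# target size are illustrative assumptions; main() below shows the full workflow):
#
#   nlenses = transfocator_compute_configuration(14000.0, 20e-4, verbose=0)
#   size, f, q_f, div = transfocator_compute_parameters(14000.0, nlenses)
#   slots = transfocator_nlenses_to_slots(nlenses, nlenses_max=[15, 3, 1])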
def transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=1 ):
"""
Computes the optimum transfocator configuration for a given photon energy and target image size.
All length units are cm
:param photon_energy_ev: the photon energy in eV
:param s_target: the target image size in cm.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
    :param nlenses_max: the maximum allowed number of lenses for each type of lens. nlenses_max = [15,3,1]
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
:param:verbose: set to 1 for verbose text output
:return: a list with the number of lenses of each type.
"""
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return None
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_q_target = _tansfocator_guess_focal_position( s_target, p=tf_p, q=tf_q, sigmaz=sigmaz, alpha=alpha, \
lens_diameter=lens_diameter,method=2)
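    # thin-lens relation for the target focal length: 1/f = 1/p + 1/q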
focal_f_target = 1.0 / (1.0/focal_q_target + 1.0/tf_p)
div_q_target = alpha * lens_diameter / focal_q_target
#corrections for extreme cases
source_demagnified = 2.35*sigmaz*focal_q_target/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target_calc = numpy.sqrt( (div_q_target*(tf_q-focal_q_target))**2 + source_demagnified**2)
nlenses_target = _transfocator_guess_configuration(focal_f_target,deltas=deltas,\
nlenses_max=nlenses_max,radii=nlenses_radii, )
if verbose:
print("transfocator_compute_configuration: focal_f_target: %f"%(focal_f_target))
print("transfocator_compute_configuration: focal_q_target: %f cm"%(focal_q_target))
print("transfocator_compute_configuration: s_target: %f um"%(s_target_calc*1e4))
print("transfocator_compute_configuration: nlenses_target: ",nlenses_target)
return nlenses_target
def transfocator_compute_parameters(photon_energy_ev, nlenses_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800 ):
"""
    Computes the parameters of the optical performance of a given transfocator configuration
    and returns them as a tuple.
All length units are cm
:param photon_energy_ev:
:param nlenses_target: a list with the lens configuration, i.e. the number of lenses of each type.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
    :param nlenses_max: the maximum allowed number of lenses for each type of lens. nlenses_max = [15,3,1]
TODO: remove (not used)
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
    :return: a list with parameters (image_size, lens_focal_distance,
focal_position from transfocator center, divergence of beam after the transfocator)
"""
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_f = _transfocator_calculate_focal_distance( deltas=deltas,\
nlenses=nlenses_target,radii=nlenses_radii)
focal_q = 1.0 / (1.0/focal_f - 1.0/tf_p)
div_q = alpha * lens_diameter / focal_q
#corrections
source_demagnified = 2.35*sigmaz*focal_q/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target = numpy.sqrt( (div_q*(tf_q-focal_q))**2 + (source_demagnified)**2 )
return (s_target,focal_f,focal_q,div_q)
def transfocator_nlenses_to_slots(nlenses,nlenses_max=None):
"""
converts the transfocator configuration from a list of the number of lenses of each type,
into a list of active (1) or inactive (0) actuators for the slots.
:param nlenses: the list with number of lenses (e.g., [5,2,0]
:param nlenses_max: the maximum number of lenses of each type, usually powers of two minus one.
E.g. [15,3,1]
:return: a list of on (1) and off (0) slots, e.g., [1, 0, 1, 0, 0, 1, 0]
(first type: 1*1+0*2+1*4+0*8=5, second type: 0*1+1*2=2, third type: 0*1=0)
"""
if nlenses_max == None:
nlenses_max = nlenses
ss = []
for i,iopt in enumerate(nlenses):
if iopt > nlenses_max[i]:
print("Error: i:%d, nlenses: %d, nlenses_max: %d"%(i,iopt,nlenses_max[i]))
ncharacters = len("{0:b}".format(nlenses_max[i]))
si = list( ("{0:0%db}"%(ncharacters)).format(int(iopt)) )
si.reverse()
ss += si
on_off = [int(i) for i in ss]
#print("transfocator_nlenses_to_slots: nlenses_max: ",nlenses_max," nlenses: ",nlenses," slots: ",on_off)
return on_off
def _transfocator_calculate_focal_distance(deltas=[0.999998],nlenses=[1],radii=[500e-4]):
inverse_focal_distance = 0.0
for i,nlensesi in enumerate(nlenses):
if nlensesi > 0:
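            # stack of nlensesi parabolic lenses of radius R: f_i = R / (2 * N * delta)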
focal_distance_i = radii[i] / (2.*nlensesi*deltas[i])
inverse_focal_distance += 1.0/focal_distance_i
if inverse_focal_distance == 0:
return 99999999999999999999999999.
else:
return 1.0/inverse_focal_distance
def _tansfocator_guess_focal_position( s_target, p=5960., q=3800.0, sigmaz=6.46e-4, \
alpha=0.66, lens_diameter=0.05, method=2):
x = 1e15
if method == 1: # simple sum
AA = 2.35*sigmaz/p
BB = -(s_target + alpha * lens_diameter)
CC = alpha*lens_diameter*q
cc = numpy.roots([AA,BB,CC])
x = cc[1]
return x
if method == 2: # sum in quadrature
AA = ( (2.35*sigmaz)**2)/(p**2)
BB = 0.0
CC = alpha**2 * lens_diameter**2 - s_target**2
DD = - 2.0 * alpha**2 * lens_diameter**2 * q
EE = alpha**2 * lens_diameter**2 * q**2
cc = numpy.roots([AA,BB,CC,DD,EE])
for i,cci in enumerate(cc):
if numpy.imag(cci) == 0:
return numpy.real(cci)
return x
def _transfocator_guess_configuration(focal_f_target,deltas=[0.999998],nlenses_max=[15],radii=[500e-4]):
nn = len(nlenses_max)
ncombinations = (1+nlenses_max[0]) * (1+nlenses_max[1]) * (1+nlenses_max[2])
icombinations = 0
aa = numpy.zeros((3,ncombinations),dtype=int)
bb = numpy.zeros(ncombinations)
for i0 in range(1+nlenses_max[0]):
for i1 in range(1+nlenses_max[1]):
for i2 in range(1+nlenses_max[2]):
aa[0,icombinations] = i0
aa[1,icombinations] = i1
aa[2,icombinations] = i2
bb[icombinations] = focal_f_target - _transfocator_calculate_focal_distance(deltas=deltas,nlenses=[i0,i1,i2],radii=radii)
icombinations += 1
bb1 = numpy.abs(bb)
ibest = bb1.argmin()
return (aa[:,ibest]).tolist()
#
#
#
def id30b_ray_tracing(emittH=4e-9,emittV=1e-11,betaH=35.6,betaV=3.0,number_of_rays=50000,\
density=1.845,symbol="Be",tf_p=1000.0,tf_q=1000.0,lens_diameter=0.05,\
slots_max=None,slots_on_off=None,photon_energy_ev=14000.0,\
slots_lens_thickness=None,slots_steps=None,slots_radii=None,\
s_target=10e-4,focal_f=10.0,focal_q=10.0,div_q=1e-6):
#=======================================================================================================================
# Gaussian undulator source
#=======================================================================================================================
import Shadow
#import Shadow.ShadowPreprocessorsXraylib as sx
sigmaXp = numpy.sqrt(emittH/betaH)
sigmaZp = numpy.sqrt(emittV/betaV)
sigmaX = emittH/sigmaXp
sigmaZ = emittV/sigmaZp
print("\n\nElectron sizes H:%f um, V:%fu m;\nelectron divergences: H:%f urad, V:%f urad"%\
(sigmaX*1e6, sigmaZ*1e6, sigmaXp*1e6, sigmaZp*1e6))
# set Gaussian source
src = Shadow.Source()
src.set_energy_monochromatic(photon_energy_ev)
src.set_gauss(sigmaX*1e2,sigmaZ*1e2,sigmaXp,sigmaZp)
print("\n\nElectron sizes stored H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
src.apply_gaussian_undulator(undulator_length_in_m=2.8, user_unit_to_m=1e-2, verbose=1)
print("\n\nElectron sizes stored (undulator) H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
print("\n\nSource size in vertical FWHM: %f um\n"%\
(2.35*src.SIGMAZ*1e4))
src.NPOINT = number_of_rays
src.ISTAR1 = 0 # 677543155
src.write("start.00")
# create source
beam = Shadow.Beam()
beam.genSource(src)
beam.write("begin.dat")
src.write("end.00")
#=======================================================================================================================
# complete the (detailed) transfocator description
#=======================================================================================================================
print("\nSetting detailed Transfocator for ID30B")
slots_nlenses = numpy.array(slots_max)*numpy.array(slots_on_off)
slots_empty = (numpy.array(slots_max)-slots_nlenses)
#
####interactive=True, SYMBOL="SiC",DENSITY=3.217,FILE="prerefl.dat",E_MIN=100.0,E_MAX=20000.0,E_STEP=100.0
Shadow.ShadowPreprocessorsXraylib.prerefl(interactive=False,E_MIN=2000.0,E_MAX=55000.0,E_STEP=100.0,\
DENSITY=density,SYMBOL=symbol,FILE="Be2_55.dat" )
nslots = len(slots_max)
prerefl_file = ["Be2_55.dat" for i in range(nslots)]
print("slots_max: ",slots_max)
#print("slots_target: ",slots_target)
print("slots_on_off: ",slots_on_off)
print("slots_steps: ",slots_steps)
print("slots_radii: ",slots_radii)
print("slots_nlenses: ",slots_nlenses)
print("slots_empty: ",slots_empty)
#calculate distances, nlenses and slots_empty
# these are distances p and q with TF length removed
tf_length = numpy.array(slots_steps).sum() #tf length in cm
tf_fs_before = tf_p - 0.5*tf_length #distance from source to center of transfocator
tf_fs_after = tf_q - 0.5*tf_length # distance from center of transfocator to image
# for each slot, these are the empty distances before and after the lenses
tf_p0 = numpy.zeros(nslots)
tf_q0 = numpy.array(slots_steps) - (numpy.array(slots_max) * slots_lens_thickness)
# add now the p q distances
tf_p0[0] += tf_fs_before
tf_q0[-1] += tf_fs_after
print("tf_p0: ",tf_p0)
print("tf_q0: ",tf_q0)
print("tf_length: %f cm"%(tf_length))
# build transfocator
tf = Shadow.CompoundOE(name='TF ID30B')
tf.append_transfocator(tf_p0.tolist(), tf_q0.tolist(), \
nlenses=slots_nlenses.tolist(), radius=slots_radii, slots_empty=slots_empty.tolist(),\
thickness=slots_lens_thickness, prerefl_file=prerefl_file,\
surface_shape=4, convex_to_the_beam=0, diameter=lens_diameter,\
cylinder_angle=0.0,interthickness=50e-4,use_ccc=0)
itmp = input("SHADOW Source complete. Do you want to run SHADOR trace? [1=Yes,0=No]: ")
if str(itmp) != "1":
return
#trace system
tf.dump_systemfile()
beam.traceCompoundOE(tf,write_start_files=0,write_end_files=0,write_star_files=0, write_mirr_files=0)
#write only last result file
beam.write("star_tf.dat")
print("\nFile written to disk: star_tf.dat")
#
# #ideal calculations
#
print("\n\n\n")
print("=============================================== TRANSFOCATOR OUTPUTS ==========================================")
print("\nTHEORETICAL results: ")
print("REMIND-----With these lenses we obtained (analytically): ")
print("REMIND----- focal_f: %f cm"%(focal_f))
print("REMIND----- focal_q: %f cm"%(focal_q))
print("REMIND----- s_target: %f um"%(s_target*1e4))
demagnification_factor = tf_p/focal_q
theoretical_focal_size = src.SIGMAZ*2.35/demagnification_factor
# analyze shadow results
print("\nSHADOW results: ")
st1 = beam.get_standard_deviation(3,ref=0)
st2 = beam.get_standard_deviation(3,ref=1)
print(" stDev*2.35: unweighted: %f um, weighted: %f um "%(st1*2.35*1e4,st2*2.35*1e4))
tk = beam.histo1(3, nbins=75, ref=1, nolost=1, write="HISTO1")
print(" Histogram FWHM: %f um "%(1e4*tk["fwhm"]))
print(" Transmitted intensity: %f (source was: %d) (transmission is %f %%) "%(beam.intensity(nolost=1), src.NPOINT, beam.intensity(nolost=1)/src.NPOINT*100))
#scan around image
xx1 = numpy.linspace(0.0,1.1*tf_fs_after,11) # position from TF exit plane
#xx0 = focal_q - tf_length*0.5
xx0 = focal_q - tf_length*0.5 # position of focus from TF exit plane
xx2 = numpy.linspace(xx0-100.0,xx0+100,21) # position from TF exit plane
xx3 = numpy.array([tf_fs_after])
xx = numpy.concatenate(([-0.5*tf_length],xx1,xx2,[tf_fs_after]))
xx.sort()
f = open("id30b.spec","w")
f.write("#F id30b.spec\n")
f.write("\n#S 1 calculations for id30b transfocator\n")
f.write("#N 8\n")
labels = " %18s %18s %18s %18s %18s %18s %18s %18s"%\
("pos from source","pos from image","[pos from TF]", "pos from TF center", "pos from focus",\
"fwhm shadow(stdev)","fwhm shadow(histo)","fwhm theoretical")
f.write("#L "+labels+"\n")
out = numpy.zeros((8,xx.size))
for i,pos in enumerate(xx):
beam2 = beam.duplicate()
beam2.retrace(-tf_fs_after+pos)
fwhm1 = 2.35*1e4*beam2.get_standard_deviation(3,ref=1,nolost=1)
tk = beam2.histo1(3, nbins=75, ref=1, nolost=1)
fwhm2 = 1e4*tk["fwhm"]
#fwhm_th = 1e4*transfocator_calculate_estimated_size(pos,diameter=diameter,focal_distance=focal_q)
fwhm_th2 = 1e4*numpy.sqrt( (div_q*(pos+0.5*tf_length-focal_q))**2 + theoretical_focal_size**2 )
#fwhm_th2 = 1e4*( numpy.abs(div_q*(pos-focal_q+0.5*tf_length)) + theoretical_focal_size )
out[0,i] = tf_fs_before+tf_length+pos
out[1,i] = -tf_fs_after+pos
out[2,i] = pos
out[3,i] = pos+0.5*tf_length
out[4,i] = pos+0.5*tf_length-focal_q
out[5,i] = fwhm1
out[6,i] = fwhm2
out[7,i] = fwhm_th2
f.write(" %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f \n"%\
(tf_fs_before+tf_length+pos,\
-tf_fs_after+pos,\
pos,\
pos+0.5*tf_length,\
pos+0.5*tf_length-focal_q,\
fwhm1,fwhm2,fwhm_th2))
f.close()
print("File with beam evolution written to disk: id30b.spec")
#
# plots
#
itmp = input("Do you want to plot the intensity distribution and beam evolution? [1=yes,0=No]")
if str(itmp) != "1":
return
import matplotlib.pylab as plt
plt.figure(1)
plt.plot(out[1,:],out[5,:],'blue',label="fwhm shadow(stdev)")
plt.plot(out[1,:],out[6,:],'green',label="fwhm shadow(histo1)")
plt.plot(out[1,:],out[7,:],'red',label="fwhm theoretical")
plt.xlabel("Distance from image plane [cm]")
plt.ylabel("spot size [um] ")
ax = plt.subplot(111)
ax.legend(bbox_to_anchor=(1.1, 1.05))
print("Kill graphic to continue.")
plt.show()
Shadow.ShadowTools.histo1(beam,3,nbins=75,ref=1,nolost=1,calfwhm=1)
input("<Enter> to finish.")
return None
def id30b_full_simulation(photon_energy_ev=14000.0,s_target=20.0e-4,nlenses_target=None):
if nlenses_target == None:
force_nlenses = 0
else:
force_nlenses = 1
#
# define lens setup (general)
#
xrl_symbol = ["Be","Be","Be"]
xrl_density = [1.845,1.845,1.845]
lens_diameter = 0.05
nlenses_max = [15,3,1]
nlenses_radii = [500e-4,1000e-4,1500e-4]
sigmaz=6.46e-4
alpha = 0.55
tf_p = 5960 # position of the TF measured from the center of the transfocator
tf_q = 9760 - tf_p # position of the image plane measured from the center of the transfocator
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return
print("================================== TRANSFOCATOR INPUTS ")
print("Photon energy: %f eV"%(photon_energy_ev))
if force_nlenses:
print("Forced_nlenses: ",nlenses_target)
else:
print("target size: %f cm"%(s_target))
print("materials: ",xrl_symbol)
print("densities: ",xrl_density)
print("Lens diameter: %f cm"%(lens_diameter))
print("nlenses_max:",nlenses_max,"nlenses_radii: ",nlenses_radii)
print("Source size (sigma): %f um, FWHM: %f um"%(1e4*sigmaz,2.35*1e4*sigmaz))
print("Distances: tf_p: %f cm, tf_q: %f cm"%(tf_p,tf_q))
print("alpha: %f"%(alpha))
print("========================================================")
if force_nlenses != 1:
nlenses_target = transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=xrl_symbol,density=xrl_density,\
nlenses_max=nlenses_max, nlenses_radii=nlenses_radii, lens_diameter=lens_diameter, \
sigmaz=sigmaz, alpha=alpha, \
tf_p=tf_p,tf_q=tf_q, verbose=1)
(s_target,focal_f,focal_q,div_q) = \
transfocator_compute_parameters(photon_energy_ev, nlenses_target,\
symbol=xrl_symbol,density=xrl_density,\
nlenses_max=nlenses_max, nlenses_radii=nlenses_radii, \
lens_diameter=lens_diameter,\
sigmaz=sigmaz, alpha=alpha,\
tf_p=tf_p,tf_q=tf_q)
slots_max = [ 1, 2, 4, 8, 1, 2, 1] # slots
slots_on_off = transfocator_nlenses_to_slots(nlenses_target,nlenses_max=nlenses_max)
print("=============================== TRANSFOCATOR SET")
#print("deltas: ",deltas)
if force_nlenses != 1:
print("nlenses_target (optimized): ",nlenses_target)
else:
print("nlenses_target (forced): ",nlenses_target)
print("With these lenses we obtain: ")
print(" focal_f: %f cm"%(focal_f))
print(" focal_q: %f cm"%(focal_q))
print(" s_target: %f um"%(s_target*1e4))
print(" slots_max: ",slots_max)
print(" slots_on_off: ",slots_on_off)
print("==================================================")
# for theoretical calculations use the focal position and distances given by the target nlenses
itmp = input("Start SHADOW simulation? [1=yes,0=No]: ")
if str(itmp) != "1":
return
#=======================================================================================================================
# Inputs
#=======================================================================================================================
emittH = 3.9e-9
emittV = 10e-12
betaH = 35.6
betaV = 3.0
number_of_rays = 50000
nslots = len(slots_max)
slots_lens_thickness = [0.3 for i in range(nslots)] #total thickness of a single lens in cm
# for each slot, positional gap of the first lens in cm
slots_steps = [ 4, 4, 1.9, 6.1, 4, 4, slots_lens_thickness[-1]]
slots_radii = [.05, .05, .05, .05, 0.1, 0.1, 0.15] # radii of the lenses in cm
AAA= 333
id30b_ray_tracing(emittH=emittH,emittV=emittV,betaH=betaH,betaV=betaV,number_of_rays=number_of_rays,\
density=xrl_density[0],symbol=xrl_symbol[0],tf_p=tf_p,tf_q=tf_q,lens_diameter=lens_diameter,\
slots_max=slots_max,slots_on_off=slots_on_off,photon_energy_ev=photon_energy_ev,\
slots_lens_thickness=slots_lens_thickness,slots_steps=slots_steps,slots_radii=slots_radii,\
s_target=s_target,focal_f=focal_f,focal_q=focal_q,div_q=div_q)
def main():
# this performs the full simulation: calculates the optimum configuration and do the ray-tracing
itmp = input("Enter: \n 0 = optimization calculation only \n 1 = full simulation (ray tracing) \n?> ")
photon_energy_kev = float(input("Enter photon energy in keV: "))
s_target_um = float(input("Enter target focal dimension in microns: "))
if str(itmp) == "1":
id30b_full_simulation(photon_energy_ev=photon_energy_kev*1e3,s_target=s_target_um*1e-4,nlenses_target=None)
#id30b_full_simulation(photon_energy_ev=14000.0,s_target=20.0e-4,nlenses_target=[3,1,1])
else:
#this performs the calculation of the optimizad configuration
nlenses_optimum = transfocator_compute_configuration(photon_energy_kev*1e3,s_target_um*1e-4,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=0 )
print("Optimum lens configuration is: ",nlenses_optimum)
if nlenses_optimum == None:
return
print("Activate slots: ",transfocator_nlenses_to_slots(nlenses_optimum,nlenses_max=[15,3,1]))
# this calculates the parameters (image size, etc) for a given lens configuration
(size, f, q_f, div) = transfocator_compute_parameters(photon_energy_kev*1e3, nlenses_optimum,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800 )
print("For given configuration ",nlenses_optimum," we get: ")
print(" size: %f cm, focal length: %f cm, focal distance: %f cm, divergence: %f rad: "%(size, f, q_f, div))
if __name__ == "__main__":
    main()
| mit | 7,014,168,836,577,493,000 | 1,108,138,286,984,486,300 | 39.224299 | 162 | 0.584905 | false |
jose187/gh_word_count | gh_word_count/__init__.py | 1 | 2681 |
from ommit_words import list_ommited_words
from re import sub
import operator
class _input_list:
def __init__(self,list_TITLES):
self.list_TITLES = list_TITLES
self.list_remove = list_ommited_words()
def _word_count(self):
# these are all the words that are in the text
dict_words = {}
# now we go through each of the lines
for str_line in self.list_TITLES:
str_raw_line1 = sub('[^a-z0-9 ]','',str_line.lower())
list_line_words = str_raw_line1.split()
for str_word in list_line_words:
# check to see if its in the ommited word list
if str_word not in self.list_remove:
# create new key if it is not there yet
if str_word not in dict_words:
dict_words[str_word] = [1]
# add if is already there
elif str_word in dict_words:
dict_words[str_word].append(1)
sorted_x = sorted(dict_words.iteritems(),
key=operator.itemgetter(1),
reverse=True)
list_OUTPUT = []
for each_item in sorted_x:
int_COUNT = sum(each_item[1])
if int_COUNT > 1:
tup_ONE_COUNT = ('%s' % each_item[0],
'%d' % int_COUNT)
list_OUTPUT.append(tup_ONE_COUNT)
return list_OUTPUT
# gets the top x according to frequency
# returns list
def _get_top(self,int_TOP):
list_TOP_N = []
for str_WORD in self._word_count()[:int_TOP]:
list_TOP_N.append(str_WORD)
return list_TOP_N
# displays the count on the terminal
def _show_count(list_TUPS,entries=0):
if entries == 0:
int_TOP = len(list_TUPS)
else:
int_TOP = entries
print 'Count\tWord\n'
for tup_ITEM in list_TUPS[:int_TOP]:
print '%d\t%s' % (int(tup_ITEM[1]),str(tup_ITEM[0]))
# saves the count to csv file
def _save_counts(list_COUNTS,str_FILE_PATH,entries=0):
if entries == 0:
int_TOP = len(list_COUNTS)
else:
int_TOP = entries
list_OUTPUT = ['"Count","Word"']
for tup_ITEM in list_COUNTS[:int_TOP]:
str_OUTPUT = '%d,"%s"' % (int(tup_ITEM[1]),str(tup_ITEM[0]))
list_OUTPUT.append(str_OUTPUT)
fw_OUTPUT = open(str_FILE_PATH,'w')
fw_OUTPUT.write('\n'.join(list_OUTPUT))
fw_OUTPUT.close()
| bsd-2-clause | -9,176,989,444,754,761,000 | 8,525,112,387,541,675,000 | 30.916667 | 69 | 0.497576 | false |
bdh1011/wau | venv/lib/python2.7/site-packages/twisted/internet/_glibbase.py | 27 | 12813 | # -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides base support for Twisted to interact with the glib/gtk
mainloops.
The classes in this module should not be used directly, but rather you should
import gireactor or gtk3reactor for GObject Introspection based applications,
or glib2reactor or gtk2reactor for applications using legacy static bindings.
"""
from __future__ import division, absolute_import
import sys
from zope.interface import implementer
from twisted.internet import base, posixbase, selectreactor
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import log
def ensureNotImported(moduleNames, errorMessage, preventImports=[]):
"""
Check whether the given modules were imported, and if requested, ensure
they will not be importable in the future.
@param moduleNames: A list of module names we make sure aren't imported.
@type moduleNames: C{list} of C{str}
@param preventImports: A list of module name whose future imports should
be prevented.
@type preventImports: C{list} of C{str}
@param errorMessage: Message to use when raising an C{ImportError}.
@type errorMessage: C{str}
@raises: C{ImportError} with given error message if a given module name
has already been imported.
"""
for name in moduleNames:
if sys.modules.get(name) is not None:
raise ImportError(errorMessage)
# Disable module imports to avoid potential problems.
for name in preventImports:
sys.modules[name] = None
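# Illustrative call (module names and message are assumptions, mirroring how the
# glib/gtk reactors guard against mixing static and introspected bindings):
#
#   ensureNotImported(
#       ['gobject', 'gtk'],
#       'Static GObject bindings are already imported; cannot mix with gi.',
#       preventImports=['gobject', 'gtk'])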
class GlibWaker(posixbase._UnixWaker):
"""
Run scheduled events after waking up.
"""
def doRead(self):
posixbase._UnixWaker.doRead(self)
self.reactor._simulate()
@implementer(IReactorFDSet)
class GlibReactorBase(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
"""
Base class for GObject event loop reactors.
Notification for I/O events (reads and writes on file descriptors) is done
by the gobject-based event loop. File descriptors are registered with
gobject with the appropriate flags for read/write/disconnect notification.
Time-based events, the results of C{callLater} and C{callFromThread}, are
handled differently. Rather than registering each event with gobject, a
single gobject timeout is registered for the earliest scheduled event, the
output of C{reactor.timeout()}. For example, if there are timeouts in 1, 2
and 3.4 seconds, a single timeout is registered for 1 second in the
future. When this timeout is hit, C{_simulate} is called, which calls the
appropriate Twisted-level handlers, and a new timeout is added to gobject
by the C{_reschedule} method.
To handle C{callFromThread} events, we use a custom waker that calls
C{_simulate} whenever it wakes up.
@ivar _sources: A dictionary mapping L{FileDescriptor} instances to
GSource handles.
@ivar _reads: A set of L{FileDescriptor} instances currently monitored for
reading.
@ivar _writes: A set of L{FileDescriptor} instances currently monitored for
writing.
@ivar _simtag: A GSource handle for the next L{simulate} call.
"""
# Install a waker that knows it needs to call C{_simulate} in order to run
# callbacks queued from a thread:
_wakerFactory = GlibWaker
def __init__(self, glib_module, gtk_module, useGtk=False):
self._simtag = None
self._reads = set()
self._writes = set()
self._sources = {}
self._glib = glib_module
self._gtk = gtk_module
posixbase.PosixReactorBase.__init__(self)
self._source_remove = self._glib.source_remove
self._timeout_add = self._glib.timeout_add
def _mainquit():
if self._gtk.main_level():
self._gtk.main_quit()
if useGtk:
self._pending = self._gtk.events_pending
self._iteration = self._gtk.main_iteration_do
self._crash = _mainquit
self._run = self._gtk.main
else:
self.context = self._glib.main_context_default()
self._pending = self.context.pending
self._iteration = self.context.iteration
self.loop = self._glib.MainLoop()
self._crash = lambda: self._glib.idle_add(self.loop.quit)
self._run = self.loop.run
def _handleSignals(self):
# First, install SIGINT and friends:
base._SignalReactorMixin._handleSignals(self)
# Next, since certain versions of gtk will clobber our signal handler,
# set all signal handlers again after the event loop has started to
# ensure they're *really* set. We don't call this twice so we don't
# leak file descriptors created in the SIGCHLD initialization:
self.callLater(0, posixbase.PosixReactorBase._handleSignals, self)
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
# function below replicates the pygtk1 functionality.
# In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
# g_io_add_watch() takes different condition bitfields than
# gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
# bug.
def input_add(self, source, condition, callback):
if hasattr(source, 'fileno'):
# handle python objects
def wrapper(ignored, condition):
return callback(source, condition)
fileno = source.fileno()
else:
fileno = source
wrapper = callback
return self._glib.io_add_watch(
fileno, condition, wrapper,
priority=self._glib.PRIORITY_DEFAULT_IDLE)
def _ioEventCallback(self, source, condition):
"""
Called by event loop when an I/O event occurs.
"""
log.callWithLogger(
source, self._doReadOrWrite, source, source, condition)
return True # True = don't auto-remove the source
def _add(self, source, primary, other, primaryFlag, otherFlag):
"""
Add the given L{FileDescriptor} for monitoring either for reading or
writing. If the file is already monitored for the other operation, we
delete the previous registration and re-register it for both reading
and writing.
"""
if source in primary:
return
flags = primaryFlag
if source in other:
self._source_remove(self._sources[source])
flags |= otherFlag
self._sources[source] = self.input_add(
source, flags, self._ioEventCallback)
primary.add(source)
def addReader(self, reader):
"""
Add a L{FileDescriptor} for monitoring of data available to read.
"""
self._add(reader, self._reads, self._writes,
self.INFLAGS, self.OUTFLAGS)
def addWriter(self, writer):
"""
Add a L{FileDescriptor} for monitoring ability to write data.
"""
self._add(writer, self._writes, self._reads,
self.OUTFLAGS, self.INFLAGS)
def getReaders(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for reading.
"""
return list(self._reads)
def getWriters(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for writing.
"""
return list(self._writes)
def removeAll(self):
"""
Remove monitoring for all registered L{FileDescriptor}s.
"""
return self._removeAll(self._reads, self._writes)
def _remove(self, source, primary, other, flags):
"""
Remove monitoring the given L{FileDescriptor} for either reading or
writing. If it's still monitored for the other operation, we
re-register the L{FileDescriptor} for only that operation.
"""
if source not in primary:
return
self._source_remove(self._sources[source])
primary.remove(source)
if source in other:
self._sources[source] = self.input_add(
source, flags, self._ioEventCallback)
else:
self._sources.pop(source)
def removeReader(self, reader):
"""
Stop monitoring the given L{FileDescriptor} for reading.
"""
self._remove(reader, self._reads, self._writes, self.OUTFLAGS)
def removeWriter(self, writer):
"""
Stop monitoring the given L{FileDescriptor} for writing.
"""
self._remove(writer, self._writes, self._reads, self.INFLAGS)
def iterate(self, delay=0):
"""
One iteration of the event loop, for trial's use.
This is not used for actual reactor runs.
"""
self.runUntilCurrent()
while self._pending():
self._iteration(0)
def crash(self):
"""
Crash the reactor.
"""
posixbase.PosixReactorBase.crash(self)
self._crash()
def stop(self):
"""
Stop the reactor.
"""
posixbase.PosixReactorBase.stop(self)
# The base implementation only sets a flag, to ensure shutting down is
# not reentrant. Unfortunately, this flag is not meaningful to the
# gobject event loop. We therefore call wakeUp() to ensure the event
# loop will call back into Twisted once this iteration is done. This
# will result in self.runUntilCurrent() being called, where the stop
# flag will trigger the actual shutdown process, eventually calling
# crash() which will do the actual gobject event loop shutdown.
self.wakeUp()
def run(self, installSignalHandlers=True):
"""
Run the reactor.
"""
self.callWhenRunning(self._reschedule)
self.startRunning(installSignalHandlers=installSignalHandlers)
if self._started:
self._run()
def callLater(self, *args, **kwargs):
"""
Schedule a C{DelayedCall}.
"""
result = posixbase.PosixReactorBase.callLater(self, *args, **kwargs)
# Make sure we'll get woken up at correct time to handle this new
# scheduled call:
self._reschedule()
return result
def _reschedule(self):
"""
Schedule a glib timeout for C{_simulate}.
"""
if self._simtag is not None:
self._source_remove(self._simtag)
self._simtag = None
timeout = self.timeout()
if timeout is not None:
self._simtag = self._timeout_add(
int(timeout * 1000), self._simulate,
priority=self._glib.PRIORITY_DEFAULT_IDLE)
def _simulate(self):
"""
Run timers, and then reschedule glib timeout for next scheduled event.
"""
self.runUntilCurrent()
self._reschedule()
class PortableGlibReactorBase(selectreactor.SelectReactor):
"""
Base class for GObject event loop reactors that works on Windows.
Sockets aren't supported by GObject's input_add on Win32.
"""
def __init__(self, glib_module, gtk_module, useGtk=False):
self._simtag = None
self._glib = glib_module
self._gtk = gtk_module
selectreactor.SelectReactor.__init__(self)
self._source_remove = self._glib.source_remove
self._timeout_add = self._glib.timeout_add
def _mainquit():
if self._gtk.main_level():
self._gtk.main_quit()
if useGtk:
self._crash = _mainquit
self._run = self._gtk.main
else:
self.loop = self._glib.MainLoop()
self._crash = lambda: self._glib.idle_add(self.loop.quit)
self._run = self.loop.run
def crash(self):
selectreactor.SelectReactor.crash(self)
self._crash()
def run(self, installSignalHandlers=True):
self.startRunning(installSignalHandlers=installSignalHandlers)
self._timeout_add(0, self.simulate)
if self._started:
self._run()
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
self._source_remove(self._simtag)
self.iterate()
timeout = min(self.timeout(), 0.01)
if timeout is None:
timeout = 0.01
self._simtag = self._timeout_add(
int(timeout * 1000), self.simulate,
priority=self._glib.PRIORITY_DEFAULT_IDLE)
| mit | -6,778,284,289,687,535,000 | -8,776,052,615,347,611,000 | 31.853846 | 79 | 0.628893 | false |
konradxyz/dev_fileserver | plugins/riemann-controller/setup.py | 2 | 1329 | #########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from setuptools import setup
setup(
name='cloudify-riemann-controller-plugin',
version='3.3a3',
author='Gigaspaces',
author_email='[email protected]',
packages=['riemann_controller',
'riemann_controller.resources'],
package_data={'riemann_controller.resources': [
'manager.config',
'deployment.config.template'
]},
license='LICENSE',
description='Plugin for creating riemann configuration'
' based on blueprint policies and starting '
' a riemann core with generated configuration',
install_requires=[
'cloudify-plugins-common==3.3a3',
'jinja2==2.7.2'
],
)
| apache-2.0 | 2,725,085,264,073,504,300 | 878,527,783,607,325,300 | 33.076923 | 77 | 0.682468 | false |
naveentata/coala-bears | tests/hypertext/BootLintBearTest.py | 24 | 2763 | from bears.hypertext.BootLintBear import BootLintBear
from coalib.testing.LocalBearTestHelper import verify_local_bear
good_file = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Test</title>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/
html5shiv.min.js">
</script>
<script src="https://oss.maxcdn.com/respond/1.4.2/
respond.min.js">
</script>
<![endif]-->
<script src="../../lib/jquery.min.js"></script>
<link rel="stylesheet" href="../../lib/qunit.css">
<script src="../../lib/qunit.js"></script>
<script src="../../../dist/browser/bootlint.js"></script>
<script src="../generic-qunit.js"></script>
</head>
<body>
<button type="submit">Submit</button>
<button type="reset">Reset</button>
<button type="button">Button</button>
<div id="qunit"></div>
<ol id="bootlint"></ol>
</body>
</html>
"""
bad_file = """
<html lang="en">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Test</title>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/
html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/
respond.min.js"></script>
<![endif]-->
<script src="../../lib/jquery.min.js"></script>
<link rel="stylesheet" href="../../lib/qunit.css">
<script src="../../lib/qunit.js"></script>
<script src="../../../dist/browser/bootlint.js"></script>
<script src="../generic-qunit.js"></script>
</head>
<body>
<button>No type set</button>
<div>
<div class="panel-body">
<p>Something</p>
</div>
</div>
<div id="qunit"></div>
<ol id="bootlint">
<li data-lint="Found one or more `<button>`s
missing a `type` attribute."></li>
</ol>
</body>
</html>
"""
# There's a missing type in <button> tag, missing DOCTYPE
# and panel has no body.
BootLintBearTest = verify_local_bear(BootLintBear,
valid_files=(good_file,),
invalid_files=(bad_file,))
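# With the codes below ignored, the problems deliberately placed in bad_file
# (missing DOCTYPE, missing charset <meta>, the typeless <button> and the
# panel markup issue) are suppressed, so bad_file is treated as valid here.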
BootLintBearDisableTest = verify_local_bear(
BootLintBear,
valid_files=(good_file, bad_file),
invalid_files=(),
settings={'bootlint_ignore': 'W001,W007,E001,E023'})
| agpl-3.0 | 5,730,779,710,479,395,000 | 6,643,626,710,621,523,000 | 30.758621 | 78 | 0.535288 | false |
aaronsw/watchdog | vendor/rdflib-2.4.0/rdflib/sparql/bison/Expression.py | 4 | 2821 | from Util import ListRedirect
class ParsedConditionalAndExpressionList(ListRedirect):
"""
A list of ConditionalAndExpressions, joined by '||'
"""
pyBooleanOperator = ' or '
def __init__(self,conditionalAndExprList):
if isinstance(conditionalAndExprList,list):
self._list = conditionalAndExprList
else:
self._list = [conditionalAndExprList]
def __repr__(self):
return "<ConditionalExpressionList: %s>"%self._list
class ParsedRelationalExpressionList(ListRedirect):
"""
A list of RelationalExpressions, joined by '&&'s
"""
pyBooleanOperator = ' and '
def __init__(self,relationalExprList):
if isinstance(relationalExprList,list):
self._list = relationalExprList
else:
self._list = [relationalExprList]
def __repr__(self):
return "<RelationalExpressionList: %s>"%self._list
class ParsedPrefixedMultiplicativeExpressionList(ListRedirect):
"""
    A ParsedMultiplicativeExpressionList led by a '+' or '-'
"""
def __init__(self,prefix,mulExprList):
self.prefix = prefix
assert prefix != '-',"arithmetic '-' operator not supported"
if isinstance(mulExprList,list):
self._list = mulExprList
else:
self._list = [mulExprList]
def __repr__(self):
return "%s %s"%(self.prefix,self.reduce())
class ParsedMultiplicativeExpressionList(ListRedirect):
"""
A list of UnaryExpressions, joined by '/' or '*' s
"""
def __init__(self,unaryExprList):
if isinstance(unaryExprList,list):
self._list = unaryExprList
else:
self._list = [unaryExprList]
def __repr__(self):
return "<MultiplicativeExpressionList: %s>"%self.reduce()
class ParsedAdditiveExpressionList(ListRedirect):
"""
A list of MultiplicativeExpressions, joined by '+' or '-' s
"""
def __init__(self,multiplicativeExprList):
if isinstance(multiplicativeExprList,list):
self._list = multiplicativeExprList
else:
self._list = [multiplicativeExprList]
def __repr__(self):
return "<AdditiveExpressionList: %s>"%self._list
class ParsedString(unicode):
def __init__(self,value=None):
val = value is None and u"" or value
super(ParsedString,self).__init__(val)
class ParsedDatatypedLiteral(object):
"""
Placeholder for Datatyped literals
    This is necessary (instead of instantiating Literals directly)
    when datatype IRIRefs are QNames (in which case the prefix needs to be resolved at some point)
"""
def __init__(self,value,dType):
self.value = value
self.dataType = dType
def __repr__(self):
return "'%s'^^%s"%(self.value,self.dataType) | agpl-3.0 | 2,586,982,376,312,803,000 | -1,825,685,396,336,390,400 | 32.595238 | 99 | 0.633463 | false |
languagetool-org/languagetool | languagetool-language-modules/sr/src/main/resources/org/languagetool/resource/sr/script/gettags.py | 4 | 3785 | #!/usr/bin/env python3
# coding: utf-8
"""
Program reads input file line by line, matching PoS tags. They are written
to the output file in order of appearance. Each tag is written to output
file only once.
"""
import argparse
import logging
import re
import os
import sys
_args_ = None
_logger_ = None
_out_file_ = None
LOG_FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
# Types of regex to match input, selectable from command line
REGEX_TYPE = {
"lex" : "^([!\"\'\(\),\-\.:;\?]|[a-zčćžšđâîôflnjüA-ZČĆŽŠĐ0-9_\-]+)\s+([!\"\'\(\),\-\.:;\?]|[a-zčćžšđâîôflnjüA-ZČĆŽŠĐ0-9_\-]+)\s+([a-zA-Z0-9\-]+)*",
"wac" : "^([a-zčćžšđâîôflnjüA-ZČĆŽŠĐ0-9_\-]+)\s+([!\"\'\(\),\-\.:;\?]|[a-zčćžšđâîôflnjüA-ZČĆŽŠĐ0-9_\-]+)\s+([!\"\'\(\),\-\.:;\?]|[a-zčćžšđâîôflnjüA-ZČĆŽŠĐ0-9_\-]+)\s+([a-zA-Z0-9\-]+)*"
}
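# Illustrative note (assumed, not taken from the original source): a "lex"
# line is expected to look roughly like
#   kuća kuća Ncfsn
# i.e. surface form, lemma and PoS tag separated by whitespace; the tag sits
# in group 3 ("wac" lines carry an extra leading token, so it moves to group
# 4), which is what parse_file() collects below.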
def parse_args():
parser = argparse.ArgumentParser(description='Processes file containing Serbian word corpus.')
parser.add_argument('-b', '--base-dir', default='/tmp')
parser.add_argument('-d', '--debug', action ='store_true', default=False)
parser.add_argument('-i', '--input-file', default=None)
parser.add_argument('-n', '--first-n-lines', default=0, type=int)
parser.add_argument('-o', '--output-file', default='out.txt')
parser.add_argument('-r', '--regex', default=None)
global _args_, _logger_
_args_ = parser.parse_args()
if _args_.debug:
_logger_.setLevel( logging.DEBUG )
else:
_logger_.setLevel( logging.INFO )
_logger_.debug( "Command-line arguments: {}".format(_args_) )
if not _args_.input_file:
_logger_.error("Input file was not specified, aborting ...")
sys.exit(1)
if not _args_.regex:
sys.exit(1)
if not os.path.exists(_args_.input_file):
_logger_.error("Unable to open file '{}', aborting ...".format(_args_.input_file))
sys.exit(1)
def init():
global _logger_
logging.basicConfig(format=LOG_FORMAT)
_logger_ = logging.getLogger("lex2lt")
def open_out_file():
global _out_file_
_out_file_ = open(_args_.output_file, "w")
def close_out_file():
_out_file_.close()
# Parse input file
def parse_file():
cnt = 0
matchcnt = 0
tags = []
if _args_.regex in REGEX_TYPE:
pattern = re.compile(REGEX_TYPE[ _args_.regex ])
else:
_logger_.error("Regular expression of type '{}' does not exist in configuration, aborting ...".format(_args_.regex))
sys.exit(1)
_logger_.info("Started processing input file '{}' ...".format(_args_.input_file))
with open(_args_.input_file) as f:
for line in f:
# Remove end of line
line = line.strip()
cnt += 1
# Check if line matches regex
match = pattern.match(line)
if match:
matchcnt += 1
_logger_.debug("Matched groups: {}".format(match.groups()))
if len(match.groups()) < 4:
posgr = match.group(3)
elif len(match.groups()) < 5:
posgr = match.group(4)
_logger_.debug('posgr={}'.format(posgr))
if posgr not in tags:
tags.append( posgr )
_out_file_.write("{}\n".format(posgr))
else:
_logger_.warn("Unmatched line: {}".format(line))
if cnt > _args_.first_n_lines > 0:
break
f.close()
_logger_.info("Finished processing input file '{}': total {} lines, {} matching lines.".format(_args_.input_file, cnt, matchcnt))
if __name__ == "__main__":
init()
parse_args()
open_out_file()
parse_file()
close_out_file()
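# Example invocation (illustrative):
#   ./gettags.py --input-file corpus.lex --regex lex --output-file tags.txt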
| lgpl-2.1 | -305,828,598,358,422,340 | -5,291,882,832,119,356,000 | 31.743363 | 182 | 0.557838 | false |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/kfac/examples/convnet_mnist_distributed_main.py | 15 | 2254 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train a ConvNet on MNIST using K-FAC.
Distributed training with sync replicas optimizer. See
`convnet.train_mnist_distributed_sync_replicas` for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from tensorflow.contrib.kfac.examples import convnet
FLAGS = flags.FLAGS
flags.DEFINE_integer("task", -1, "Task identifier")
flags.DEFINE_string("data_dir", "/tmp/mnist", "local mnist dir")
flags.DEFINE_string(
"cov_inv_op_strategy", "chief_worker",
"In dist training mode run the cov, inv ops on chief or dedicated workers."
)
flags.DEFINE_string("master", "local", "Session master.")
flags.DEFINE_integer("ps_tasks", 2,
"Number of tasks in the parameter server job.")
flags.DEFINE_integer("replicas_to_aggregate", 5,
"Number of replicas to aggregate.")
flags.DEFINE_integer("worker_replicas", 5, "Number of replicas in worker job.")
flags.DEFINE_integer("num_epochs", None, "Number of epochs.")
def _is_chief():
"""Determines whether a job is the chief worker."""
if "chief_worker" in FLAGS.brain_jobs:
return FLAGS.brain_job_name == "chief_worker"
else:
return FLAGS.task == 0
def main(unused_argv):
_ = unused_argv
convnet.train_mnist_distributed_sync_replicas(
FLAGS.task, _is_chief(), FLAGS.worker_replicas, FLAGS.ps_tasks,
FLAGS.master, FLAGS.data_dir, FLAGS.num_epochs, FLAGS.cov_inv_op_strategy)
if __name__ == "__main__":
tf.app.run(main=main)
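# Example launch for a single worker task (illustrative values):
#   python convnet_mnist_distributed_main.py --task=0 --data_dir=/tmp/mnist \
#       --worker_replicas=5 --ps_tasks=2 --master=grpc://localhost:2222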
| apache-2.0 | -2,572,982,725,470,456,000 | 5,721,670,159,534,138,000 | 35.354839 | 80 | 0.692547 | false |
j-carl/ansible | lib/ansible/plugins/inventory/ini.py | 2 | 17394 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: ini
version_added: "2.4"
short_description: Uses an Ansible INI file as inventory source.
description:
- INI file based inventory, sections are groups or group related with special `:modifiers`.
- Entries in sections C([group_1]) are hosts, members of the group.
- Hosts can have variables defined inline as key/value pairs separated by C(=).
- The C(children) modifier indicates that the section contains groups.
- The C(vars) modifier indicates that the section contains variables assigned to members of the group.
- Anything found outside a section is considered an 'ungrouped' host.
- Values passed in the INI format using the ``key=value`` syntax are interpreted differently depending on where they are declared within your inventory.
- When declared inline with the host, INI values are processed by Python's ast.literal_eval function
(U(https://docs.python.org/2/library/ast.html#ast.literal_eval)) and interpreted as Python literal structures
(strings, numbers, tuples, lists, dicts, booleans, None). Host lines accept multiple C(key=value) parameters per line.
Therefore they need a way to indicate that a space is part of a value rather than a separator.
- When declared in a C(:vars) section, INI values are interpreted as strings. For example C(var=FALSE) would create a string equal to C(FALSE).
Unlike host lines, C(:vars) sections accept only a single entry per line, so everything after the C(=) must be the value for the entry.
- Do not rely on types set during definition, always make sure you specify type with a filter when needed when consuming the variable.
- See the Examples for proper quoting to prevent changes to variable type.
notes:
- Whitelisted in configuration by default.
- Consider switching to YAML format for inventory sources to avoid confusion on the actual type of a variable.
The YAML inventory plugin processes variable values consistently and correctly.
'''
EXAMPLES = '''# fmt: ini
# Example 1
[web]
host1
host2 ansible_port=222 # defined inline, interpreted as an integer
[web:vars]
http_port=8080 # all members of 'web' will inherit these
myvar=23 # defined in a :vars section, interpreted as a string
[web:children] # child groups will automatically add their hosts to parent group
apache
nginx
[apache]
tomcat1
tomcat2 myvar=34 # host specific vars override group vars
tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
[nginx]
jenkins1
[nginx:vars]
has_java = True # vars in child groups override same in parent
[all:vars]
has_java = False # 'all' is 'top' parent
# Example 2
host1 # this is 'ungrouped'
# both hosts have same IP but diff ports, also 'ungrouped'
host2 ansible_host=127.0.0.1 ansible_port=44
host3 ansible_host=127.0.0.1 ansible_port=45
[g1]
host4
[g2]
host4 # same host as above, but member of 2 groups, will inherit vars from both
# inventory hostnames are unique
'''
import ast
import re
from ansible.inventory.group import to_safe_group_name
from ansible.plugins.inventory import BaseFileInventoryPlugin
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_bytes, to_text
from ansible.utils.shlex import shlex_split
class InventoryModule(BaseFileInventoryPlugin):
"""
Takes an INI-format inventory file and builds a list of groups and subgroups
with their associated hosts and variable settings.
"""
NAME = 'ini'
_COMMENT_MARKERS = frozenset((u';', u'#'))
b_COMMENT_MARKERS = frozenset((b';', b'#'))
def __init__(self):
super(InventoryModule, self).__init__()
self.patterns = {}
self._filename = None
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._filename = path
try:
# Read in the hosts, groups, and variables defined in the inventory file.
if self.loader:
(b_data, private) = self.loader._get_file_contents(path)
else:
b_path = to_bytes(path, errors='surrogate_or_strict')
with open(b_path, 'rb') as fh:
b_data = fh.read()
try:
# Faster to do to_text once on a long string than many
# times on smaller strings
data = to_text(b_data, errors='surrogate_or_strict').splitlines()
except UnicodeError:
# Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
data = []
for line in b_data.splitlines():
if line and line[0] in self.b_COMMENT_MARKERS:
# Replace is okay for comment lines
# data.append(to_text(line, errors='surrogate_then_replace'))
# Currently we only need these lines for accurate lineno in errors
data.append(u'')
else:
                        # Non-comment lines still have to be valid utf-8
data.append(to_text(line, errors='surrogate_or_strict'))
self._parse(path, data)
except Exception as e:
raise AnsibleParserError(e)
def _raise_error(self, message):
raise AnsibleError("%s:%d: " % (self._filename, self.lineno) + message)
def _parse(self, path, lines):
'''
Populates self.groups from the given array of lines. Raises an error on
any parse failure.
'''
self._compile_patterns()
# We behave as though the first line of the inventory is '[ungrouped]',
# and begin to look for host definitions. We make a single pass through
# each line of the inventory, building up self.groups and adding hosts,
# subgroups, and setting variables as we go.
pending_declarations = {}
groupname = 'ungrouped'
state = 'hosts'
self.lineno = 0
for line in lines:
self.lineno += 1
line = line.strip()
# Skip empty lines and comments
if not line or line[0] in self._COMMENT_MARKERS:
continue
# Is this a [section] header? That tells us what group we're parsing
# definitions for, and what kind of definitions to expect.
m = self.patterns['section'].match(line)
if m:
(groupname, state) = m.groups()
groupname = to_safe_group_name(groupname)
state = state or 'hosts'
if state not in ['hosts', 'children', 'vars']:
title = ":".join(m.groups())
self._raise_error("Section [%s] has unknown type: %s" % (title, state))
# If we haven't seen this group before, we add a new Group.
if groupname not in self.inventory.groups:
# Either [groupname] or [groupname:children] is sufficient to declare a group,
                    # but [groupname:vars] is allowed only if the group is
                    # declared elsewhere.
# We add the group anyway, but make a note in pending_declarations to check at the end.
#
# It's possible that a group is previously pending due to being defined as a child
# group, in that case we simply pass so that the logic below to process pending
# declarations will take the appropriate action for a pending child group instead of
# incorrectly handling it as a var state pending declaration
if state == 'vars' and groupname not in pending_declarations:
pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)
self.inventory.add_group(groupname)
# When we see a declaration that we've been waiting for, we process and delete.
if groupname in pending_declarations and state != 'vars':
if pending_declarations[groupname]['state'] == 'children':
self._add_pending_children(groupname, pending_declarations)
elif pending_declarations[groupname]['state'] == 'vars':
del pending_declarations[groupname]
continue
elif line.startswith('[') and line.endswith(']'):
self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + " " +
"in the section entry, and that there are no other invalid characters")
# It's not a section, so the current state tells us what kind of
# definition it must be. The individual parsers will raise an
# error if we feed them something they can't digest.
# [groupname] contains host definitions that must be added to
# the current group.
if state == 'hosts':
hosts, port, variables = self._parse_host_definition(line)
self._populate_host_vars(hosts, variables, groupname, port)
# [groupname:vars] contains variable definitions that must be
# applied to the current group.
elif state == 'vars':
(k, v) = self._parse_variable_definition(line)
self.inventory.set_variable(groupname, k, v)
# [groupname:children] contains subgroup names that must be
# added as children of the current group. The subgroup names
# must themselves be declared as groups, but as before, they
# may only be declared later.
elif state == 'children':
child = self._parse_group_name(line)
if child not in self.inventory.groups:
if child not in pending_declarations:
pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
else:
pending_declarations[child]['parents'].append(groupname)
else:
self.inventory.add_child(groupname, child)
else:
# This can happen only if the state checker accepts a state that isn't handled above.
self._raise_error("Entered unhandled state: %s" % (state))
# Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
# We report only the first such error here.
for g in pending_declarations:
decl = pending_declarations[g]
if decl['state'] == 'vars':
raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
elif decl['state'] == 'children':
raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))
def _add_pending_children(self, group, pending):
for parent in pending[group]['parents']:
self.inventory.add_child(parent, group)
if parent in pending and pending[parent]['state'] == 'children':
self._add_pending_children(parent, pending)
del pending[group]
def _parse_group_name(self, line):
'''
Takes a single line and tries to parse it as a group name. Returns the
group name if successful, or raises an error.
'''
m = self.patterns['groupname'].match(line)
if m:
return m.group(1)
self._raise_error("Expected group name, got: %s" % (line))
def _parse_variable_definition(self, line):
'''
Takes a string and tries to parse it as a variable definition. Returns
the key and value if successful, or raises an error.
'''
# TODO: We parse variable assignments as a key (anything to the left of
# an '='"), an '=', and a value (anything left) and leave the value to
# _parse_value to sort out. We should be more systematic here about
# defining what is acceptable, how quotes work, and so on.
if '=' in line:
(k, v) = [e.strip() for e in line.split("=", 1)]
return (k, self._parse_value(v))
self._raise_error("Expected key=value, got: %s" % (line))
def _parse_host_definition(self, line):
'''
Takes a single line and tries to parse it as a host definition. Returns
a list of Hosts if successful, or raises an error.
'''
# A host definition comprises (1) a non-whitespace hostname or range,
# optionally followed by (2) a series of key="some value" assignments.
# We ignore any trailing whitespace and/or comments. For example, here
# are a series of host definitions in a group:
#
# [groupname]
# alpha
# beta:2345 user=admin # we'll tell shlex
# gamma sudo=True user=root # to ignore comments
try:
tokens = shlex_split(line, comments=True)
except ValueError as e:
self._raise_error("Error parsing host definition '%s': %s" % (line, e))
(hostnames, port) = self._expand_hostpattern(tokens[0])
# Try to process anything remaining as a series of key=value pairs.
variables = {}
for t in tokens[1:]:
if '=' not in t:
self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
(k, v) = t.split('=', 1)
variables[k] = self._parse_value(v)
return hostnames, port, variables
def _expand_hostpattern(self, hostpattern):
'''
do some extra checks over normal processing
'''
# specification?
hostnames, port = super(InventoryModule, self)._expand_hostpattern(hostpattern)
if hostpattern.strip().endswith(':') and port is None:
raise AnsibleParserError("Invalid host pattern '%s' supplied, ending in ':' is not allowed, this character is reserved to provide a port." %
hostpattern)
for pattern in hostnames:
# some YAML parsing prevention checks
if pattern.strip() == '---':
raise AnsibleParserError("Invalid host pattern '%s' supplied, '---' is normally a sign this is a YAML file." % hostpattern)
return (hostnames, port)
@staticmethod
def _parse_value(v):
'''
Attempt to transform the string value from an ini file into a basic python object
(int, dict, list, unicode string, etc).
'''
try:
v = ast.literal_eval(v)
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We wil then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
return to_text(v, nonstring='passthru', errors='surrogate_or_strict')
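        # e.g. "123" becomes the int 123 and "['a', 'b']" a list, while
        # "FALSE" stays the text string 'FALSE' because it is not a valid
        # Python literal (see the notes in DOCUMENTATION above).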
def _compile_patterns(self):
'''
Compiles the regular expressions required to parse the inventory and
stores them in self.patterns.
'''
# Section names are square-bracketed expressions at the beginning of a
# line, comprising (1) a group name optionally followed by (2) a tag
# that specifies the contents of the section. We ignore any trailing
# whitespace and/or comments. For example:
#
# [groupname]
# [somegroup:vars]
# [naughty:children] # only get coal in their stockings
self.patterns['section'] = re.compile(
to_text(r'''^\[
([^:\]\s]+) # group name (see groupname below)
(?::(\w+))? # optional : and tag name
\]
\s* # ignore trailing whitespace
(?:\#.*)? # and/or a comment till the
$ # end of the line
''', errors='surrogate_or_strict'), re.X
)
# FIXME: What are the real restrictions on group names, or rather, what
# should they be? At the moment, they must be non-empty sequences of non
# whitespace characters excluding ':' and ']', but we should define more
# precise rules in order to support better diagnostics.
self.patterns['groupname'] = re.compile(
to_text(r'''^
([^:\]\s]+)
\s* # ignore trailing whitespace
(?:\#.*)? # and/or a comment till the
$ # end of the line
''', errors='surrogate_or_strict'), re.X
)
| gpl-3.0 | -1,163,354,942,214,058,000 | -5,182,293,559,932,529,000 | 43.372449 | 160 | 0.597102 | false |
CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleDisableOpenSafeSafari.py | 1 | 4752 | ###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule DisableOpenSafeSafari
Created on Jan 22, 2015
@author: dwalker
@change: 2015-02-25 - ekkehard - Updated to make unit test work
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
'''
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.DisableOpenSafeSafari import DisableOpenSafeSafari
class zzzTestRuleDisableOpenSafeSafari(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = DisableOpenSafeSafari(self.config,
self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
self.dc = "/usr/bin/defaults"
self.path = "com.apple.Safari"
self.key = "AutoOpenSafeDownloads"
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def setConditionsForRule(self):
        '''This makes sure the initial report fails by executing the following
commands:
defaults write com.apple.Safari AutoOpenSafeDownloads -bool yes
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: dwalker
'''
success = False
cmd = [self.dc, "write", self.path, self.key, "-bool", "yes"]
self.logdispatch.log(LogPriority.DEBUG, str(cmd))
if self.ch.executeCommand(cmd):
success = self.checkReportForRule(False, True)
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
        '''To see what happened, run these commands:
defaults read com.apple.Safari AutoOpenSafeDownloads
:param self: essential if you override this definition
:param pCompliance:
:param pRuleSuccess:
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
success = True
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " + \
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
cmd = [self.dc, "read", self.path, self.key]
self.logdispatch.log(LogPriority.DEBUG, str(cmd))
if self.ch.executeCommand(cmd):
output = self.ch.getOutputString()
return success
def checkFixForRule(self, pRuleSuccess):
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = self.checkReportForRule(True, pRuleSuccess)
return success
def checkUndoForRule(self, pRuleSuccess):
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = self.checkReportForRule(False, pRuleSuccess)
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 | 1,568,714,737,879,791,600 | 4,510,153,432,535,661,000 | 42.2 | 82 | 0.580177 | false |
tchx84/debian-pkg-sugar | extensions/cpsection/keyboard/model.py | 11 | 6025 | # Copyright (C) 2013 Sugar Labs
# Copyright (C) 2009 OLPC
# Author: Sayamindu Dasgupta <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from gi.repository import Xkl
from gi.repository import Gio
_GROUP_NAME = 'grp' # The XKB name for group switch options
_KEYBOARD_DIR = 'org.sugarlabs.peripherals.keyboard'
_LAYOUTS_KEY = 'layouts'
_OPTIONS_KEY = 'options'
_MODEL_KEY = 'model'
class KeyboardManager(object):
def __init__(self, display):
self._engine = Xkl.Engine.get_instance(display)
self._configregistry = Xkl.ConfigRegistry.get_instance(self._engine)
self._configregistry.load(False)
self._configrec = Xkl.ConfigRec()
self._configrec.get_from_server(self._engine)
self._settings = Gio.Settings(_KEYBOARD_DIR)
def _populate_one(self, config_registry, item, store):
store.append([item.get_description(), item.get_name()])
def _populate_two(self, config_registry, item, subitem, store):
layout = item.get_name()
if subitem:
description = '%s, %s' % (subitem.get_description(),
item.get_description())
variant = subitem.get_name()
else:
description = 'Default layout, %s' % item.get_description()
variant = ''
store.append([description, ('%s(%s)' % (layout, variant))])
def get_models(self):
"""Return list of supported keyboard models"""
models = []
self._configregistry.foreach_model(self._populate_one, models)
models.sort()
return models
def get_languages(self):
"""Return list of supported keyboard languages"""
languages = []
self._configregistry.foreach_language(self._populate_one, languages)
languages.sort()
return languages
def get_layouts_for_language(self, language):
"""Return list of supported keyboard layouts for a given language"""
layouts = []
self._configregistry.foreach_language_variant(language,
self._populate_two,
layouts)
layouts.sort()
return layouts
def get_options_group(self):
"""Return list of supported options for switching keyboard group"""
options = []
self._configregistry.foreach_option(_GROUP_NAME, self._populate_one,
options)
options.sort()
return options
def get_current_model(self):
"""Return the enabled keyboard model"""
model = self._settings.get_string(_MODEL_KEY)
if not model:
model = self._configrec.model
self.set_model(model)
return model
def get_current_layouts(self):
"""Return the enabled keyboard layouts with variants"""
layouts = self._settings.get_strv(_LAYOUTS_KEY)
if layouts:
return layouts
layouts = self._configrec.layouts
variants = self._configrec.variants
layout_list = []
i = 0
for layout in layouts:
if len(variants) <= i or variants[i] == '':
layout_list.append('%s(%s)' % (layout, ''))
else:
layout_list.append('%s(%s)' % (layout, variants[i]))
i += 1
self.set_layouts(layout_list)
return layout_list
def get_current_option_group(self):
"""Return the enabled option for switching keyboard group"""
options = self._settings.get_strv(_OPTIONS_KEY)
if not options:
options = self._configrec.options
self.set_option_group(options)
for option in options:
if option.startswith(_GROUP_NAME):
return option
return None
def get_max_layouts(self):
"""Return the maximum number of layouts supported simultaneously"""
return self._engine.get_max_num_groups()
def set_model(self, model):
"""Sets the supplied keyboard model"""
if model is None or not model:
return
self._settings.set_string(_MODEL_KEY, model)
self._configrec.set_model(model)
self._configrec.activate(self._engine)
def set_option_group(self, option_group):
"""Sets the supplied option for switching keyboard group"""
# XXX: Merge, not overwrite previous options
if not option_group:
options = ['']
elif isinstance(option_group, list):
options = option_group
else:
options = [option_group]
self._settings.set_strv(_OPTIONS_KEY, options)
self._configrec.set_options(options)
self._configrec.activate(self._engine)
def set_layouts(self, layouts):
"""Sets the supplied keyboard layouts (with variants)"""
if layouts is None or not layouts:
return
self._settings.set_strv(_LAYOUTS_KEY, layouts)
layouts_list = []
variants_list = []
for layout in layouts:
layouts_list.append(layout.split('(')[0])
variants_list.append(layout.split('(')[1][:-1])
self._configrec.set_layouts(layouts_list)
self._configrec.set_variants(variants_list)
self._configrec.activate(self._engine)
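# Illustrative usage sketch (not part of the original module): given a Gdk
# display object, something like
#   manager = KeyboardManager(display)
#   manager.set_model('pc105')
#   manager.set_layouts(['us(basic)', 'rs(latin)'])
#   manager.set_option_group('grp:alt_shift_toggle')
# persists the choices in GSettings and activates them through libxklavier.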
| gpl-2.0 | -4,606,906,259,930,323,000 | -3,495,901,644,737,979,400 | 34.650888 | 76 | 0.607303 | false |
yanchen036/tensorflow | tensorflow/contrib/learn/python/learn/learn_runner_test.py | 76 | 16067 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""learn_main tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from tensorflow.contrib.learn.python.learn import evaluable # pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.training.python.training import hparam as hparam_lib
from tensorflow.python.estimator import run_config as core_run_config_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
patch = test.mock.patch
_MODIR_DIR = "/tmp"
_HPARAMS = hparam_lib.HParams(learning_rate=0.01)
_MUST_SPECIFY_OUTPUT_DIR_MSG = "Must specify an output directory"
_MISSING_MODEL_DIR_ERR_MSG = (
"Must specify a model directory `model_dir` in `run_config`.")
_EXP_NOT_CALLABLE_MSG = "Experiment builder .* is not callable"
_INVALID_HPARAMS_ERR_MSG = "`hparams` must be `HParams` instance"
_NOT_EXP_TYPE_MSG = "Experiment builder did not return an Experiment"
_NON_EXIST_TASK_MSG = "Schedule references non-existent task"
_NON_CALLABLE_MSG = "Schedule references non-callable member"
_MUST_SPECIFY_OUTPUT_DIR_OR_CONFIG_MSG = (
"Must set value for `output_dir` or `run_config`")
_HPARAMS_CANNOT_BE_SET_FOR_OUTPUT_DIR_MSG = (
"Must set `hparams` as None for `experiment_fn` with `output_dir`.")
_CANNOT_SET_BOTH_OUTPUT_DIR_AND_CONFIG_MSG = (
"Cannot provide both `output_dir` and `run_config`")
_INVALID_RUN_CONFIG_TYPE_MSG = (
"`run_config` must be `tf.contrib.learn.RunConfig` instance")
_RUN_CONFIG_UID_CHECK_ERR_MSG = (
"`RunConfig` instance is expected to be used by the `Estimator`")
_MISSING_RUN_CONFIG_UID_ERR_MSG = (
"Pass `run_config` argument of the `experiment_fn` to the Estimator")
class TestExperiment(experiment.Experiment):
def __init__(self, default=None, config=None, model_dir=None):
self.default = default
self.config = config
internal_model_dir = model_dir or config.model_dir
self._model_dir = internal_model_dir
class Estimator(evaluable.Evaluable, trainable.Trainable):
config = self.config
@property
def model_dir(self):
return internal_model_dir
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
raise NotImplementedError
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
batch_size=None, steps=None, metrics=None, name=None,
checkpoint_path=None, hooks=None):
raise NotImplementedError
super(TestExperiment, self).__init__(Estimator(), None, None)
def local_run(self):
return "local_run-{}".format(self._model_dir)
def train(self):
return "train-{}".format(self._model_dir)
def run_std_server(self):
return "run_std_server-{}".format(self._model_dir)
def train_and_evaluate(self):
return "train_and_evaluate-{}".format(self._model_dir)
def simple_task(self):
return "simple_task, default=%s." % self.default
# pylint: disable=unused-argument
def build_experiment(output_dir):
tf_logging.info("In default build_experiment.")
return TestExperiment(model_dir=output_dir)
def build_experiment_fn_for_output_dir(run_config=None):
def _build_experiment(output_dir):
tf_logging.info("In default build_experiment.")
return TestExperiment(config=run_config, model_dir=output_dir)
return _build_experiment
def build_experiment_for_run_config(run_config, hparams):
if hparams is not None and hparams != _HPARAMS:
raise ValueError("hparams is not set correctly")
return TestExperiment(config=run_config)
def build_non_experiment(output_dir):
return "Ceci n'est pas un Experiment."
# pylint: enable=unused-argument
def build_distributed_cluster_spec():
return {
run_config_lib.TaskType.PS: ["localhost:1234", "localhost:1235"],
run_config_lib.TaskType.WORKER: ["localhost:1236", "localhost:1237"],
run_config_lib.TaskType.MASTER: ["localhost:1238"],
"foo_has_no_default_schedule": ["localhost:1239"]
}
def build_non_distributed_cluster_spec():
return {"foo": ["localhost:1234"]}
class LearnRunnerRunWithOutputDirTest(test.TestCase):
def setUp(self):
# Ensure the TF_CONFIG environment variable is unset for all tests.
os.environ.pop("TF_CONFIG", None)
def test_run_with_custom_schedule(self):
self.assertEqual(
"simple_task, default=None.",
learn_runner.run(build_experiment,
output_dir=_MODIR_DIR,
schedule="simple_task"))
def test_run_with_explicit_local_run(self):
self.assertEqual(
"local_run-" + _MODIR_DIR,
learn_runner.run(build_experiment,
output_dir=_MODIR_DIR,
schedule="local_run"))
def test_fail_output_dir_and_run_config_are_both_set(self):
with self.assertRaisesRegexp(
ValueError, _CANNOT_SET_BOTH_OUTPUT_DIR_AND_CONFIG_MSG):
learn_runner.run(build_experiment,
output_dir=_MODIR_DIR,
schedule="simple_task",
run_config=run_config_lib.RunConfig())
def test_fail_empty_output_dir(self):
with self.assertRaisesRegexp(ValueError, _MUST_SPECIFY_OUTPUT_DIR_MSG):
learn_runner.run(build_experiment, output_dir="", schedule="simple_task")
def test_fail_no_output_dir(self):
with self.assertRaisesRegexp(
ValueError, _MUST_SPECIFY_OUTPUT_DIR_OR_CONFIG_MSG):
learn_runner.run(build_experiment, None, "simple_task")
def test_fail_hparams_are_set(self):
hparams = _HPARAMS
with self.assertRaisesRegexp(
ValueError, _HPARAMS_CANNOT_BE_SET_FOR_OUTPUT_DIR_MSG):
learn_runner.run(
build_experiment, _MODIR_DIR, schedule="simple_task", hparams=hparams)
def test_fail_non_callable(self):
with self.assertRaisesRegexp(TypeError, _EXP_NOT_CALLABLE_MSG):
learn_runner.run("not callable", _MODIR_DIR, "simple_test")
def test_fail_not_experiment(self):
with self.assertRaisesRegexp(TypeError, _NOT_EXP_TYPE_MSG):
learn_runner.run(build_non_experiment, _MODIR_DIR, "simple_test")
def test_fail_non_existent_task(self):
with self.assertRaisesRegexp(ValueError, _NON_EXIST_TASK_MSG):
learn_runner.run(build_experiment, _MODIR_DIR, "mirage")
def test_fail_non_callable_task(self):
with self.assertRaisesRegexp(TypeError, _NON_CALLABLE_MSG):
learn_runner.run(build_experiment, _MODIR_DIR, "default")
class LearnRunnerRunWithRunConfigTest(test.TestCase):
def setUp(self):
# Ensure the TF_CONFIG environment variable is unset for all tests.
os.environ.pop("TF_CONFIG", None)
def test_run_with_custom_schedule(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
self.assertEqual(
"simple_task, default=None.",
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="simple_task"))
def test_run_with_hparams(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
self.assertEqual(
"simple_task, default=None.",
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="simple_task",
hparams=_HPARAMS))
def test_run_with_explicit_local_run(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
self.assertEqual(
"local_run-" + _MODIR_DIR,
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run"))
def test_fail_empty_output_dir(self):
run_config = run_config_lib.RunConfig(model_dir="")
with self.assertRaisesRegexp(ValueError, _MISSING_MODEL_DIR_ERR_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run")
def test_fail_no_output_dir(self):
run_config = run_config_lib.RunConfig()
with self.assertRaisesRegexp(ValueError, _MISSING_MODEL_DIR_ERR_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run")
def test_fail_invalid_run_config_type(self):
run_config = "invalid_run_config"
with self.assertRaisesRegexp(ValueError, _INVALID_RUN_CONFIG_TYPE_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run")
def test_fail_invalid_hparams_type(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(ValueError, _INVALID_HPARAMS_ERR_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run",
hparams=["hparams"])
def test_fail_non_callable(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(TypeError, _EXP_NOT_CALLABLE_MSG):
learn_runner.run("not callable",
run_config=run_config,
schedule="simple_task")
def test_fail_not_experiment(self):
def _experiment_fn(run_config, hparams):
del run_config, hparams # unused.
return "not experiment"
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(TypeError, _NOT_EXP_TYPE_MSG):
learn_runner.run(_experiment_fn,
run_config=run_config,
schedule="simple_task")
def test_fail_non_existent_task(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(ValueError, _NON_EXIST_TASK_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="mirage")
def test_fail_non_callable_task(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(TypeError, _NON_CALLABLE_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="default")
def test_basic_run_config_uid_check(self):
expected_run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
def _experiment_fn(run_config, hparams):
del run_config, hparams # unused.
# Explicitly use a new run_config.
new_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR + "/123")
return TestExperiment(config=new_config)
with self.assertRaisesRegexp(RuntimeError, _RUN_CONFIG_UID_CHECK_ERR_MSG):
learn_runner.run(experiment_fn=_experiment_fn,
run_config=expected_run_config)
def test_fail_invalid_experiment_config_type(self):
expected_run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
def _experiment_fn(run_config, hparams):
del run_config, hparams # unused.
# Explicitly use a new run_config without `uid` method.
new_config = core_run_config_lib.RunConfig(
model_dir=_MODIR_DIR + "/123")
return TestExperiment(config=new_config)
with self.assertRaisesRegexp(RuntimeError,
_MISSING_RUN_CONFIG_UID_ERR_MSG):
learn_runner.run(experiment_fn=_experiment_fn,
run_config=expected_run_config)
class LearnRunnerDefaultScheduleTest(test.TestCase):
def setUp(self):
# Ensure the TF_CONFIG environment variable is unset for all tests.
os.environ.pop("TF_CONFIG", None)
def test_schedule_from_tf_config_runs_train_on_worker(self):
os.environ["TF_CONFIG"] = json.dumps({
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.WORKER
}
})
# RunConfig constructor will set job_name from TF_CONFIG.
config = run_config_lib.RunConfig()
self.assertEqual(
"train-" + _MODIR_DIR,
learn_runner.run(
build_experiment_fn_for_output_dir(config),
output_dir=_MODIR_DIR))
def test_schedule_from_tf_config_runs_train_and_evaluate_on_master(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.MASTER
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertEqual(
"train_and_evaluate-" + _MODIR_DIR,
learn_runner.run(
build_experiment_fn_for_output_dir(config),
output_dir=_MODIR_DIR))
def test_schedule_from_tf_config_runs_serve_on_ps(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.PS
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertEqual(
"run_std_server-" + _MODIR_DIR,
learn_runner.run(
build_experiment_fn_for_output_dir(config),
output_dir=_MODIR_DIR))
def test_no_schedule_and_no_config_runs_train_and_evaluate(self):
self.assertEqual(
"train_and_evaluate-" + _MODIR_DIR,
learn_runner.run(build_experiment, output_dir=_MODIR_DIR))
def test_no_schedule_and_non_distributed_runs_train_and_evaluate(self):
tf_config = {"cluster": build_non_distributed_cluster_spec()}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertEqual(
"train_and_evaluate-" + _MODIR_DIR,
learn_runner.run(
build_experiment_fn_for_output_dir(config),
output_dir=_MODIR_DIR))
def test_fail_task_type_with_no_default_schedule(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": "foo_has_no_default_schedule"
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
create_experiment_fn = lambda output_dir: TestExperiment(config=config)
self.assertRaisesRegexp(ValueError,
"No default schedule",
learn_runner.run,
create_experiment_fn,
_MODIR_DIR)
def test_fail_schedule_from_config_with_no_task_type(self):
tf_config = {"cluster": build_distributed_cluster_spec()}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertRaisesRegexp(
ValueError,
"Must specify a schedule",
learn_runner.run,
lambda output_dir: TestExperiment(config=config),
output_dir=_MODIR_DIR)
if __name__ == "__main__":
test.main()
| apache-2.0 | -852,439,067,944,284,800 | -5,897,772,941,955,822,000 | 37.163895 | 98 | 0.650277 | false |
HomeRad/TorCleaner | wc/filter/rules/FolderRule.py | 1 | 3945 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2009 Bastian Kleineidam
"""
Group filter rules into folders.
"""
from ... import fileutil, configuration
from . import Rule
def recalc_up_down(rules):
"""
    Add .up and .down attributes to rules, used to display up/down
    arrows in GUIs.
"""
upper = len(rules)-1
for i, rule in enumerate(rules):
rule.up = (i>0)
rule.down = (i<upper)
class FolderRule(Rule.Rule):
"""
Container for a list of rules.
"""
def __init__(self, sid=None, titles=None, descriptions=None,
disable=0, filename=""):
"""
Initialize rule data.
"""
super(FolderRule, self).__init__(sid=sid, titles=titles,
descriptions=descriptions, disable=disable)
# make filename read-only
self._filename = filename
self.rules = []
self.attrnames.extend(('oid', 'configversion'))
self.intattrs.append('oid')
self.oid = None
self.configversion = "-"
def __str__(self):
"""
Return rule data as string.
"""
return super(FolderRule, self).__str__() + \
("\nrules: %d" % len(self.rules))
def filename_get(self):
"""
Get filename where this folder is stored.
"""
return self._filename
filename = property(filename_get)
def append_rule(self, r):
"""
Append rule to folder.
"""
r.oid = len(self.rules)
# note: the rules are added in order
self.rules.append(r)
r.parent = self
def delete_rule(self, i):
"""
Delete rule from folder with index i.
"""
del self.rules[i]
recalc_up_down(self.rules)
def update(self, rule, dryrun=False, log=None):
"""
Update this folder with given folder rule data.
"""
chg = super(FolderRule, self).update(rule, dryrun=dryrun, log=log)
for child in rule.rules:
if child.sid is None or not child.sid.startswith("wc"):
# ignore local rules
continue
oldrule = self.get_rule(child.sid)
if oldrule is not None:
if oldrule.update(child, dryrun=dryrun, log=log):
chg = True
else:
print >> log, _("inserting new rule %s") % \
child.tiptext()
if not dryrun:
self.rules.append(child)
chg = True
if chg:
recalc_up_down(self.rules)
return chg
def get_rule(self, sid):
"""
Return rule with given sid or None if not found.
"""
for rule in self.rules:
if rule.sid == sid:
return rule
return None
def toxml(self):
"""
Rule data as XML for storing.
"""
s = u"""<?xml version="1.0" encoding="%s"?>
<!DOCTYPE folder SYSTEM "filter.dtd">
%s oid="%d" configversion="%s">""" % \
(configuration.ConfigCharset, super(FolderRule, self).toxml(),
self.oid, self.configversion)
s += u"\n"+self.title_desc_toxml()+u"\n"
for r in self.rules:
s += u"\n%s\n" % r.toxml()
return s+u"</folder>\n"
def write(self, fd=None):
"""
Write xml data into filename.
@raise: OSError if file could not be written.
"""
s = self.toxml().encode("iso-8859-1", "replace")
if fd is None:
fileutil.write_file(self.filename, s)
else:
fd.write(s)
def tiptext(self):
"""
Return short info for gui display.
"""
l = len(self.rules)
if l == 1:
text = _("with 1 rule")
else:
text = _("with %d rules") % l
return "%s %s" % (super(FolderRule, self).tiptext(), text)
| gpl-2.0 | 4,796,869,314,092,652,000 | -2,506,553,962,442,797,600 | 27.79562 | 78 | 0.509759 | false |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/json/tool.py | 11 | 1463 | r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m json.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
"""
import argparse
import json
import sys
def main():
prog = 'python -m json.tool'
description = ('A simple command line interface for json module '
'to validate and pretty-print JSON objects.')
parser = argparse.ArgumentParser(prog=prog, description=description)
parser.add_argument('infile', nargs='?', type=argparse.FileType(),
help='a JSON file to be validated or pretty-printed')
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
help='write the output of infile to outfile')
parser.add_argument('--sort-keys', action='store_true', default=False,
help='sort the output of dictionaries alphabetically by key')
options = parser.parse_args()
infile = options.infile or sys.stdin
outfile = options.outfile or sys.stdout
sort_keys = options.sort_keys
with infile:
try:
obj = json.load(infile)
except ValueError as e:
raise SystemExit(e)
with outfile:
json.dump(obj, outfile, sort_keys=sort_keys, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()
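# Usage note (shell): the module is intended to be used as a filter, e.g.
#   echo '{"b": 1, "a": 2}' | python -m json.tool --sort-keys
# which validates the input and pretty-prints it with keys sorted.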
| apache-2.0 | 1,751,781,764,006,186,800 | -4,854,068,214,486,041,000 | 31.511111 | 85 | 0.611073 | false |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/pygments/lexers/dsls.py | 72 | 18768 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer']
class ProtoBufLexer(RegexLexer):
"""
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
.. versionadded:: 1.4
"""
name = 'Protocol Buffer'
aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
'root': [
(r'[ \t]+', Text),
(r'[,;{}\[\]()]', Punctuation),
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(words((
'import', 'option', 'optional', 'required', 'repeated', 'default',
'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns',
'oneof'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'),
(r'(message|extend)(\s+)',
bygroups(Keyword.Declaration, Text), 'message'),
(r'(enum|group|service)(\s+)',
bygroups(Keyword.Declaration, Text), 'type'),
(r'\".*?\"', String),
(r'\'.*?\'', String),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'(\-?(inf|nan))\b', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[+-=]', Operator),
(r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
bygroups(Name.Attribute, Text, Operator)),
('[a-zA-Z_][\w.]*', Name),
],
'package': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'message': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'type': [
(r'[a-zA-Z_]\w*', Name, '#pop'),
default('#pop'),
],
}
class BroLexer(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
.. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
filenames = ['*.bro']
_hex = r'[0-9a-fA-F_]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
# Whitespace
(r'^@.*?\n', Comment.Preproc),
(r'#.*?\n', Comment.Single),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
r'|export|for|function|if|global|hook|local|module|next'
r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
(r'(T|F)\b', Keyword.Constant),
(r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
r'|default|disable_print_hook|raw_output|encrypt|group|log'
r'|mergeable|optional|persistent|priority|redef'
r'|rotate_(?:interval|size)|synchronized)\b',
bygroups(Punctuation, Keyword)),
(r'\s+module\b', Keyword.Namespace),
# Addresses, ports and networks
(r'\d+/(tcp|udp|icmp|unknown)\b', Number),
(r'(\d+\.){3}\d+', Number),
(r'(' + _hex + r'){7}' + _hex, Number),
(r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
(r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
(r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
# Numeric
(_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
(r'0[xX]' + _hex, Number.Hex),
(_float, Number.Float),
(r'\d+', Number.Integer),
(r'/', String.Regex, 'regex'),
(r'"', String, 'string'),
# Operators
(r'[!%*/+:<=>?~|-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|match)\b', Operator.Word),
(r'[{}()\[\]$.,;]', Punctuation),
            # Identifier
(r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String)
],
'regex': [
(r'/', String.Regex, '#pop'),
(r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
(r'[^\\/\n]+', String.Regex),
(r'\\\n', String.Regex),
(r'\\', String.Regex)
]
}
class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
.. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
include('comments'),
include('keywords'),
include('names'),
include('numbers'),
include('operators'),
include('strings'),
(r'[]{}:(),;[]', Punctuation),
(r'[^\S\n]+', Text),
],
'comments': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'operators': [
(r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
(r'(in|and|or|not)\b', Operator.Word),
],
'names': [
('[a-zA-Z_]\w*', Name.Attribute),
(r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
String, Punctuation)),
(r'\$\S+', Name.Variable),
],
'numbers': [
# Copypasta from the Python lexer
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'keywords': [
# Left out 'group' and 'require'
# Since they're often used as attributes
(words((
'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
'check', 'class', 'computer', 'configured', 'contained',
'create_resources', 'crit', 'cron', 'debug', 'default',
'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
'err', 'exec', 'extlookup', 'fail', 'false', 'file',
'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
'include', 'info', 'inherits', 'inline_template', 'installed',
'interface', 'k5login', 'latest', 'link', 'loglevel',
'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
'mount', 'mounted', 'nagios_command', 'nagios_contact',
'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
'realize', 'regsubst', 'resources', 'role', 'router', 'running',
'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
'service', 'sha1', 'shellquote', 'split', 'sprintf',
'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
'zpool'), prefix='(?i)', suffix=r'\b'),
Keyword),
],
'strings': [
(r'"([^"])*"', String),
(r"'(\\'|[^'])*'", String),
],
}
class RslLexer(RegexLexer):
"""
`RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
language used in RAISE (Rigorous Approach to Industrial Software Engineering)
method.
.. versionadded:: 2.0
"""
name = 'RSL'
aliases = ['rsl']
filenames = ['*.rsl']
mimetypes = ['text/rsl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(words((
'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
'type', 'union', 'until', 'use', 'value', 'variable', 'while',
'with', 'write', '~isin', '-inflist', '-infset', '-list',
'-set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'(variable|value)\b', Keyword.Declaration),
(r'--.*?\n', Comment),
(r'<:.*?:>', Comment),
(r'\{!.*?!\}', Comment),
(r'/\*.*?\*/', Comment),
(r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function),
(r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)',
bygroups(Text, Name.Function, Text, Keyword)),
(r'\b[A-Z]\w*\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'".*"', String),
(r'\'.\'', String.Char),
(r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
Operator),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'.', Text),
],
}
def analyse_text(text):
"""
Check for the most common text in the beginning of a RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0
class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
.. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
filenames = ['*.msc']
_var = r'(\w+|"(?:\\"|[^"])*")'
tokens = {
'root': [
(r'msc\b', Keyword.Type),
# Options
(r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
r'|arcgradient|ARCGRADIENT)\b', Name.Property),
# Operators
(r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
(r'(\.|-|\|){3}', Keyword),
(r'(?:-|=|\.|:){2}'
r'|<<=>>|<->|<=>|<<>>|<:>'
r'|->|=>>|>>|=>|:>|-x|-X'
r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
# Names
(r'\*', Name.Builtin),
(_var, Name.Variable),
# Other
(r'\[', Punctuation, 'attrs'),
(r'\{|\}|,|;', Punctuation),
include('comments')
],
'attrs': [
(r'\]', Punctuation, '#pop'),
(_var + r'(\s*)(=)(\s*)' + _var,
bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace,
String)),
(r',', Punctuation),
include('comments')
],
'comments': [
(r'(?://|#).*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'[ \t\r\n]+', Text.Whitespace)
]
}
class VGLLexer(RegexLexer):
"""
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
.. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
(r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
r'|create|on|line|with|global|routine|value|endroutine|constant'
r'|global|set|join|library|compile_option|file|exists|create|copy'
r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Text),
(r'\s+', Text)
]
}
class AlloyLexer(RegexLexer):
"""
For `Alloy <http://alloy.mit.edu>`_ source code.
.. versionadded:: 2.0
"""
name = 'Alloy'
aliases = ['alloy']
filenames = ['*.als']
mimetypes = ['text/x-alloy']
flags = re.MULTILINE | re.DOTALL
iden_rex = r'[a-zA-Z_][\w\']*'
text_tuple = (r'[^\S\n]+', Text)
tokens = {
'sig': [
(r'(extends)\b', Keyword, '#pop'),
(iden_rex, Name),
text_tuple,
(r',', Punctuation),
(r'\{', Operator, '#pop'),
],
'module': [
text_tuple,
(iden_rex, Name, '#pop'),
],
'fun': [
text_tuple,
(r'\{', Operator, '#pop'),
(iden_rex, Name, '#pop'),
],
'root': [
(r'--.*?$', Comment.Single),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
text_tuple,
(r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text),
'module'),
(r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'),
(r'(iden|univ|none)\b', Keyword.Constant),
(r'(int|Int)\b', Keyword.Type),
(r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
(r'(all|some|no|sum|disj|when|else)\b', Keyword),
(r'(run|check|for|but|exactly|expect|as)\b', Keyword),
(r'(and|or|implies|iff|in)\b', Operator.Word),
(r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'),
(r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
(r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
(iden_rex, Name),
(r'[:,]', Punctuation),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\n', Text),
]
}
class PanLexer(RegexLexer):
"""
Lexer for `pan <http://github.com/quattor/pan/>`_ source files.
Based on tcsh lexer.
.. versionadded:: 2.0
"""
name = 'Pan'
aliases = ['pan']
filenames = ['*.pan']
tokens = {
'root': [
include('basic'),
(r'\(', Keyword, 'paren'),
(r'\{', Keyword, 'curly'),
include('data'),
],
'basic': [
(words((
'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final', 'prefix',
'unique', 'object', 'foreach', 'include', 'template', 'function', 'variable',
'structure', 'extensible', 'declaration'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
(words((
'file_contents', 'format', 'index', 'length', 'match', 'matches', 'replace',
'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase', 'debug', 'error',
'traceback', 'deprecated', 'base64_decode', 'base64_encode', 'digest', 'escape',
'unescape', 'append', 'create', 'first', 'nlist', 'key', 'list', 'merge', 'next',
'prepend', 'is_boolean', 'is_defined', 'is_double', 'is_list', 'is_long',
'is_nlist', 'is_null', 'is_number', 'is_property', 'is_resource', 'is_string',
'to_boolean', 'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
'path_exists', 'if_exists', 'return', 'value'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
}
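# Quick self-check sketch (not part of the lexer definitions; the sample .proto
# snippet is made up): run this module directly to see ProtoBufLexer in action.
if __name__ == '__main__':  # pragma: no cover
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    _sample = 'message Person {\n  required string name = 1;\n  optional int32 id = 2;\n}\n'
    print(highlight(_sample, ProtoBufLexer(), TerminalFormatter()))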
| bsd-3-clause | 2,176,361,002,025,734,400 | 2,534,694,977,092,276,000 | 35.513619 | 97 | 0.42322 | false |
skg-net/ansible | lib/ansible/modules/cloud/amazon/ec2_snapshot.py | 71 | 9675 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
type: bool
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
      - whether to create (present) or remove (absent) a snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
last_snapshot_min_age:
description:
- If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "2.0"
author: "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
import time
import datetime
try:
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
"""
Gets the most recently created snapshot and optionally filters the result
if the snapshot is too old
:param snapshots: list of snapshots to search
:param max_snapshot_age_secs: filter the result if its older than this
:param now: simulate time -- used for unit testing
:return:
"""
if len(snapshots) == 0:
return None
if not now:
now = datetime.datetime.utcnow()
youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
    # See if the snapshot is younger than the given max age
snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
snapshot_age = now - snapshot_start
if max_snapshot_age_secs is not None:
if snapshot_age.total_seconds() > max_snapshot_age_secs:
return None
return youngest_snapshot
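# Example of the recency filter above (hypothetical timings): with
# last_snapshot_min_age=60 the caller passes max_snapshot_age_secs=3600, so a
# snapshot started 30 minutes ago is returned and reused, while one started two
# hours ago yields None and a fresh snapshot is created later in create_snapshot().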
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
"""
Wait for the snapshot to be created
:param snapshot:
:param wait_timeout_secs: fail this step after this many seconds
:param sleep_func:
:return:
"""
time_waited = 0
snapshot.update()
while snapshot.status != 'completed':
sleep_func(3)
snapshot.update()
time_waited += 3
if wait_timeout_secs and time_waited > wait_timeout_secs:
return False
return True
def create_snapshot(module, ec2, state=None, description=None, wait=None,
wait_timeout=None, volume_id=None, instance_id=None,
snapshot_id=None, device_name=None, snapshot_tags=None,
last_snapshot_min_age=None):
snapshot = None
changed = False
required = [volume_id, snapshot_id, instance_id]
if required.count(None) != len(required) - 1: # only 1 must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
if instance_id and not device_name or device_name and not instance_id:
module.fail_json(msg='Instance ID and device name must both be specified')
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
if state == 'absent':
if not snapshot_id:
module.fail_json(msg='snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
if last_snapshot_min_age > 0:
try:
current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
# Create a new snapshot if we didn't find an existing one to use
if snapshot is None:
snapshot = ec2.create_snapshot(volume_id, description=description)
changed = True
if wait:
if not _create_with_wait(snapshot, wait_timeout):
module.fail_json(msg='Timed out while creating snapshot.')
if snapshot_tags:
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=changed,
snapshot_id=snapshot.id,
volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size,
tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id=dict(),
description=dict(),
instance_id=dict(),
snapshot_id=dict(),
device_name=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=0),
last_snapshot_min_age=dict(type='int', default=0),
snapshot_tags=dict(type='dict', default=dict()),
state=dict(choices=['absent', 'present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
return module
def main():
module = create_snapshot_ansible_module()
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
last_snapshot_min_age = module.params.get('last_snapshot_min_age')
snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
create_snapshot(
module=module,
state=state,
description=description,
wait=wait,
wait_timeout=wait_timeout,
ec2=ec2,
volume_id=volume_id,
instance_id=instance_id,
snapshot_id=snapshot_id,
device_name=device_name,
snapshot_tags=snapshot_tags,
last_snapshot_min_age=last_snapshot_min_age
)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,505,991,583,135,495,000 | 1,261,716,869,289,571,300 | 30.825658 | 139 | 0.637416 | false |
Hybrid-Cloud/cinder | cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume.py | 5 | 5821 | # Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
@ddt.ddt
class TestCreateVolume(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.create_volume()``"""
def setUp(self):
"""Setup a test case environment.
Creates a fake volume object and sets up the required API responses.
"""
super(TestCreateVolume, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.volume = fake_volume.fake_volume_obj(ctx)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.volume.name: '"{}"'.format(self.volume.id),
'types/Volume/instances': {'id': self.volume.id},
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME:
'"{}"'.format(self.PROT_DOMAIN_ID),
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): '"{}"'.format(self.STORAGE_POOL_ID),
},
self.RESPONSE_MODE.Invalid: {
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME: None,
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): None,
},
self.RESPONSE_MODE.BadStatus: {
'types/Volume/instances': self.BAD_STATUS_RESPONSE,
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME: self.BAD_STATUS_RESPONSE,
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): self.BAD_STATUS_RESPONSE,
},
}
def test_no_domain(self):
"""No protection domain name or ID provided."""
self.driver.protection_domain_name = None
self.driver.protection_domain_id = None
self.assertRaises(exception.VolumeBackendAPIException,
self.test_create_volume)
def test_no_domain_id(self):
"""Only protection domain name provided."""
self.driver.protection_domain_id = None
self.driver.protection_domain_name = self.PROT_DOMAIN_NAME
self.driver.storage_pool_name = None
self.driver.storage_pool_id = self.STORAGE_POOL_ID
self.test_create_volume()
def test_no_domain_id_invalid_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_domain_id)
def test_no_domain_id_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_domain_id)
def test_no_storage_id(self):
"""Only protection domain name provided."""
self.driver.storage_pool_id = None
self.driver.storage_pool_name = self.STORAGE_POOL_NAME
self.driver.protection_domain_id = self.PROT_DOMAIN_ID
self.driver.protection_domain_name = None
self.test_create_volume()
def test_no_storage_id_invalid_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_storage_id)
def test_no_storage_id_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_storage_id)
def test_create_volume(self):
"""Valid create volume parameters"""
self.driver.create_volume(self.volume)
def test_create_volume_non_8_gran(self):
self.volume.size = 14
model_update = self.driver.create_volume(self.volume)
self.assertEqual(16, model_update['size'])
def test_create_volume_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_create_volume)
    @ddt.data({'provisioning:type': 'thin'}, {'provisioning:type': 'thick'})
def test_create_thin_thick_volume(self, extraspecs):
self.driver._get_volumetype_extraspecs = mock.MagicMock()
self.driver._get_volumetype_extraspecs.return_value = extraspecs
self.driver.create_volume(self.volume)
def test_create_volume_bad_provisioning_type(self):
extraspecs = {'provisioning:type': 'other'}
self.driver._get_volumetype_extraspecs = mock.MagicMock()
self.driver._get_volumetype_extraspecs.return_value = extraspecs
self.assertRaises(exception.VolumeBackendAPIException,
self.test_create_volume)
| apache-2.0 | 5,965,655,206,212,823,000 | 2,102,275,241,866,696,200 | 41.181159 | 78 | 0.627212 | false |
RAJSD2610/SDNopenflowSwitchAnalysis | TotalFlowPlot.py | 1 | 2742 | import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
seaborn.set()
def file_len(fname):
    # Count the lines in a file; an empty file yields 0.
    count = 0
    with open(fname) as f:
        for count, _ in enumerate(f, 1):
            pass
    return count

def flow_counts(subdir, num_files):
    # For j = 1 .. num_files/2, count the flow entries in ftotal.<j>.csv.
    # The per-file path is hard-coded to the author's home directory,
    # exactly as in the original per-protocol blocks.
    counts = []
    i = 0
    while i < (num_files / 2):
        j = i + 1
        fname = "/home/vetri/Desktop/ece671/" + subdir + "/ftotal." + str(j) + ".csv"
        counts.append(file_len(fname))
        i += 1
    return counts

path = os.path.expanduser("~/Desktop/ece671/udpt8")
num_files = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
print(num_files)
u8 = flow_counts("udpt8", num_files)
print(u8)
path = os.path.expanduser("~/Desktop/ece671/udpnone")
num_files = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
print(num_files)
u = flow_counts("udpnone", num_files)
print(u)
path = os.path.expanduser("~/Desktop/ece671/tcpnone")
num_files = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
print(num_files)
t = flow_counts("tcpnone", num_files)
print(t)
path = os.path.expanduser("~/Desktop/ece671/tcpt8")
num_files = len([f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))])
print(num_files)
t8 = flow_counts("tcpt8", num_files)
print(t8)
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(u8)+1)),u8, '.-',label="udpt8")
plt.plot(list(range(1,len(u)+1)),u, '.-',label="udpnone")
plt.plot(list(range(1,len(t)+1)),t, '.-',label="tcpnone")
plt.plot(list(range(1,len(t8)+1)),t8, '.-',label="tcpt8")
plt.title("Total Flows Present after 1st flow")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
| gpl-3.0 | 5,928,263,085,379,996,000 | 8,745,470,005,242,735,000 | 24.388889 | 86 | 0.591174 | false |
Stratio/cassandra | test/system/test_thrift_server.py | 10 | 107002 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to run a single test, run from trunk/:
# PYTHONPATH=test nosetests --tests=system.test_thrift_server:TestMutations.test_empty_range
import os, sys, time, struct, uuid, re
from . import root, ThriftTester
from . import thrift_client as client
from thrift.Thrift import TApplicationException
from ttypes import *
from constants import VERSION
def _i64(n):
return struct.pack('>q', n) # big endian = network order
_SIMPLE_COLUMNS = [Column('c1', 'value1', 0),
Column('c2', 'value2', 0)]
_SUPER_COLUMNS = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 0),
Column(_i64(6), 'value6', 0)])]
def _assert_column(column_family, key, column, value, ts = 0):
try:
assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
except NotFoundException:
raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value) )
def _assert_columnpath_exists(key, column_path):
try:
assert client.get(key, column_path, ConsistencyLevel.ONE)
except NotFoundException:
raise Exception('expected %s with %s but was not present.' % (key, column_path) )
def _assert_no_columnpath(key, column_path):
try:
client.get(key, column_path, ConsistencyLevel.ONE)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def _insert_simple(block=True):
return _insert_multi(['key1'])
def _insert_batch(block):
return _insert_multi_batch(['key1'], block)
def _insert_multi(keys):
CL = ConsistencyLevel.ONE
for key in keys:
client.insert(key, ColumnParent('Standard1'), Column('c1', 'value1', 0), CL)
client.insert(key, ColumnParent('Standard1'), Column('c2', 'value2', 0), CL)
def _insert_multi_batch(keys, block):
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
for key in keys:
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
def _big_slice(key, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
def _big_multislice(keys, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
def _verify_batch():
_verify_simple()
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard2'))]
assert L == _SIMPLE_COLUMNS, L
def _verify_simple():
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE).column == Column('c1', 'value1', 0)
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert L == _SIMPLE_COLUMNS, L
def _insert_super(key='key1'):
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_range():
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c2', 'value2', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_range():
client.add('key1', ColumnParent('Counter1'), CounterColumn('c1', 1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c2', 2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c3', 3), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c3'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _verify_counter_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c1'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c3'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _set_keyspace(keyspace):
client.set_keyspace(keyspace)
def _insert_super_range():
client.insert('key1', ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc3'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_super_range():
client.add('key1', ColumnParent('SuperCounter1', 'sc1'), CounterColumn(_i64(4), 4), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(5), 5), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(6), 6), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc3'), CounterColumn(_i64(7), 7), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc2'
assert result[1].super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc3'
assert result[1].super_column.name == 'sc2'
def _verify_counter_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc2'
assert result[1].counter_super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc3'
assert result[1].counter_super_column.name == 'sc2'
def _verify_super(supercf='Super1', key='key1'):
assert client.get(key, ColumnPath(supercf, 'sc1', _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), 'value4', 0)
slice = [result.super_column
for result in _big_slice(key, ColumnParent('Super1'))]
assert slice == _SUPER_COLUMNS, slice
def _expect_exception(fn, type_):
try:
r = fn()
except type_, t:
return t
else:
raise Exception('expected %s; got %s' % (type_.__name__, r))
def _expect_missing(fn):
_expect_exception(fn, NotFoundException)
def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
kr = KeyRange(start, end, count=count, row_filter=row_filter)
return client.get_range_slices(parent, predicate, kr, cl)
class TestMutations(ThriftTester):
def test_insert(self):
_set_keyspace('Keyspace1')
_insert_simple(False)
time.sleep(0.1)
_verify_simple()
def test_empty_slice(self):
_set_keyspace('Keyspace1')
assert _big_slice('key1', ColumnParent('Standard2')) == []
assert _big_slice('key1', ColumnParent('Super1')) == []
def test_cas(self):
_set_keyspace('Keyspace1')
def cas(expected, updates):
return client.cas('key1', 'Standard1', expected, updates, ConsistencyLevel.SERIAL, ConsistencyLevel.QUORUM)
cas_result = cas(_SIMPLE_COLUMNS, _SIMPLE_COLUMNS)
assert not cas_result.success
assert len(cas_result.current_values) == 0, cas_result
assert cas([], _SIMPLE_COLUMNS).success
result = [cosc.column for cosc in _big_slice('key1', ColumnParent('Standard1'))]
# CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
cas_result = cas([], _SIMPLE_COLUMNS)
assert not cas_result.success
# When we CAS for non-existence, current_values is the first live column of the row
assert dict((c.name, c.value) for c in cas_result.current_values) == { _SIMPLE_COLUMNS[0].name : _SIMPLE_COLUMNS[0].value }, cas_result
# CL.SERIAL for reads
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.SERIAL).column.value == 'value1'
def test_missing_super(self):
_set_keyspace('Keyspace1')
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
_insert_super()
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
def test_count(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
assert client.get_count('key1', ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
# Let's make that a little more interesting
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c4', 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c5', 'value5', 0), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('c2', 'c4', False, 1000))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
def test_count_paging(self):
_set_keyspace('Keyspace1')
_insert_simple()
# Exercise paging
column_parent = ColumnParent('Standard1')
super_column_parent = ColumnParent('Super1', 'sc3')
# Paging for small columns starts at 1024 columns
columns_to_insert = [Column('c%d' % (i,), 'value%d' % (i,), 0) for i in xrange(3, 1026)]
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
client.batch_mutate({'key1' : cfmap }, ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('', '', False, 2000))
assert client.get_count('key1', column_parent, p, ConsistencyLevel.ONE) == 1025
# Ensure that the count limit isn't clobbered
p = SlicePredicate(slice_range=SliceRange('', '', False, 10))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
# test get_count() to work correctly with 'count' settings around page size (CASSANDRA-4833)
def test_count_around_page_size(self):
def slice_predicate(count):
return SlicePredicate(slice_range=SliceRange('', '', False, count))
_set_keyspace('Keyspace1')
key = 'key1'
parent = ColumnParent('Standard1')
cl = ConsistencyLevel.ONE
for i in xrange(0, 3050):
client.insert(key, parent, Column(str(i), '', 0), cl)
# same as page size
assert client.get_count(key, parent, slice_predicate(1024), cl) == 1024
# 1 above page size
assert client.get_count(key, parent, slice_predicate(1025), cl) == 1025
        # above number of columns
assert client.get_count(key, parent, slice_predicate(4000), cl) == 3050
# same as number of columns
assert client.get_count(key, parent, slice_predicate(3050), cl) == 3050
# 1 above number of columns
assert client.get_count(key, parent, slice_predicate(3051), cl) == 3050
def test_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_simple()
_verify_simple()
def test_super_insert(self):
_set_keyspace('Keyspace1')
_insert_super()
_verify_super()
def test_super_get(self):
_set_keyspace('Keyspace1')
_insert_super()
result = client.get('key1', ColumnPath('Super1', 'sc2'), ConsistencyLevel.ONE).super_column
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
_set_keyspace('Keyspace1')
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
column_parent = ColumnParent('Super1', 'sc2')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(5), 'value5', 0)], slice
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(6), 'value6', 0)], slice
def test_long_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i < stop:
yield i
i += step
L = []
for i in long_xrange(0, 104294967296, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardLong1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardLong1'))]
assert slice == L, slice
def test_integer_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i >= stop:
yield i
i -= step
L = []
for i in long_xrange(104294967296, 0, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardInteger1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardInteger1'))]
L.sort()
assert slice == L, slice
def test_time_uuid(self):
import uuid
L = []
_set_keyspace('Keyspace2')
# 100 isn't enough to fail reliably if the comparator is borked
for i in xrange(500):
L.append(uuid.uuid1())
client.insert('key1', ColumnParent('Super4', 'sc1'), Column(L[-1].bytes, 'value%s' % i, i), ConsistencyLevel.ONE)
slice = _big_slice('key1', ColumnParent('Super4', 'sc1'))
assert len(slice) == 500, len(slice)
for i in xrange(500):
u = slice[i].column
assert u.value == 'value%s' % i
assert u.name == L[i].bytes
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[0].bytes, 'value0', 0),
Column(L[1].bytes, 'value1', 1),
Column(L[2].bytes, 'value2', 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2),
Column(L[1].bytes, 'value1', 1),
Column(L[0].bytes, 'value0', 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2)], slice
def test_long_remove(self):
column_parent = ColumnParent('StandardLong1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardLong1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_integer_remove(self):
column_parent = ColumnParent('StandardInteger1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardInteger1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_batch_insert(self):
_set_keyspace('Keyspace1')
_insert_batch(False)
time.sleep(0.1)
_verify_batch()
def test_batch_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_batch(True)
_verify_batch()
def test_batch_mutate_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(27,32)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_standard_columns_blocking(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(38,46)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_remove_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_standard_row(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20))]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_super_columns_with_standard_under(self):
_set_keyspace('Keyspace1')
column_families = ['Super1', 'Super2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc.columns:
names.append(c.name)
mutations.append(Mutation(deletion=Deletion(20, super_column=c.name, predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
for sc in _SUPER_COLUMNS:
mutations.append(Mutation(deletion=Deletion(20,
super_column=sc.name)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_super_columns_entire_row(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
mutations.append(Mutation(deletion=Deletion(20)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
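# Deletions can also carry a SliceRange predicate, removing a contiguous range
# of column names (or of sub-columns within one super column) in a single
# batch_mutate call, as the next tests show.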
def test_batch_mutate_remove_slice_standard(self):
_set_keyspace('Keyspace1')
columns = [Column('c1', 'value1', 0),
Column('c2', 'value2', 0),
Column('c3', 'value3', 0),
Column('c4', 'value4', 0),
Column('c5', 'value5', 0)]
for column in columns:
client.insert('key', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start='c2', finish='c5')))
client.batch_mutate({'key': {'Standard1' : [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists('key', ColumnPath('Standard1', column='c1'))
_assert_no_columnpath('key', ColumnPath('Standard1', column='c2'))
_assert_no_columnpath('key', ColumnPath('Standard1', column='c3'))
_assert_no_columnpath('key', ColumnPath('Standard1', column='c4'))
_assert_columnpath_exists('key', ColumnPath('Standard1', column='c5'))
def test_batch_mutate_remove_slice_of_entire_supercolumns(self):
_set_keyspace('Keyspace1')
columns = [SuperColumn(name='sc1', columns=[Column(_i64(1), 'value1', 0)]),
SuperColumn(name='sc2',
columns=[Column(_i64(2), 'value2', 0), Column(_i64(3), 'value3', 0)]),
SuperColumn(name='sc3', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc4',
columns=[Column(_i64(5), 'value5', 0), Column(_i64(6), 'value6', 0)]),
SuperColumn(name='sc5', columns=[Column(_i64(7), 'value7', 0)])]
for column in columns:
for subcolumn in column.columns:
client.insert('key', ColumnParent('Super1', column.name), subcolumn, ConsistencyLevel.ONE)
d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start='sc2', finish='sc5')))
client.batch_mutate({'key': {'Super1' : [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(1)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc2', column=_i64(2)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc2', column=_i64(3)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc3', column=_i64(4)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc4', column=_i64(5)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc4', column=_i64(6)))
_assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc5', column=_i64(7)))
def test_batch_mutate_remove_slice_part_of_supercolumns(self):
_set_keyspace('Keyspace1')
columns = [Column(_i64(1), 'value1', 0),
Column(_i64(2), 'value2', 0),
Column(_i64(3), 'value3', 0),
Column(_i64(4), 'value4', 0),
Column(_i64(5), 'value5', 0)]
for column in columns:
client.insert('key', ColumnParent('Super1', 'sc1'), column, ConsistencyLevel.ONE)
r = SliceRange(start=_i64(2), finish=_i64(5))
d = Deletion(1, super_column='sc1', predicate=SlicePredicate(slice_range=r))
client.batch_mutate({'key': {'Super1' : [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(1)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(2)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(3)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(4)))
_assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(5)))
def test_batch_mutate_insertions_and_deletions(self):
_set_keyspace('Keyspace1')
first_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
second_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
first_deletion = {'super_column': "sc1",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
second_deletion = {'super_column': "sc2",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
keys = ['key_30', 'key_31']
for key in keys:
sc = SuperColumn('sc1',[Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
sc2 = SuperColumn('sc2', [Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
cfmap3 = {
'Super1' : [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
Mutation(deletion=Deletion(3, **first_deletion))],
'Super2' : [Mutation(deletion=Deletion(2, **second_deletion)),
Mutation(ColumnOrSuperColumn(super_column=second_insert))]
}
keyed_mutations = dict((key, cfmap3) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for key in keys:
for c in [_i64(22), _i64(23)]:
_assert_no_columnpath(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_no_columnpath(key, ColumnPath('Super2', super_column='sc2', column=c))
for c in [_i64(20), _i64(21)]:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_columnpath_exists(key, ColumnPath('Super2', super_column='sc1', column=c))
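# The "bad" tests that follow assert that malformed schema and mutation
# requests are rejected with InvalidRequestException instead of being applied.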
def test_bad_system_calls(self):
def duplicate_index_names():
_set_keyspace('Keyspace1')
cd1 = ColumnDef('foo', 'BytesType', IndexType.KEYS, 'i')
cd2 = ColumnDef('bar', 'BytesType', IndexType.KEYS, 'i')
cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
client.system_add_column_family(cf)
_expect_exception(duplicate_index_names, InvalidRequestException)
def test_bad_batch_calls(self):
# mutate_does_not_accept_cosc_and_deletion_in_same_mutation
def too_full():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
dele = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_34': {'Standard1': [Mutation(col, dele)]}},
ConsistencyLevel.ONE)
_expect_exception(too_full, InvalidRequestException)
# test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
def bad_cf():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
client.batch_mutate({'key_36': {'Undefined': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# test_batch_mutate_does_not_accept_deletion_on_undefined_cf
def bad_cf():
_set_keyspace('Keyspace1')
d = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_37': {'Undefined':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# a column value that does not match the declared validator
def send_string_instead_of_long():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column('birthdate', 'bar', 0))
client.batch_mutate({'key_38': {'Indexed1': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(send_string_instead_of_long, InvalidRequestException)
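# Column names are limited to 2**16 - 1 bytes: the boundary cases below check
# that the empty name and a name one byte over the limit are both rejected.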
def test_column_name_lengths(self):
_set_keyspace('Keyspace1')
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
client.insert('key1', ColumnParent('Standard1'), Column('x'*1, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*127, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*128, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*129, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*255, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*256, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*257, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16 - 1), 'value', 0), ConsistencyLevel.ONE)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16), 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
def test_bad_calls(self):
_set_keyspace('Keyspace1')
# missing arguments
_expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1', 'x'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# no supercolumn in a super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# column but no supercolumn in remove
_expect_exception(lambda: client.remove('key1', ColumnPath('Super1', column='x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# super column in non-super CF
_expect_exception(lambda: client.remove('key1', ColumnPath('Standard1', 'y', 'x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# key too long
_expect_exception(lambda: client.get('x' * 2**16, ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
# empty key
_expect_exception(lambda: client.get('', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
_expect_exception(lambda: client.batch_mutate({'': cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
# empty column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', column=''), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', 'x', 'y'), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify supercolumn name
_expect_exception(lambda: client.get('key1', ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
# invalid CF
_expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=['', '']), '', '', 5, ConsistencyLevel.ONE), InvalidRequestException)
# 'x' is not a valid Long
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1', 'sc1'), Column('x', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# start is not a valid Long
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start is not a valid Long, supercolumn version
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, supercolumn version
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, key version
_expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['']), 'z', 'a', 1, ConsistencyLevel.ONE), InvalidRequestException)
# ttl must be positive
column = Column('cttl1', 'value1', 0, 0)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
InvalidRequestException)
# don't allow super_column in Deletion for standard ColumnFamily
deletion = Deletion(1, 'supercolumn', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Standard1' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
InvalidRequestException)
# 'x' is not a valid long
deletion = Deletion(1, 'x', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Super5' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
# counters don't support ANY
_expect_exception(lambda: client.add('key1', ColumnParent('Counter1', 'x'), CounterColumn('y', 1), ConsistencyLevel.ANY), InvalidRequestException)
def test_batch_insert_super(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_batch_insert_super_blocking(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE))
assert client.get('key1', ColumnPath('Standard1', column='c2'), ConsistencyLevel.ONE).column \
== Column('c2', 'value2', 0)
assert _big_slice('key1', ColumnParent('Standard1')) \
== [ColumnOrSuperColumn(column=Column('c2', 'value2', 0))]
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 2), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c1', 'value1', 2), Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
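# Removal is timestamp based: a re-insert with an older timestamp stays hidden
# behind the tombstone, while a strictly newer timestamp resurrects the data.
# The row- and super-column-level remove tests below rely on the same rule.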
def test_cf_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Remove the key1:Standard1 cf; verify super is unaffected
client.remove('key1', ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
_verify_super()
# Test resurrection. First, re-insert a value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 4), ConsistencyLevel.ONE)
result = _big_slice('key1', ColumnParent('Standard1'))
assert result == [ColumnOrSuperColumn(column=Column('c1', 'value1', 4))], result
# check removing the entire super cf, too.
client.remove('key1', ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
def test_super_cf_remove_and_range_slice(self):
_set_keyspace('Keyspace1')
client.insert('key3', ColumnParent('Super1', 'sc1'), Column(_i64(1), 'v1', 0), ConsistencyLevel.ONE)
client.remove('key3', ColumnPath('Super1', 'sc1'), 5, ConsistencyLevel.ONE)
rows = {}
for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange('', '', False, 1000)), '', '', 1000, ConsistencyLevel.ONE):
scs = [cosc.super_column for cosc in row.columns]
rows[row.key] = scs
assert rows == {'key3': []}, rows
def test_super_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2', _i64(5)), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(6), 'value6', 0)])]
_verify_simple()
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
super_columns_expected = [SuperColumn(name='sc1',
columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2',
columns=[Column(_i64(6), 'value6', 0), Column(_i64(7), 'value7', 0)])]
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6),
Column(_i64(6), 'value6', 0),
Column(_i64(7), 'value7', 0)])]
assert super_columns == super_columns_expected, super_columns
# shouldn't be able to specify a column w/o a super column for remove
cp = ColumnPath(column_family='Super1', column='sc2')
e = _expect_exception(lambda: client.remove('key1', cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("column cannot be specified without") >= 0
def test_super_cf_remove_supercolumn(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2'), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = _big_slice('key1', ColumnParent('Super1', 'sc2'))
assert super_columns == [], super_columns
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)])]
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
_verify_simple()
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 1), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6)])]
assert super_columns == super_columns_expected, super_columns
# check slicing at the subcolumn level too
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
columns = [result.column
for result in client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)]
assert columns == [Column(_i64(5), 'value5', 6)], columns
def test_super_cf_resurrect_subcolumn(self):
_set_keyspace('Keyspace1')
key = 'vijay'
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.remove(key, ColumnPath('Super1', 'sc1'), 1, ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 2), ConsistencyLevel.ONE)
result = client.get(key, ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE)
assert result.super_column.columns is not None, result.super_column
def test_empty_range(self):
_set_keyspace('Keyspace1')
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
_insert_simple()
assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
def test_range_with_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='c2'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c2']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_with_remove_cf(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_collation(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '', 1000, ConsistencyLevel.ONE)
L = ['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27','28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'b']
assert len(slices) == len(L)
for key, ks in zip(L, slices):
assert key == ks.key
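# Under the ByteOrderedPartitioner configured for these tests (see
# test_describe_partitioner), get_range_slices returns keys in byte order, so
# the partial key ranges and row-count limits below behave deterministically.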
def test_range_partial(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
def check_slices_against_keys(keyList, sliceList):
assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
for key, ks in zip(keyList, sliceList):
assert key == ks.key
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), 'a', '', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['a', 'b'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '15', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '50', '51', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['50', '51'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '1', '', 10, ConsistencyLevel.ONE)
check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
def test_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_range()
_verify_range()
def test_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_super_range()
_verify_super_range()
def test_get_range_slices_tokens(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
predicate = SlicePredicate(column_names=['col1', 'col3'])
range = KeyRange(start_token='55', end_token='55', count=100)
result = client.get_range_slices(cp, predicate, range, ConsistencyLevel.ONE)
assert len(result) == 5
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
def test_get_range_slice_super(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
cp = ColumnParent('Super3')
result = get_range_slice(client, cp, SlicePredicate(column_names=['sc1']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert list(set(row.columns[0].super_column.name for row in result))[0] == 'sc1'
def test_get_range_slice(self):
_set_keyspace('Keyspace1')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
# test empty slice
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key6', '', 1, ConsistencyLevel.ONE)
assert len(result) == 0
# test empty columns
result = get_range_slice(client, cp, SlicePredicate(column_names=['a']), 'key2', '', 1, ConsistencyLevel.ONE)
assert len(result) == 1
assert len(result[0].columns) == 0
# test column_names predicate
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3, result
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
# row limiting via count.
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# test column slice predicate
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].key == 'key1'
assert result[1].key == 'key2'
assert len(result[0].columns) == 3
assert result[0].columns[0].column.name == 'col2'
assert result[0].columns[2].column.name == 'col4'
# col limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=2)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result[0].columns) == 2
# and reversed
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col4', finish='col2', reversed=True, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert result[0].columns[0].column.name == 'col4'
assert result[0].columns[2].column.name == 'col2'
# row limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# removed data
client.remove('key1', ColumnPath('Standard1', column='col1'), 1, ConsistencyLevel.ONE)
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange('', '')), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2, result
assert result[0].columns[0].column.name == 'col2', result[0].columns[0].column.name
assert result[1].columns[0].column.name == 'col1'
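# For token-based ranges, start_token == end_token denotes the full ring, so
# both wrapped queries in the next test are expected to return every key.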
def test_wrapped_range_slices(self):
_set_keyspace('Keyspace1')
def copp_token(key):
# I cheated and generated this from Java
return {'a': '00530000000100000001',
'b': '00540000000100000001',
'c': '00550000000100000001',
'd': '00560000000100000001',
'e': '00580000000100000001'}[key]
for key in ['a', 'b', 'c', 'd', 'e']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
def test_get_slice_by_names(self):
_set_keyspace('Keyspace1')
_insert_range()
p = SlicePredicate(column_names=['c1', 'c2'])
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
_insert_super()
p = SlicePredicate(column_names=[_i64(4)])
result = client.get_slice('key1', ColumnParent('Super1', 'sc1'), p, ConsistencyLevel.ONE)
assert len(result) == 1
assert result[0].column.name == _i64(4)
def test_multiget_slice(self):
"""Insert multiple keys and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
# Generate a list of 10 keys and insert them
num_keys = 10
keys = ['key'+str(i) for i in range(1, num_keys+1)]
_insert_multi(keys)
# Retrieve all 10 key slices
rows = _big_multislice(keys, ColumnParent('Standard1'))
keys1 = sorted(rows.keys())
keys2 = sorted(keys)
assert keys1 == keys2, (keys1, keys2)
columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
# Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
for key in keys:
assert rows.has_key(key) == True
assert columns == rows[key]
def test_multi_count(self):
"""Insert multiple keys and count them using the multiget interface"""
_set_keyspace('Keyspace1')
# Generate a list of 10 keys containing 1 to 10 columns and insert them
num_keys = 10
for i in range(1, num_keys+1):
key = 'key'+str(i)
for j in range(1, i+1):
client.insert(key, ColumnParent('Standard1'), Column('c'+str(j), 'value'+str(j), 0), ConsistencyLevel.ONE)
# Count columns in all 10 keys
keys = ['key'+str(i) for i in range(1, num_keys+1)]
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
# Check the returned counts
for i in range(1, num_keys+1):
key = 'key'+str(i)
assert counts[key] == i
def test_batch_mutate_super_deletion(self):
_set_keyspace('Keyspace1')
_insert_super('test')
d = Deletion(1, predicate=SlicePredicate(column_names=['sc1']))
cfmap = {'Super1': [Mutation(deletion=d)]}
client.batch_mutate({'test': cfmap}, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('test', ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE))
def test_super_reinsert(self):
_set_keyspace('Keyspace1')
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x), 'value', 1), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x + 3), 'value', 3), ConsistencyLevel.ONE)
for n in xrange(1, 4):
p = SlicePredicate(slice_range=SliceRange('', '', False, n))
slice = client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)
assert len(slice) == n, "expected %s results; found %s" % (n, slice)
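# The describe_* tests below only read cluster metadata and compare it against
# the values configured in test/conf/cassandra.yaml.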
def test_describe_keyspace(self):
kspaces = client.describe_keyspaces()
assert len(kspaces) == 5, kspaces # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth']
sysks = client.describe_keyspace("system")
assert sysks in kspaces
ks1 = client.describe_keyspace("Keyspace1")
assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
for cf in ks1.cf_defs:
if cf.name == "Standard1":
cf0 = cf
break
assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
def test_describe(self):
server_version = client.describe_version()
assert server_version == VERSION, (server_version, VERSION)
assert client.describe_cluster_name() == 'Test Cluster'
def test_describe_ring(self):
assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
def test_describe_token_map(self):
# test/conf/cassandra.yaml specifies org.apache.cassandra.dht.ByteOrderedPartitioner
# which uses BytesToken, so this just tests that the string representation of the token
# matches a regex pattern for BytesToken.toString().
ring = client.describe_token_map().items()
assert len(ring) == 1
token, node = ring[0]
assert re.match("[0-9A-Fa-f]{32}", token)
assert node == '127.0.0.1'
def test_describe_partitioner(self):
# Make sure this just reads back the values from the config.
assert client.describe_partitioner() == "org.apache.cassandra.dht.ByteOrderedPartitioner"
def test_describe_snitch(self):
assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
def test_invalid_ks_names(self):
def invalid_keyspace():
client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
_expect_exception(invalid_keyspace, InvalidRequestException)
def test_invalid_strategy_class(self):
def add_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def update_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def test_invalid_cf_names(self):
def invalid_cf():
_set_keyspace('Keyspace1')
newcf = CfDef('Keyspace1', 'in-valid')
client.system_add_column_family(newcf)
_expect_exception(invalid_cf, InvalidRequestException)
def invalid_cf_inside_new_ks():
cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
_set_keyspace('system')
client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
_expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
def test_system_cf_recreate(self):
"ensures that keyspaces and column familes can be dropped and recreated in short order"
for x in range(2):
keyspace = 'test_cf_recreate'
cf_name = 'recreate_cf'
# create
newcf = CfDef(keyspace, cf_name)
newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[newcf])
client.system_add_keyspace(newks)
_set_keyspace(keyspace)
# insert
client.insert('key0', ColumnParent(cf_name), Column('colA', 'colA-value', 0), ConsistencyLevel.ONE)
col1 = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)[0].column
assert col1.name == 'colA' and col1.value == 'colA-value'
# drop
client.system_drop_column_family(cf_name)
# recreate
client.system_add_column_family(newcf)
# query
cosc_list = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)
# this was failing prior to CASSANDRA-1477.
assert len(cosc_list) == 0, 'cosc length test failed'
client.system_drop_keyspace(keyspace)
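# Schema lifecycle: system_add_keyspace, system_update_keyspace and
# system_drop_keyspace are exercised below, with describe_keyspace used to
# verify that each step took effect.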
def test_system_keyspace_operations(self):
# create. note large RF, this is OK
keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{'replication_factor': '10'},
cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
client.system_add_keyspace(keyspace)
newks = client.describe_keyspace('CreateKeyspace')
assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
_set_keyspace('CreateKeyspace')
# modify valid
modified_keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
{'replication_factor': '1'},
cf_defs=[])
client.system_update_keyspace(modified_keyspace)
modks = client.describe_keyspace('CreateKeyspace')
assert modks.strategy_class == modified_keyspace.strategy_class
assert modks.strategy_options == modified_keyspace.strategy_options
# drop
client.system_drop_keyspace('CreateKeyspace')
def get_second_ks():
client.describe_keyspace('CreateKeyspace')
_expect_exception(get_second_ks, NotFoundException)
def test_create_then_drop_ks(self):
keyspace = KsDef('AddThenDrop',
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor':'1'},
cf_defs=[])
def test_existence():
client.describe_keyspace(keyspace.name)
_expect_exception(test_existence, NotFoundException)
client.set_keyspace('system')
client.system_add_keyspace(keyspace)
test_existence()
client.system_drop_keyspace(keyspace.name)
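# Validators: per-column validation classes come from ColumnDef entries in
# column_metadata, while default_validation_class applies to any column
# without explicit metadata; both paths are checked below.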
def test_column_validators(self):
# columndef validation for regular CF
ks = 'Keyspace1'
_set_keyspace(ks)
cd = ColumnDef('col', 'LongType', None, None)
cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
cp = ColumnParent('ValidatorColumnFamily')
col0 = Column('col', _i64(42), 0)
col1 = Column('col', "ceci n'est pas 64bit", 0)
client.insert('key0', cp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef validation for super CF
scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
client.system_add_column_family(scf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
scp = ColumnParent('ValidatorSuperColumnFamily','sc1')
client.insert('key0', scp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef and cfdef default validation
cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
dcp = ColumnParent('DefaultValidatorColumnFamily')
# inserting a longtype into column 'col' is valid at the columndef level
client.insert('key0', dcp, col0, ConsistencyLevel.ONE)
# inserting a UTF8type into column 'col' fails at the columndef level
e = _expect_exception(lambda: client.insert('key1', dcp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# inserting a longtype into column 'fcol' should fail at the cfdef level
col2 = Column('fcol', _i64(4224), 0)
e = _expect_exception(lambda: client.insert('key1', dcp, col2, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# inserting a UTF8type into column 'fcol' is valid at the cfdef level
col3 = Column('fcol', "Stringin' it up in the Stringtel Stringifornia", 0)
client.insert('key0', dcp, col3, ConsistencyLevel.ONE)
def test_system_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' in [x.name for x in ks1.cf_defs]
cfid = [x.id for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
# modify invalid
modified_cf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
modified_cf.id = cfid
def fail_invalid_field():
modified_cf.comparator_type = 'LongType'
client.system_update_column_family(modified_cf)
_expect_exception(fail_invalid_field, InvalidRequestException)
# modify valid
modified_cf.comparator_type = 'BytesType' # revert back to old value.
modified_cf.gc_grace_seconds = 1
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
assert server_cf
assert server_cf.gc_grace_seconds == 1
# drop
client.system_drop_column_family('NewColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
# Make a LongType CF and add a validator
newcf = CfDef('Keyspace1', 'NewLongColumnFamily', comparator_type='LongType')
client.system_add_column_family(newcf)
three = _i64(3)
cd = ColumnDef(three, 'LongType', None, None)
ks1 = client.describe_keyspace('Keyspace1')
modified_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
modified_cf.column_metadata = [cd]
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
assert server_cf.column_metadata[0].name == _i64(3), server_cf.column_metadata
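# Secondary indexes are declared through ColumnDef(index_type=IndexType.KEYS,
# index_name=...); the following tests add and drop them via
# system_update_column_family and then query through IndexExpression.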
def test_dynamic_indexes_creation_deletion(self):
_set_keyspace('Keyspace1')
cfdef = CfDef('Keyspace1', 'BlankCF')
client.system_add_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='BlankCF'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
modified_cf = CfDef('Keyspace1', 'BlankCF', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
# Add a second indexed CF ...
birthdate_coldef = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
age_coldef = ColumnDef('age', 'BytesType', IndexType.KEYS, 'age_index')
cfdef = CfDef('Keyspace1', 'BlankCF2', column_metadata=[birthdate_coldef, age_coldef])
client.system_add_column_family(cfdef)
# ... and update it to have a third index
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
name_coldef = ColumnDef('name', 'BytesType', IndexType.KEYS, 'name_index')
cfdef.column_metadata.append(name_coldef)
client.system_update_column_family(cfdef)
# Now drop the indexes
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
age_coldef = ColumnDef('age', 'BytesType', None, None)
name_coldef = ColumnDef('name', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef, age_coldef, name_coldef]
client.system_update_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef]
client.system_update_column_family(cfdef)
client.system_drop_column_family('BlankCF')
client.system_drop_column_family('BlankCF2')
def test_dynamic_indexes_with_system_update_cf(self):
_set_keyspace('Keyspace1')
cd = ColumnDef('birthdate', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'ToBeIndexed', default_validation_class='LongType', column_metadata=[cd])
client.system_add_column_family(newcf)
client.insert('key1', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# First without index
cp = ColumnParent('ToBeIndexed')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# add an index on 'birthdate'
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, 'bd_index')
modified_cf = CfDef('Keyspace1', 'ToBeIndexed', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
assert server_cf
assert server_cf.column_metadata[0].index_type == modified_cd.index_type
assert server_cf.column_metadata[0].index_name == modified_cd.index_name
# sleep a bit to give time for the index to build.
time.sleep(0.5)
# repeat query on one index expression
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
def test_system_super_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewSuperColumnFamily', 'Super', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' in [x.name for x in ks1.cf_defs]
# drop
client.system_drop_column_family('NewSuperColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
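# TTL tests: the optional fourth Column field is a time-to-live in seconds;
# the sleeps below are sized so the column is still readable before expiry
# and missing afterwards.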
def test_insert_ttl(self):
""" Test simple insertion of a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl1', 'value1', 0, 5)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
assert client.get('key1', ColumnPath('Standard1', column='cttl1'), ConsistencyLevel.ONE).column == column
def test_simple_expiration(self):
""" Test that column ttled do expires """
_set_keyspace('Keyspace1')
column = Column('cttl3', 'value1', 0, 2)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column == column
time.sleep(2)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE))
def test_simple_expiration_batch_mutate(self):
""" Test that column ttled do expires using batch_mutate """
_set_keyspace('Keyspace1')
column = Column('cttl4', 'value1', 0, 2)
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(column))]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column
time.sleep(2)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE))
def test_update_expiring(self):
""" Test that updating a column with ttl override the ttl """
_set_keyspace('Keyspace1')
column1 = Column('cttl4', 'value1', 0, 1)
client.insert('key1', ColumnParent('Standard1'), column1, ConsistencyLevel.ONE)
column2 = Column('cttl4', 'value1', 1)
client.insert('key1', ColumnParent('Standard1'), column2, ConsistencyLevel.ONE)
time.sleep(1.5)
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column2
def test_remove_expiring(self):
""" Test removing a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl5', 'value1', 0, 10)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='cttl5'), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl5'), ConsistencyLevel.ONE))
def test_describe_ring_on_invalid_keyspace(self):
def req():
client.describe_ring('system')
_expect_exception(req, InvalidRequestException)
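# Counter tests: add() applies a signed delta to a CounterColumn and get()
# returns the accumulated value; the short sleeps before each read follow the
# same pattern as the rest of this file, giving the write time to settle.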
def test_incr_decr_standard_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
d3 = 35
# insert positive and negative values and check the counts
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_decr_super_add(self):
_set_keyspace('Keyspace1')
d1 = -234
d2 = 52345
d3 = 3123
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c2', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
assert rv1.counter_super_column.columns[0].value == d1
assert rv1.counter_super_column.columns[1].value == d2
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath('SuperCounter1', 'sc1', 'c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_batch_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
# insert positive and negative values and check the counts
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
def test_incr_decr_standard_batch_remove(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
# insert positive and negative values and check the counts
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
# remove the previous column and check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion(predicate=SlicePredicate(column_names=['c1']))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1+d2
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion()),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_standard_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
        # c3 received both a positive and a negative increment; check the accumulated counts
counters = client.get_slice('key1', ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters[0].counter_column.value == d1+d2
assert counters[1].counter_column.value == d1
def test_incr_decr_standard_muliget_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
        # each key's c3 received both a positive and a negative increment; check the counts per key
counters = client.multiget_slice(['key1', 'key2'], ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters['key1'][0].counter_column.value == d1+d2
assert counters['key1'][1].counter_column.value == d1
assert counters['key2'][0].counter_column.value == d1+d2
assert counters['key2'][1].counter_column.value == d1
def test_counter_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_counter_range()
_verify_counter_range()
def test_counter_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_counter_super_range()
_verify_counter_super_range()
def test_index_scan(self):
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# simple query on one index expression
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# without index
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
# but unindexed expression added to indexed one is ok
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(3)), IndexExpression('birthdate', IndexOperator.EQ, _i64(3))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key3'
assert len(result[0].columns) == 2, result[0].columns
def test_index_scan_uuid_names(self):
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
cp = ColumnParent('Indexed3') # timeuuid name, utf8 values
u = uuid.UUID('00000000-0000-1000-0000-000000000000').bytes
u2 = uuid.UUID('00000000-0000-1000-0000-000000000001').bytes
client.insert('key1', ColumnParent('Indexed3'), Column(u, 'a', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Indexed3'), Column(u2, 'b', 0), ConsistencyLevel.ONE)
# name comparator + data validator of incompatible types -- see CASSANDRA-2347
key_range = KeyRange('', '', None, None, [IndexExpression(u, IndexOperator.EQ, 'a'), IndexExpression(u2, IndexOperator.EQ, 'b')], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
cp = ColumnParent('Indexed2') # timeuuid name, long values
# name must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression('foo', IndexOperator.EQ, uuid.UUID('00000000-0000-1000-0000-000000000000').bytes)], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
# value must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, IndexOperator.EQ, "foo")], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
def test_index_scan_expiring(self):
""" Test that column ttled expires from KEYS index"""
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0, 1), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
# query before expiration
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
# wait for expiration and requery
time.sleep(2)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
def test_column_not_found_quorum(self):
_set_keyspace('Keyspace1')
key = 'doesntexist'
column_path = ColumnPath(column_family="Standard1", column="idontexist")
try:
client.get(key, column_path, ConsistencyLevel.QUORUM)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def test_get_range_slice_after_deletion(self):
_set_keyspace('Keyspace2')
key = 'key1'
        # three supercolumns, each with a "col1" subcolumn
for i in range(1,4):
client.insert(key, ColumnParent('Super3', 'sc%d' % i), Column('col1', 'val1', 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3')
predicate = SlicePredicate(slice_range=SliceRange('sc1', 'sc3', False, count=1))
k_range = KeyRange(start_key=key, end_key=key, count=1)
# validate count=1 restricts to 1 supercolumn
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
        # remove sc1, then re-insert the subcolumn with a newer timestamp to override the tombstone
client.remove(key, ColumnPath('Super3', 'sc1'), 1, ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
client.insert(key, ColumnParent('Super3', 'sc1'), Column('col1', 'val1', 2), ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].super_column.name == 'sc1'
class TestTruncate(ThriftTester):
def test_truncate(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# truncate Standard1
client.truncate('Standard1')
assert _big_slice('key1', ColumnParent('Standard1')) == []
# truncate Super1
client.truncate('Super1')
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
| apache-2.0 | 7,815,207,687,508,891,000 | -5,536,122,940,355,650,000 | 50.393852 | 623 | 0.621278 | false |
mattskone/garage-alarm | exploring/iso_ss.py | 2 | 1392 | """
Utility script for experimenting with different ISO and shutter speeds.
"""
from fractions import Fraction
import sys
import time
from picamera import PiCamera
def get_camera():
camera = PiCamera()
camera.hflip = True
camera.vflip = True
return camera
def custom():
c = get_camera()
iso = int(sys.argv[1])
ss = int(sys.argv[2])
c.framerate = Fraction(1000000, ss)
c.ss = ss
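    # A sketch of the assumed picamera behaviour: an exposure cannot be longer
    # than one frame interval, so the framerate above is derived from the
    # requested shutter speed, e.g. ss = 5000000 (5 s) gives
    # Fraction(1000000, 5000000) == 1/5 fps, allowing the full 5-second exposure.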
c.iso = iso
print 'Capturing with ISO {0}, shutter speed {1}, and frame rate {2}'.format(iso, ss, c.framerate)
time.sleep(10)
c.exposure_mode = 'off'
c.capture('temp/temp.jpg')
c.close()
def main():
iso_list = [100, 200, 400, 800]
ss_list = [50000, 500000, 2500000, 5000000]
for iso in iso_list:
for ss in ss_list:
c = get_camera()
c.framerate = Fraction(1000000, ss)
c.ss = ss
c.iso = iso
time.sleep(10)
c.exposure_mode = 'off'
print 'Capturing with ISO {0}, shutter speed {1}, and frame rate {2}'.format(iso, ss, c.framerate)
print 'Gains: {0}'.format(c.awb_gains)
filename = 'test_{0}_{1}_{2}.jpg'.format(iso, ss, float(c.framerate))
c.capture('temp/' + filename)
c.close()
if __name__ == '__main__':
if len(sys.argv) > 1:
custom()
else:
main()
print 'Done'
| mit | 7,741,438,013,600,029,000 | -4,419,454,503,547,280,400 | 23 | 110 | 0.559626 | false |
codegooglecom/jaikuengine | common/test/sms.py | 34 | 3749 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from django.conf import settings
from django.core import mail
from common import api
from common import clean
from common import exception
from common import profile
from common import sms as sms_service
from common.protocol import sms
from common.test import base
from common.test import util as test_util
class SmsTest(base.FixturesTestCase):
sender = '+14084900694'
target = settings.SMS_TARGET
def setUp(self):
super(SmsTest, self).setUp()
# this is actually a TestSmsConnection instance, overriden by the base
# classes
self.service = sms_service.SmsService(sms.SmsConnection())
self.service.init_handlers()
def receive(self, message, sender=None, target=None):
if sender is None:
sender = self.sender
if target is None:
target = self.target
self.service.handle_message(sender, target, message)
self.exhaust_queue_any()
outbox = sms.outbox[:]
sms.outbox = []
return outbox
def assertOutboxContains(self, outbox, pattern, sender=None):
if sender is None:
sender = self.sender
if type(pattern) is type(''):
pattern = re.compile(pattern)
for mobile, message in outbox:
if mobile == sender and pattern.search(message):
return True
self.fail('Not in outbox: /%s/ \n %s' % (pattern.pattern, outbox))
def sign_in(self, nick, sender=None):
password = self.passwords[clean.nick(nick)]
r = self.receive('SIGN IN %s %s' % (nick, password), sender=sender)
return r
# Note: all of these tests assume that double opt-in for the user
# has already been completed
def test_sign_in(self):
nick = 'popular'
password = self.passwords[clean.nick(nick)]
r = self.receive('SIGN IN %s %s' % (nick, password))
self.assertOutboxContains(r, 'Welcome to %s SMS %s' % (settings.SITE_NAME, nick))
def test_sign_on(self):
self.sign_in('popular')
r = self.receive('SIGN OUT')
self.assertOutboxContains(r, sms_service.HELP_SIGNED_OUT)
r = self.receive('SIGN OUT')
self.assertOutboxContains(r, sms_service.HELP_SIGN_IN)
def test_post_and_reply(self):
unpop = '+14083839393'
r = self.sign_in('unpopular', sender=unpop)
r = self.receive('on', sender=unpop)
r = self.sign_in('popular')
r = self.receive('on')
r = self.receive('bling blao')
self.assertOutboxContains(r, 'popular: bling blao', sender=unpop)
r = self.receive('@popular: sup dawg', sender=unpop)
self.assertOutboxContains(r, 'unpopular\^bb: sup dawg')
def test_whitelist(self):
o = test_util.override(SMS_MT_WHITELIST=re.compile('\+23'))
def _all_blocked():
r = self.sign_in('popular')
self.assertRaises(exception.ServiceError, _all_blocked)
r = self.sign_in('popular', '+2345678900')
self.assert_(r)
o.reset()
def test_blacklist(self):
o = test_util.override(SMS_MT_BLACKLIST=re.compile('\+1'))
def _all_blocked():
r = self.sign_in('popular')
self.assertRaises(exception.ServiceError, _all_blocked)
r = self.sign_in('popular', '+2345678900')
self.assert_(r)
o.reset()
| apache-2.0 | -9,003,434,461,605,124,000 | -119,846,457,286,263,800 | 27.401515 | 85 | 0.677781 | false |
thjashin/tensorflow | tensorflow/python/framework/common_shapes.py | 40 | 26355 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.core.framework import types_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def scalar_shape(unused_op):
"""Shape function for ops that output a scalar value."""
return [tensor_shape.scalar()]
def unchanged_shape(op):
"""Shape function for ops that output an tensor like their first input."""
return [op.inputs[0].get_shape()]
def unchanged_shape_with_rank(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: The exact rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_least(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: A lower bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_least(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_most(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: An upper bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_most(rank)]
return _ShapeFunction
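# A sketch of how shape-function factories like the ones above were typically
# hooked up (assuming the older Python-side registration API; illustrative only):
#
#   @ops.RegisterShape("MyElementwiseOp")
#   def _my_elementwise_shape(op):
#     return unchanged_shape(op)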
def matmul_shape(op):
"""Shape function for a MatMul op."""
a_shape = op.inputs[0].get_shape().with_rank(2)
transpose_a = op.get_attr("transpose_a")
b_shape = op.inputs[1].get_shape().with_rank(2)
transpose_b = op.get_attr("transpose_b")
output_rows = a_shape[1] if transpose_a else a_shape[0]
output_cols = b_shape[0] if transpose_b else b_shape[1]
inner_a = a_shape[0] if transpose_a else a_shape[1]
inner_b = b_shape[1] if transpose_b else b_shape[0]
inner_a.assert_is_compatible_with(inner_b)
return [tensor_shape.TensorShape([output_rows, output_cols])]
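# Worked example (illustrative): with a_shape [3, 4], b_shape [4, 5] and both
# transpose flags False, inner_a == inner_b == 4 and the result is
# TensorShape([3, 5]); with transpose_a True the same a is read as a [4, 3] operand.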
def get_conv_output_size(input_size, filter_size, strides, padding_type):
"""Returns the spatial size of a n-d convolution/pooling output."""
input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
strides = [int(x) for x in strides]
if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
return input_size
if any(x is not None and y is not None and x > y for x, y in
zip(filter_size, input_size)):
raise ValueError("Filter must not be larger than the input: "
"Filter: %r Input: %r" % (filter_size, input_size))
if padding_type == b"VALID":
def _valid(in_dim, k_dim, s_dim):
if in_dim is not None and k_dim is not None:
return (in_dim - k_dim + s_dim) // s_dim
else:
return None
output_size = [
_valid(in_dim, k_dim, s_dim)
for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
]
elif padding_type == b"SAME":
def _same(in_dim, s_dim):
if in_dim is not None:
return (in_dim + s_dim - 1) // s_dim
else:
return None
output_size = [_same(in_dim, s_dim)
for in_dim, s_dim in zip(input_size, strides)]
else:
raise ValueError("Invalid padding: %r" % padding_type)
return tuple(output_size)
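# Worked example (illustrative): for input_size=(7, 7), filter_size=(3, 3) and
# strides=(2, 2), b"VALID" gives ((7 - 3 + 2) // 2,) * 2 == (3, 3) while b"SAME"
# gives ((7 + 2 - 1) // 2,) * 2 == (4, 4).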
def get2d_conv_output_size(input_height, input_width, filter_height,
filter_width, row_stride, col_stride, padding_type):
"""Returns the number of rows and columns in a convolution/pooling output."""
return get_conv_output_size((input_height, input_width),
(filter_height, filter_width),
(row_stride, col_stride), padding_type)
def conv2d_shape(op):
"""Shape function for a Conv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A Conv2D Operation.
Returns:
A list containing the Shape of the Conv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
if data_format == b"NCHW":
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth_out]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
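# Worked example (illustrative): an NHWC input [batch, 28, 28, 3] convolved with
# a [5, 5, 3, 32] filter using strides [1, 2, 2, 1] and "SAME" padding yields
# [batch, 14, 14, 32], since (28 + 2 - 1) // 2 == 14.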
def depthwise_conv2d_native_shape(op):
"""Shape function for a DepthwiseConv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depthwise_multiplier]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend
on the value of the op's "padding" and "strides" attrs.
Args:
op: A DepthwiseConv2dNative Operation.
Returns:
A list containing the Shape of the DepthwiseConv2DNative output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3] * filter_shape[2]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def separable_conv2d_shape(op):
"""Shape function for a SeparableConv2D op.
This op has three inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* depthwise_filter, a 4D tensor with shape = [filter_rows,
filter_cols, depth_in, depth_multiplier]
* pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
depth_multiplier, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A SeparableConv2D Operation.
Returns:
A list containing the Shape of the SeparableConv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
tensor_shape.TensorShape([None, None, input_shape[3], None]))
pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = depthwise_filter_shape[0]
filter_cols = depthwise_filter_shape[1]
depth_out = pointwise_filter_shape[3]
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
"""Shape function for an AvgPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: An AvgPool Operation.
Returns:
A single-element list containing the Shape of the AvgPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch and depth dimensions.")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not support strides "
"in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def max_pool_shape(op):
"""Shape function for a MaxPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows, out_cols, and depth_out depend
on the value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: A MaxPool Operation.
Returns:
A single-element list containing the Shape of the MaxPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch dimension.")
if stride_b != 1:
raise ValueError("Current implementation does not support strides "
"in the batch dimension.")
if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
raise ValueError("MaxPooling supports exactly one of pooling across depth "
"or pooling across width/height.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
if ksize_d == 1:
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
else:
if depth % ksize_d > 0:
raise ValueError("Depthwise max pooling requires the depth window "
"to evenly divide the input depth.")
if stride_d != ksize_d:
raise ValueError("Depthwise max pooling requires the depth window "
"to equal the depth stride.")
output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
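# Worked example (illustrative): spatial pooling with ksize=[1, 2, 2, 1] and
# strides=[1, 2, 2, 1] pools over 2x2 spatial windows and keeps the depth, while
# depthwise pooling such as ksize=[1, 1, 1, 4] with strides=[1, 1, 1, 4] on a
# depth-16 input keeps the spatial size and returns depth 16 // 4 == 4.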
def no_outputs(unused_op):
"""Shape function for use with ops that have no outputs."""
return []
def unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
def broadcast_shape(shape_x, shape_y):
"""Returns the broadcasted shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
if shape_x.ndims is None or shape_y.ndims is None:
return tensor_shape.unknown_shape()
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions is unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return tensor_shape.TensorShape(return_dims)
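# Worked example (illustrative): broadcasting TensorShape([5, 1, 3]) with
# TensorShape([4, 3]) pads the shorter shape with 1s on the left and combines
# dimension-wise to TensorShape([5, 4, 3]); an unknown dimension paired with a
# known dimension greater than 1 resolves to the known one.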
def call_cpp_shape_fn(op,
input_tensors_needed=None,
input_tensors_as_shapes_needed=None,
debug_python_shape_fn=None,
require_shape_fn=True):
"""A shape function that delegates to the registered C++ shape function.
Args:
op: the node in the graph for which to compute output shapes.
input_tensors_needed: a list of input tensor indices for which to compute
the input tensor's value and pass to the C++ shape function.
input_tensors_as_shapes_needed: a list of input tensor indices for which to
compute the constant_value_as_shape and pass to the C++ shape function.
debug_python_shape_fn: For testing only during migration to using
call_cpp_shape_fn. Do not submit calls that set this,
as the comparison is slow. If non-None, the python shape function;
this function will be called and its output compared to that of
the C++ shape function.
require_shape_fn: If true, and the C++ shape function is not registered
in the current binary then an exception is raised; otherwise, if the
C++ shape function is not registered then unknown_shape is used.
Returns:
A dictionary with the following keys:
shapes: A TensorShape list of the output shapes of the op, as computed
using the C++ shape inference function registered for the op.
handle_shapes: A TensorShape list of the shapes for handle outputs, if
any.
handle_dtypes: A list of DataType enums for the handle outputs, if any.
Raises:
ValueError: If the C++ shape function returned an error (e.g. because the
shapes of the inputs are of the wrong rank or otherwise incompatible
according to the shape function).
RuntimeError: If the C++ shape function is not registered and
<require_shape_fn> is True.
"""
if op.type == "Const":
# To avoid serializing large constants, we special-case constant
# here, even though it has a C++ shape function. When Python
# calls the C / C-API directly, we should be able to remove this.
return {
"shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
"handle_shapes": [tensor_shape.TensorShape(None).as_proto()],
"handle_dtypes": [types_pb2.DT_INVALID]
}
input_tensors_needed = input_tensors_needed or []
input_tensors_as_shapes_needed = input_tensors_as_shapes_needed or []
while True:
res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
input_tensors_as_shapes_needed,
debug_python_shape_fn, require_shape_fn)
if not isinstance(res, dict):
# Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
return res
# See if we need to evaluate some inputs.
if not res["inputs_needed"]:
return res
p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()
p = p.FromString(res["inputs_needed"])
changed = False
for idx in p.input_tensors_needed:
if idx not in input_tensors_needed:
input_tensors_needed.append(idx)
changed = True
for idx in p.input_tensors_as_shapes_needed:
if idx not in input_tensors_as_shapes_needed:
input_tensors_as_shapes_needed.append(idx)
changed = True
if not changed:
return res
def _call_cpp_shape_fn_impl(
op, input_tensors_needed,
input_tensors_as_shapes_needed,
debug_python_shape_fn, require_shape_fn):
"""Core implementaton of call_cpp_shape_fn."""
graph_def_version = op.graph.graph_def_versions.producer
node_def_str = op.node_def.SerializeToString()
def tensor_to_inference_result(t):
r = cpp_shape_inference_pb2.CppShapeInferenceResult()
r.shape.CopyFrom(t.get_shape().as_proto())
# pylint: disable=protected-access
r.handle_shape.CopyFrom(t._handle_shape)
r.handle_dtype = t._handle_dtype
# pylint: enable=protected-access
return r.SerializeToString()
input_shapes = [tensor_to_inference_result(i) for i in op.inputs]
input_tensors = [None for i in input_shapes]
for idx in input_tensors_needed:
v = tensor_util.constant_value(op.inputs[idx])
if v is not None:
input_tensors[idx] = np.asarray(v)
serialized_unknown_shape = (
tensor_shape.TensorShape(None).as_proto().SerializeToString())
arr = [serialized_unknown_shape for i in input_shapes]
for idx in input_tensors_as_shapes_needed:
s = tensor_util.constant_value_as_shape(op.inputs[idx])
if s is not None:
arr[idx] = s.as_proto().SerializeToString()
input_tensors_as_shapes = arr
missing_shape_fn = False
try:
with errors.raise_exception_on_not_ok_status() as status:
output = pywrap_tensorflow.RunCppShapeInference(
graph_def_version, node_def_str, input_shapes, input_tensors,
input_tensors_as_shapes, status)
except errors.InvalidArgumentError as err:
if err.message.startswith("No shape inference function exists for op"):
missing_shape_fn = True
else:
raise ValueError(err.message)
if missing_shape_fn:
if require_shape_fn:
raise RuntimeError(
"No C++ shape function registered for standard op: %s" % op.type)
return unknown_shape(op)
output_shapes = output[:-1]
# Convert TensorShapeProto values in output_shapes.
result_protos = [
cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
for s in output_shapes
]
result = [r.shape for r in result_protos]
result_handle_shapes = [r.handle_shape for r in result_protos]
result_handle_dtypes = [r.handle_dtype for r in result_protos]
if debug_python_shape_fn:
try:
python_result = [tensor_shape.as_shape(s)
for s in debug_python_shape_fn(op)]
except Exception as err:
raise AssertionError("Python shape function return error but "
"C++ shape functon did not: %s" % str(err))
result_as_shapes = [tensor_shape.as_shape(s) for s in result]
if str(result_as_shapes) != str(python_result):
raise ValueError(
("Python vs CPP shape mismatch. "
"CPP: %s vs python: %s on node %s "
"with input shapes %s") % (
str(result_as_shapes), str(python_result), str(op.node_def),
",".join([str(i.get_shape()) for i in op.inputs])))
return {"shapes": result,
"handle_shapes": result_handle_shapes,
"handle_dtypes": result_handle_dtypes,
"inputs_needed": output[-1]}
# pylint: disable=protected-access
ops._set_call_cpp_shape_fn(call_cpp_shape_fn)
# pylint: enable=protected-access
| apache-2.0 | 2,250,249,765,722,709,000 | -4,874,039,929,552,919,000 | 35.706128 | 80 | 0.655018 | false |
Designist/sympy | sympy/functions/elementary/tests/test_complexes.py | 15 | 25268 | from sympy import (
Abs, adjoint, arg, atan2, conjugate, cos, DiracDelta, E, exp, expand,
Expr, Function, Heaviside, I, im, log, nan, oo, pi, Rational, re, S,
sign, sin, sqrt, Symbol, symbols, transpose, zoo, exp_polar, Piecewise,
Interval, comp, Integral)
from sympy.utilities.pytest import XFAIL, raises
def N_equals(a, b):
"""Check whether two complex numbers are numerically close"""
return comp(a.n(), b.n(), 1.e-6)
def test_re():
x, y = symbols('x,y')
a, b = symbols('a,b', real=True)
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
assert re(nan) == nan
assert re(oo) == oo
assert re(-oo) == -oo
assert re(0) == 0
assert re(1) == 1
assert re(-1) == -1
assert re(E) == E
assert re(-E) == -E
assert re(x) == re(x)
assert re(x*I) == -im(x)
assert re(r*I) == 0
assert re(r) == r
assert re(i*I) == I * i
assert re(i) == 0
assert re(x + y) == re(x + y)
assert re(x + r) == re(x) + r
assert re(re(x)) == re(x)
assert re(2 + I) == 2
assert re(x + I) == re(x)
assert re(x + y*I) == re(x) - im(y)
assert re(x + r*I) == re(x)
assert re(log(2*I)) == log(2)
assert re((2 + I)**2).expand(complex=True) == 3
assert re(conjugate(x)) == re(x)
assert conjugate(re(x)) == re(x)
assert re(x).as_real_imag() == (re(x), 0)
assert re(i*r*x).diff(r) == re(i*x)
assert re(i*r*x).diff(i) == I*r*im(x)
assert re(
sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)
assert re(a * (2 + b*I)) == 2*a
assert re((1 + sqrt(a + b*I))/2) == \
(a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2)
assert re(x).rewrite(im) == x - im(x)
assert (x + re(y)).rewrite(re, im) == x + y - im(y)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_im():
x, y = symbols('x,y')
a, b = symbols('a,b', real=True)
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
assert im(nan) == nan
assert im(oo*I) == oo
assert im(-oo*I) == -oo
assert im(0) == 0
assert im(1) == 0
assert im(-1) == 0
assert im(E*I) == E
assert im(-E*I) == -E
assert im(x) == im(x)
assert im(x*I) == re(x)
assert im(r*I) == r
assert im(r) == 0
assert im(i*I) == 0
assert im(i) == -I * i
assert im(x + y) == im(x + y)
assert im(x + r) == im(x)
assert im(x + r*I) == im(x) + r
assert im(im(x)*I) == im(x)
assert im(2 + I) == 1
assert im(x + I) == im(x) + 1
assert im(x + y*I) == im(x) + re(y)
assert im(x + r*I) == im(x) + r
assert im(log(2*I)) == pi/2
assert im((2 + I)**2).expand(complex=True) == 4
assert im(conjugate(x)) == -im(x)
assert conjugate(im(x)) == im(x)
assert im(x).as_real_imag() == (im(x), 0)
assert im(i*r*x).diff(r) == im(i*x)
assert im(i*r*x).diff(i) == -I * re(r*x)
assert im(
sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)
assert im(a * (2 + b*I)) == a*b
assert im((1 + sqrt(a + b*I))/2) == \
(a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2
assert im(x).rewrite(re) == x - re(x)
assert (x + im(y)).rewrite(im, re) == x + y - re(y)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_sign():
assert sign(1.2) == 1
assert sign(-1.2) == -1
assert sign(3*I) == I
assert sign(-3*I) == -I
assert sign(0) == 0
assert sign(nan) == nan
assert sign(2 + 2*I).doit() == sqrt(2)*(2 + 2*I)/4
assert sign(2 + 3*I).simplify() == sign(2 + 3*I)
assert sign(2 + 2*I).simplify() == sign(1 + I)
assert sign(im(sqrt(1 - sqrt(3)))) == 1
assert sign(sqrt(1 - sqrt(3))) == I
x = Symbol('x')
assert sign(x).is_finite is True
assert sign(x).is_complex is True
assert sign(x).is_imaginary is None
assert sign(x).is_integer is None
assert sign(x).is_real is None
assert sign(x).is_zero is None
assert sign(x).doit() == sign(x)
assert sign(1.2*x) == sign(x)
assert sign(2*x) == sign(x)
assert sign(I*x) == I*sign(x)
assert sign(-2*I*x) == -I*sign(x)
assert sign(conjugate(x)) == conjugate(sign(x))
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
m = Symbol('m', negative=True)
assert sign(2*p*x) == sign(x)
assert sign(n*x) == -sign(x)
assert sign(n*m*x) == sign(x)
x = Symbol('x', imaginary=True)
assert sign(x).is_imaginary is True
assert sign(x).is_integer is False
assert sign(x).is_real is False
assert sign(x).is_zero is False
assert sign(x).diff(x) == 2*DiracDelta(-I*x)
assert sign(x).doit() == x / Abs(x)
assert conjugate(sign(x)) == -sign(x)
x = Symbol('x', real=True)
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is None
assert sign(x).diff(x) == 2*DiracDelta(x)
assert sign(x).doit() == sign(x)
assert conjugate(sign(x)) == sign(x)
x = Symbol('x', nonzero=True)
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is False
assert sign(x).doit() == x / Abs(x)
assert sign(Abs(x)) == 1
assert Abs(sign(x)) == 1
x = Symbol('x', positive=True)
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is False
assert sign(x).doit() == x / Abs(x)
assert sign(Abs(x)) == 1
assert Abs(sign(x)) == 1
x = 0
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is True
assert sign(x).doit() == 0
assert sign(Abs(x)) == 0
assert Abs(sign(x)) == 0
nz = Symbol('nz', nonzero=True, integer=True)
assert sign(nz).is_imaginary is False
assert sign(nz).is_integer is True
assert sign(nz).is_real is True
assert sign(nz).is_zero is False
assert sign(nz)**2 == 1
assert (sign(nz)**3).args == (sign(nz), 3)
assert sign(Symbol('x', nonnegative=True)).is_nonnegative
assert sign(Symbol('x', nonnegative=True)).is_nonpositive is None
assert sign(Symbol('x', nonpositive=True)).is_nonnegative is None
assert sign(Symbol('x', nonpositive=True)).is_nonpositive
assert sign(Symbol('x', real=True)).is_nonnegative is None
assert sign(Symbol('x', real=True)).is_nonpositive is None
assert sign(Symbol('x', real=True, zero=False)).is_nonpositive is None
x, y = Symbol('x', real=True), Symbol('y')
assert sign(x).rewrite(Piecewise) == \
Piecewise((1, x > 0), (-1, x < 0), (0, True))
assert sign(y).rewrite(Piecewise) == sign(y)
assert sign(x).rewrite(Heaviside) == 2*Heaviside(x)-1
assert sign(y).rewrite(Heaviside) == sign(y)
# evaluate what can be evaluated
assert sign(exp_polar(I*pi)*pi) is S.NegativeOne
eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
    # if there is a fast way to know when you can and when you cannot prove an
# expression like this is zero then the equality to zero is ok
assert sign(eq).func is sign or sign(eq) == 0
# but sometimes it's hard to do this so it's better not to load
# abs down with tests that will be very slow
q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
p = expand(q**3)**Rational(1, 3)
d = p - q
assert sign(d).func is sign or sign(d) == 0
def test_as_real_imag():
n = pi**1000
# the special code for working out the real
    # and imaginary parts of a power with Integer exponent
# should not run if there is no imaginary part, hence
# this should not hang
assert n.as_real_imag() == (n, 0)
# issue 6261
x = Symbol('x')
assert sqrt(x).as_real_imag() == \
((re(x)**2 + im(x)**2)**(S(1)/4)*cos(atan2(im(x), re(x))/2),
(re(x)**2 + im(x)**2)**(S(1)/4)*sin(atan2(im(x), re(x))/2))
# issue 3853
a, b = symbols('a,b', real=True)
assert ((1 + sqrt(a + b*I))/2).as_real_imag() == \
(
(a**2 + b**2)**Rational(
1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2),
(a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2)
assert sqrt(a**2).as_real_imag() == (sqrt(a**2), 0)
i = symbols('i', imaginary=True)
assert sqrt(i**2).as_real_imag() == (0, abs(i))
@XFAIL
def test_sign_issue_3068():
n = pi**1000
i = int(n)
assert (n - i).round() == 1 # doesn't hang
assert sign(n - i) == 1
# perhaps it's not possible to get the sign right when
    # only 1 digit is being requested for this situation;
# 2 digits works
assert (n - x).n(1, subs={x: i}) > 0
assert (n - x).n(2, subs={x: i}) > 0
def test_Abs():
raises(TypeError, lambda: Abs(Interval(2, 3))) # issue 8717
x, y = symbols('x,y')
assert sign(sign(x)) == sign(x)
assert sign(x*y).func is sign
assert Abs(0) == 0
assert Abs(1) == 1
assert Abs(-1) == 1
assert Abs(I) == 1
assert Abs(-I) == 1
assert Abs(nan) == nan
assert Abs(I * pi) == pi
assert Abs(-I * pi) == pi
assert Abs(I * x) == Abs(x)
assert Abs(-I * x) == Abs(x)
assert Abs(-2*x) == 2*Abs(x)
assert Abs(-2.0*x) == 2.0*Abs(x)
assert Abs(2*pi*x*y) == 2*pi*Abs(x*y)
assert Abs(conjugate(x)) == Abs(x)
assert conjugate(Abs(x)) == Abs(x)
a = Symbol('a', positive=True)
assert Abs(2*pi*x*a) == 2*pi*a*Abs(x)
assert Abs(2*pi*I*x*a) == 2*pi*a*Abs(x)
x = Symbol('x', real=True)
n = Symbol('n', integer=True)
assert Abs((-1)**n) == 1
assert x**(2*n) == Abs(x)**(2*n)
assert Abs(x).diff(x) == sign(x)
assert abs(x) == Abs(x) # Python built-in
assert Abs(x)**3 == x**2*Abs(x)
assert Abs(x)**4 == x**4
assert (
Abs(x)**(3*n)).args == (Abs(x), 3*n) # leave symbolic odd unchanged
assert (1/Abs(x)).args == (Abs(x), -1)
assert 1/Abs(x)**3 == 1/(x**2*Abs(x))
assert Abs(x)**-3 == Abs(x)/(x**4)
assert Abs(x**3) == x**2*Abs(x)
x = Symbol('x', imaginary=True)
assert Abs(x).diff(x) == -sign(x)
eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
# if there is a fast way to know when you can and when you cannot prove an
# expression like this is zero then the equality to zero is ok
assert abs(eq).func is Abs or abs(eq) == 0
# but sometimes it's hard to do this so it's better not to load
# abs down with tests that will be very slow
q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
p = expand(q**3)**Rational(1, 3)
d = p - q
assert abs(d).func is Abs or abs(d) == 0
assert Abs(4*exp(pi*I/4)) == 4
assert Abs(3**(2 + I)) == 9
assert Abs((-3)**(1 - I)) == 3*exp(pi)
assert Abs(oo) is oo
assert Abs(-oo) is oo
assert Abs(oo + I) is oo
assert Abs(oo + I*oo) is oo
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_Abs_rewrite():
x = Symbol('x', real=True)
a = Abs(x).rewrite(Heaviside).expand()
assert a == x*Heaviside(x) - x*Heaviside(-x)
for i in [-2, -1, 0, 1, 2]:
assert a.subs(x, i) == abs(i)
y = Symbol('y')
assert Abs(y).rewrite(Heaviside) == Abs(y)
x, y = Symbol('x', real=True), Symbol('y')
assert Abs(x).rewrite(Piecewise) == Piecewise((x, x >= 0), (-x, True))
assert Abs(y).rewrite(Piecewise) == Abs(y)
assert Abs(y).rewrite(sign) == y/sign(y)
def test_Abs_real():
# test some properties of abs that only apply
# to real numbers
x = Symbol('x', complex=True)
assert sqrt(x**2) != Abs(x)
assert Abs(x**2) != x**2
x = Symbol('x', real=True)
assert sqrt(x**2) == Abs(x)
assert Abs(x**2) == x**2
# if the symbol is zero, the following will still apply
nn = Symbol('nn', nonnegative=True, real=True)
np = Symbol('np', nonpositive=True, real=True)
assert Abs(nn) == nn
assert Abs(np) == -np
def test_Abs_properties():
x = Symbol('x')
assert Abs(x).is_real is True
assert Abs(x).is_rational is None
assert Abs(x).is_positive is None
assert Abs(x).is_nonnegative is True
z = Symbol('z', complex=True, zero=False)
assert Abs(z).is_real is True
assert Abs(z).is_rational is None
assert Abs(z).is_positive is True
assert Abs(z).is_zero is False
p = Symbol('p', positive=True)
assert Abs(p).is_real is True
assert Abs(p).is_rational is None
assert Abs(p).is_positive is True
assert Abs(p).is_zero is False
q = Symbol('q', rational=True)
assert Abs(q).is_rational is True
assert Abs(q).is_integer is None
assert Abs(q).is_positive is None
assert Abs(q).is_nonnegative is True
i = Symbol('i', integer=True)
assert Abs(i).is_integer is True
assert Abs(i).is_positive is None
assert Abs(i).is_nonnegative is True
e = Symbol('n', even=True)
ne = Symbol('ne', real=True, even=False)
assert Abs(e).is_even
assert Abs(ne).is_even is False
assert Abs(i).is_even is None
o = Symbol('n', odd=True)
no = Symbol('no', real=True, odd=False)
assert Abs(o).is_odd
assert Abs(no).is_odd is False
assert Abs(i).is_odd is None
def test_abs():
# this tests that abs calls Abs; don't rename to
# test_Abs since that test is already above
a = Symbol('a', positive=True)
assert abs(I*(1 + a)**2) == (1 + a)**2
def test_arg():
assert arg(0) == nan
assert arg(1) == 0
assert arg(-1) == pi
assert arg(I) == pi/2
assert arg(-I) == -pi/2
assert arg(1 + I) == pi/4
assert arg(-1 + I) == 3*pi/4
assert arg(1 - I) == -pi/4
f = Function('f')
assert not arg(f(0) + I*f(1)).atoms(re)
p = Symbol('p', positive=True)
assert arg(p) == 0
n = Symbol('n', negative=True)
assert arg(n) == pi
x = Symbol('x')
assert conjugate(arg(x)) == arg(x)
e = p + I*p**2
assert arg(e) == arg(1 + p*I)
# make sure sign doesn't swap
e = -2*p + 4*I*p**2
assert arg(e) == arg(-1 + 2*p*I)
# make sure sign isn't lost
x = symbols('x', real=True) # could be zero
e = x + I*x
assert arg(e) == arg(x*(1 + I))
assert arg(e/p) == arg(x*(1 + I))
e = p*cos(p) + I*log(p)*exp(p)
assert arg(e).args[0] == e
# keep it simple -- let the user do more advanced cancellation
e = (p + 1) + I*(p**2 - 1)
assert arg(e).args[0] == e
def test_arg_rewrite():
assert arg(1 + I) == atan2(1, 1)
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert arg(x + I*y).rewrite(atan2) == atan2(y, x)
def test_adjoint():
a = Symbol('a', antihermitian=True)
b = Symbol('b', hermitian=True)
assert adjoint(a) == -a
assert adjoint(I*a) == I*a
assert adjoint(b) == b
assert adjoint(I*b) == -I*b
assert adjoint(a*b) == -b*a
assert adjoint(I*a*b) == I*b*a
x, y = symbols('x y')
assert adjoint(adjoint(x)) == x
assert adjoint(x + y) == adjoint(x) + adjoint(y)
assert adjoint(x - y) == adjoint(x) - adjoint(y)
assert adjoint(x * y) == adjoint(x) * adjoint(y)
assert adjoint(x / y) == adjoint(x) / adjoint(y)
assert adjoint(-x) == -adjoint(x)
x, y = symbols('x y', commutative=False)
assert adjoint(adjoint(x)) == x
assert adjoint(x + y) == adjoint(x) + adjoint(y)
assert adjoint(x - y) == adjoint(x) - adjoint(y)
assert adjoint(x * y) == adjoint(y) * adjoint(x)
assert adjoint(x / y) == 1 / adjoint(y) * adjoint(x)
assert adjoint(-x) == -adjoint(x)
def test_conjugate():
a = Symbol('a', real=True)
b = Symbol('b', imaginary=True)
assert conjugate(a) == a
assert conjugate(I*a) == -I*a
assert conjugate(b) == -b
assert conjugate(I*b) == I*b
assert conjugate(a*b) == -a*b
assert conjugate(I*a*b) == I*a*b
x, y = symbols('x y')
assert conjugate(conjugate(x)) == x
assert conjugate(x + y) == conjugate(x) + conjugate(y)
assert conjugate(x - y) == conjugate(x) - conjugate(y)
assert conjugate(x * y) == conjugate(x) * conjugate(y)
assert conjugate(x / y) == conjugate(x) / conjugate(y)
assert conjugate(-x) == -conjugate(x)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_conjugate_transpose():
x = Symbol('x')
assert conjugate(transpose(x)) == adjoint(x)
assert transpose(conjugate(x)) == adjoint(x)
assert adjoint(transpose(x)) == conjugate(x)
assert transpose(adjoint(x)) == conjugate(x)
assert adjoint(conjugate(x)) == transpose(x)
assert conjugate(adjoint(x)) == transpose(x)
class Symmetric(Expr):
def _eval_adjoint(self):
return None
def _eval_conjugate(self):
return None
def _eval_transpose(self):
return self
x = Symmetric()
assert conjugate(x) == adjoint(x)
assert transpose(x) == x
def test_transpose():
a = Symbol('a', complex=True)
assert transpose(a) == a
assert transpose(I*a) == I*a
x, y = symbols('x y')
assert transpose(transpose(x)) == x
assert transpose(x + y) == transpose(x) + transpose(y)
assert transpose(x - y) == transpose(x) - transpose(y)
assert transpose(x * y) == transpose(x) * transpose(y)
assert transpose(x / y) == transpose(x) / transpose(y)
assert transpose(-x) == -transpose(x)
x, y = symbols('x y', commutative=False)
assert transpose(transpose(x)) == x
assert transpose(x + y) == transpose(x) + transpose(y)
assert transpose(x - y) == transpose(x) - transpose(y)
assert transpose(x * y) == transpose(y) * transpose(x)
assert transpose(x / y) == 1 / transpose(y) * transpose(x)
assert transpose(-x) == -transpose(x)
def test_polarify():
from sympy import polar_lift, polarify
x = Symbol('x')
z = Symbol('z', polar=True)
f = Function('f')
ES = {}
assert polarify(-1) == (polar_lift(-1), ES)
assert polarify(1 + I) == (polar_lift(1 + I), ES)
assert polarify(exp(x), subs=False) == exp(x)
assert polarify(1 + x, subs=False) == 1 + x
assert polarify(f(I) + x, subs=False) == f(polar_lift(I)) + x
assert polarify(x, lift=True) == polar_lift(x)
assert polarify(z, lift=True) == z
assert polarify(f(x), lift=True) == f(polar_lift(x))
assert polarify(1 + x, lift=True) == polar_lift(1 + x)
assert polarify(1 + f(x), lift=True) == polar_lift(1 + f(polar_lift(x)))
newex, subs = polarify(f(x) + z)
assert newex.subs(subs) == f(x) + z
mu = Symbol("mu")
sigma = Symbol("sigma", positive=True)
# Make sure polarify(lift=True) doesn't try to lift the integration
# variable
assert polarify(
Integral(sqrt(2)*x*exp(-(-mu + x)**2/(2*sigma**2))/(2*sqrt(pi)*sigma),
(x, -oo, oo)), lift=True) == Integral(sqrt(2)*(sigma*exp_polar(0))**exp_polar(I*pi)*
exp((sigma*exp_polar(0))**(2*exp_polar(I*pi))*exp_polar(I*pi)*polar_lift(-mu + x)**
(2*exp_polar(0))/2)*exp_polar(0)*polar_lift(x)/(2*sqrt(pi)), (x, -oo, oo))
def test_unpolarify():
from sympy import (exp_polar, polar_lift, exp, unpolarify,
principal_branch)
from sympy import gamma, erf, sin, tanh, uppergamma, Eq, Ne
from sympy.abc import x
p = exp_polar(7*I) + 1
u = exp(7*I) + 1
assert unpolarify(1) == 1
assert unpolarify(p) == u
assert unpolarify(p**2) == u**2
assert unpolarify(p**x) == p**x
assert unpolarify(p*x) == u*x
assert unpolarify(p + x) == u + x
assert unpolarify(sqrt(sin(p))) == sqrt(sin(u))
# Test reduction to principal branch 2*pi.
t = principal_branch(x, 2*pi)
assert unpolarify(t) == x
assert unpolarify(sqrt(t)) == sqrt(t)
# Test exponents_only.
assert unpolarify(p**p, exponents_only=True) == p**u
assert unpolarify(uppergamma(x, p**p)) == uppergamma(x, p**u)
# Test functions.
assert unpolarify(sin(p)) == sin(u)
assert unpolarify(tanh(p)) == tanh(u)
assert unpolarify(gamma(p)) == gamma(u)
assert unpolarify(erf(p)) == erf(u)
assert unpolarify(uppergamma(x, p)) == uppergamma(x, p)
assert unpolarify(uppergamma(sin(p), sin(p + exp_polar(0)))) == \
uppergamma(sin(u), sin(u + 1))
assert unpolarify(uppergamma(polar_lift(0), 2*exp_polar(0))) == \
uppergamma(0, 2)
assert unpolarify(Eq(p, 0)) == Eq(u, 0)
assert unpolarify(Ne(p, 0)) == Ne(u, 0)
assert unpolarify(polar_lift(x) > 0) == (x > 0)
# Test bools
assert unpolarify(True) is True
def test_issue_4035():
x = Symbol('x')
assert Abs(x).expand(trig=True) == Abs(x)
assert sign(x).expand(trig=True) == sign(x)
assert arg(x).expand(trig=True) == arg(x)
def test_issue_3206():
x = Symbol('x')
assert Abs(Abs(x)) == Abs(x)
def test_issue_4754_derivative_conjugate():
x = Symbol('x', real=True)
y = Symbol('y', imaginary=True)
f = Function('f')
assert (f(x).conjugate()).diff(x) == (f(x).diff(x)).conjugate()
assert (f(y).conjugate()).diff(y) == -(f(y).diff(y)).conjugate()
def test_derivatives_issue_4757():
x = Symbol('x', real=True)
y = Symbol('y', imaginary=True)
f = Function('f')
assert re(f(x)).diff(x) == re(f(x).diff(x))
assert im(f(x)).diff(x) == im(f(x).diff(x))
assert re(f(y)).diff(y) == -I*im(f(y).diff(y))
assert im(f(y)).diff(y) == -I*re(f(y).diff(y))
assert Abs(f(x)).diff(x).subs(f(x), 1 + I*x).doit() == x/sqrt(1 + x**2)
assert arg(f(x)).diff(x).subs(f(x), 1 + I*x**2).doit() == 2*x/(1 + x**4)
assert Abs(f(y)).diff(y).subs(f(y), 1 + y).doit() == -y/sqrt(1 - y**2)
assert arg(f(y)).diff(y).subs(f(y), I + y**2).doit() == 2*y/(1 + y**4)
def test_periodic_argument():
from sympy import (periodic_argument, unbranched_argument, oo,
principal_branch, polar_lift, pi)
x = Symbol('x')
p = Symbol('p', positive=True)
assert unbranched_argument(2 + I) == periodic_argument(2 + I, oo)
assert unbranched_argument(1 + x) == periodic_argument(1 + x, oo)
assert N_equals(unbranched_argument((1 + I)**2), pi/2)
assert N_equals(unbranched_argument((1 - I)**2), -pi/2)
assert N_equals(periodic_argument((1 + I)**2, 3*pi), pi/2)
assert N_equals(periodic_argument((1 - I)**2, 3*pi), -pi/2)
assert unbranched_argument(principal_branch(x, pi)) == \
periodic_argument(x, pi)
assert unbranched_argument(polar_lift(2 + I)) == unbranched_argument(2 + I)
assert periodic_argument(polar_lift(2 + I), 2*pi) == \
periodic_argument(2 + I, 2*pi)
assert periodic_argument(polar_lift(2 + I), 3*pi) == \
periodic_argument(2 + I, 3*pi)
assert periodic_argument(polar_lift(2 + I), pi) == \
periodic_argument(polar_lift(2 + I), pi)
assert unbranched_argument(polar_lift(1 + I)) == pi/4
assert periodic_argument(2*p, p) == periodic_argument(p, p)
assert periodic_argument(pi*p, p) == periodic_argument(p, p)
assert Abs(polar_lift(1 + I)) == Abs(1 + I)
@XFAIL
def test_principal_branch_fail():
# TODO XXX why does abs(x)._eval_evalf() not fall back to global evalf?
assert N_equals(principal_branch((1 + I)**2, pi/2), 0)
def test_principal_branch():
from sympy import principal_branch, polar_lift, exp_polar
p = Symbol('p', positive=True)
x = Symbol('x')
neg = Symbol('x', negative=True)
assert principal_branch(polar_lift(x), p) == principal_branch(x, p)
assert principal_branch(polar_lift(2 + I), p) == principal_branch(2 + I, p)
assert principal_branch(2*x, p) == 2*principal_branch(x, p)
assert principal_branch(1, pi) == exp_polar(0)
assert principal_branch(-1, 2*pi) == exp_polar(I*pi)
assert principal_branch(-1, pi) == exp_polar(0)
assert principal_branch(exp_polar(3*pi*I)*x, 2*pi) == \
principal_branch(exp_polar(I*pi)*x, 2*pi)
assert principal_branch(neg*exp_polar(pi*I), 2*pi) == neg*exp_polar(-I*pi)
assert N_equals(principal_branch((1 + I)**2, 2*pi), 2*I)
assert N_equals(principal_branch((1 + I)**2, 3*pi), 2*I)
assert N_equals(principal_branch((1 + I)**2, 1*pi), 2*I)
# test argument sanitization
assert principal_branch(x, I).func is principal_branch
assert principal_branch(x, -4).func is principal_branch
assert principal_branch(x, -oo).func is principal_branch
assert principal_branch(x, zoo).func is principal_branch
@XFAIL
def test_issue_6167_6151():
n = pi**1000
i = int(n)
assert sign(n - i) == 1
assert abs(n - i) == n - i
eps = pi**-1500
big = pi**1000
one = cos(x)**2 + sin(x)**2
e = big*one - big + eps
assert sign(simplify(e)) == 1
for xi in (111, 11, 1, S(1)/10):
assert sign(e.subs(x, xi)) == 1
| bsd-3-clause | 2,854,585,406,116,193,000 | -5,086,805,490,193,004,000 | 30.743719 | 92 | 0.573176 | false |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/io_scene_3ds/__init__.py | 1 | 6950 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
bl_info = {
"name": "Autodesk 3DS format",
"author": "Bob Holcomb, Campbell Barton",
"blender": (2, 57, 0),
"location": "File > Import-Export",
"description": "Import-Export 3DS, meshes, uvs, materials, textures, "
"cameras & lamps",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Autodesk_3DS",
"tracker_url": "",
"support": 'OFFICIAL',
"category": "Import-Export"}
if "bpy" in locals():
import imp
if "import_3ds" in locals():
imp.reload(import_3ds)
if "export_3ds" in locals():
imp.reload(export_3ds)
import bpy
from bpy.props import StringProperty, FloatProperty, BoolProperty, EnumProperty
from bpy_extras.io_utils import (ImportHelper,
ExportHelper,
axis_conversion,
)
class Import3DS(bpy.types.Operator, ImportHelper):
"""Import from 3DS file format (.3ds)"""
bl_idname = "import_scene.autodesk_3ds"
bl_label = 'Import 3DS'
bl_options = {'UNDO'}
filename_ext = ".3ds"
filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})
constrain_size = FloatProperty(
name="Size Constraint",
description="Scale the model by 10 until it reaches the "
"size constraint (0 to disable)",
min=0.0, max=1000.0,
soft_min=0.0, soft_max=1000.0,
default=10.0,
)
use_image_search = BoolProperty(
name="Image Search",
description="Search subdirectories for any associated images "
"(Warning, may be slow)",
default=True,
)
use_apply_transform = BoolProperty(
name="Apply Transform",
description="Workaround for object transformations "
"importing incorrectly",
default=True,
)
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='Y',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Z',
)
def execute(self, context):
from . import import_3ds
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"filter_glob",
))
global_matrix = axis_conversion(from_forward=self.axis_forward,
from_up=self.axis_up,
).to_4x4()
keywords["global_matrix"] = global_matrix
return import_3ds.load(self, context, **keywords)
class Export3DS(bpy.types.Operator, ExportHelper):
"""Export to 3DS file format (.3ds)"""
bl_idname = "export_scene.autodesk_3ds"
bl_label = 'Export 3DS'
filename_ext = ".3ds"
filter_glob = StringProperty(
default="*.3ds",
options={'HIDDEN'},
)
use_selection = BoolProperty(
name="Selection Only",
description="Export selected objects only",
default=False,
)
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='Y',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Z',
)
def execute(self, context):
from . import export_3ds
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"filter_glob",
"check_existing",
))
global_matrix = axis_conversion(to_forward=self.axis_forward,
to_up=self.axis_up,
).to_4x4()
keywords["global_matrix"] = global_matrix
return export_3ds.save(self, context, **keywords)
# Add to a menu
def menu_func_export(self, context):
self.layout.operator(Export3DS.bl_idname, text="3D Studio (.3ds)")
def menu_func_import(self, context):
self.layout.operator(Import3DS.bl_idname, text="3D Studio (.3ds)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func_import)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
# NOTES:
# why add 1 extra vertex? and remove it when done? -
# "Answer - eekadoodle - would need to re-order UV's without this since face
# order isnt always what we give blender, BMesh will solve :D"
#
# disabled scaling to size, this requires exposing bb (easy) and understanding
# how it works (needs some time)
if __name__ == "__main__":
register()
| gpl-3.0 | 1,672,561,239,094,621,200 | -7,473,244,643,928,881,000 | 32.095238 | 79 | 0.496259 | false |
HyperloopTeam/FullOpenMDAO | cantera-2.0.2/interfaces/python/MixMaster/Units/unit.py | 1 | 2833 | import operator
class unit:
_zero = (0,) * 7
_negativeOne = (-1, ) * 7
_labels = ('m', 'kg', 's', 'A', 'K', 'mol', 'cd')
def __init__(self, value, derivation):
self.value = value
self.derivation = derivation
return
def __add__(self, other):
if not self.derivation == other.derivation:
raise ImcompatibleUnits(self, other)
return unit(self.value + other.value, self.derivation)
def __sub__(self, other):
if not self.derivation == other.derivation:
raise ImcompatibleUnits(self, other)
return unit(self.value - other.value, self.derivation)
def __mul__(self, other):
if type(other) == type(0) or type(other) == type(0.0):
return unit(other*self.value, self.derivation)
value = self.value * other.value
derivation = tuple(map(operator.add, self.derivation, other.derivation))
return unit(value, derivation)
def __div__(self, other):
if type(other) == type(0) or type(other) == type(0.0):
return unit(self.value/other, self.derivation)
value = self.value / other.value
derivation = tuple(map(operator.sub, self.derivation, other.derivation))
return unit(value, derivation)
def __pow__(self, other):
if type(other) != type(0) and type(other) != type(0.0):
raise BadOperation
value = self.value ** other
derivation = tuple(map(operator.mul, [other]*7, self.derivation))
return unit(value, derivation)
def __pos__(self): return self
def __neg__(self): return unit(-self.value, self.derivation)
def __abs__(self): return unit(abs(self.value), self.derivation)
def __invert__(self):
value = 1./self.value
derivation = tuple(map(operator.mul, self._negativeOne, self.derivation))
return unit(value, derivation)
def __rmul__(self, other):
return unit.__mul__(self, other)
def __rdiv__(self, other):
if type(other) != type(0) and type(other) != type(0.0):
raise BadOperation(self, other)
value = other/self.value
derivation = tuple(map(operator.mul, self._negativeOne, self.derivation))
return unit(value, derivation)
def __float__(self):
return self.value
#if self.derivation == self._zero: return self.value
#raise BadConversion(self)
def __str__(self):
str = "%g" % self.value
for i in range(0, 7):
exponent = self.derivation[i]
if exponent == 0: continue
if exponent == 1:
str = str + " %s" % (self._labels[i])
else:
str = str + " %s^%d" % (self._labels[i], exponent)
return str
dimensionless = unit(1, unit._zero)
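# Illustrative usage sketch (not part of the original Cantera module); it relies
# on the SI base-unit ordering encoded in unit._labels above: (m, kg, s, A, K, mol, cd).
#
#   meter = unit(1.0, (1, 0, 0, 0, 0, 0, 0))
#   second = unit(1.0, (0, 0, 1, 0, 0, 0, 0))
#   speed = 5.0 * meter / second     # exponents combine: derivation (1, 0, -1, 0, 0, 0, 0)
#   print speed                      # -> "5 m s^-1"
#   area = (2.0 * meter) ** 2        # -> "4 m^2"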
| gpl-2.0 | -5,281,598,940,366,677,000 | -6,665,963,887,981,915,000 | 25.476636 | 81 | 0.570773 | false |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/lib2to3/pytree.py | 71 | 28305 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <[email protected]>"
import sys
import warnings
from io import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
class Base(object):
"""
Abstract base class for Node and Leaf.
This provides some default functionality and boilerplate using the
template pattern.
A node may be a subnode of at most one parent.
"""
# Default values for instance variables
type = None # int: token number (< 256) or symbol number (>= 256)
parent = None # Parent node pointer, or None
children = () # Tuple of subnodes
was_changed = False
was_checked = False
def __new__(cls, *args, **kwds):
"""Constructor that prevents Base from being instantiated."""
assert cls is not Base, "Cannot instantiate Base"
return object.__new__(cls)
def __eq__(self, other):
"""
Compare two nodes for equality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
__hash__ = None # For Py3 compatibility.
def __ne__(self, other):
"""
Compare two nodes for inequality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
def _eq(self, other):
"""
Compare two nodes for equality.
This is called by __eq__ and __ne__. It is only called if the two nodes
have the same type. This must be implemented by the concrete subclass.
Nodes should be considered equal if they have the same structure,
ignoring the prefix string and other context information.
"""
raise NotImplementedError
def clone(self):
"""
Return a cloned (deep) copy of self.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def post_order(self):
"""
Return a post-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def pre_order(self):
"""
Return a pre-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def replace(self, new):
"""Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
assert new is not None
if not isinstance(new, list):
new = [new]
l_children = []
found = False
for ch in self.parent.children:
if ch is self:
assert not found, (self.parent.children, self, new)
if new is not None:
l_children.extend(new)
found = True
else:
l_children.append(ch)
assert found, (self.children, self, new)
self.parent.changed()
self.parent.children = l_children
for x in new:
x.parent = self.parent
self.parent = None
def get_lineno(self):
"""Return the line number which generated the invocant node."""
node = self
while not isinstance(node, Leaf):
if not node.children:
return
node = node.children[0]
return node.lineno
def changed(self):
if self.parent:
self.parent.changed()
self.was_changed = True
def remove(self):
"""
Remove the node from the tree. Returns the position of the node in its
parent's children before it was removed.
"""
if self.parent:
for i, node in enumerate(self.parent.children):
if node is self:
self.parent.changed()
del self.parent.children[i]
self.parent = None
return i
@property
def next_sibling(self):
"""
The node immediately following the invocant in their parent's children
list. If the invocant does not have a next sibling, it is None
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i+1]
except IndexError:
return None
@property
def prev_sibling(self):
"""
The node immediately preceding the invocant in their parent's children
list. If the invocant does not have a previous sibling, it is None.
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i-1]
def leaves(self):
for child in self.children:
yield from child.leaves()
def depth(self):
if self.parent is None:
return 0
return 1 + self.parent.depth()
def get_suffix(self):
"""
Return the string immediately following the invocant node. This is
effectively equivalent to node.next_sibling.prefix
"""
next_sib = self.next_sibling
if next_sib is None:
return ""
return next_sib.prefix
if sys.version_info < (3, 0):
def __str__(self):
return str(self).encode("ascii")
class Node(Base):
"""Concrete implementation for interior nodes."""
    def __init__(self, type, children,
context=None,
prefix=None,
fixers_applied=None):
"""
Initializer.
Takes a type constant (a symbol number >= 256), a sequence of
child nodes, and an optional context keyword argument.
As a side effect, the parent pointers of the children are updated.
"""
assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
assert ch.parent is None, repr(ch)
ch.parent = self
if prefix is not None:
self.prefix = prefix
if fixers_applied:
self.fixers_applied = fixers_applied[:]
else:
self.fixers_applied = None
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%s, %r)" % (self.__class__.__name__,
type_repr(self.type),
self.children)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return "".join(map(str, self.children))
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children)
def clone(self):
"""Return a cloned (deep) copy of self."""
return Node(self.type, [ch.clone() for ch in self.children],
fixers_applied=self.fixers_applied)
def post_order(self):
"""Return a post-order iterator for the tree."""
for child in self.children:
yield from child.post_order()
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
for child in self.children:
yield from child.pre_order()
def _prefix_getter(self):
"""
The whitespace and comments preceding this node in the input.
"""
if not self.children:
return ""
return self.children[0].prefix
def _prefix_setter(self, prefix):
if self.children:
self.children[0].prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def set_child(self, i, child):
"""
Equivalent to 'node.children[i] = child'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children[i].parent = None
self.children[i] = child
self.changed()
def insert_child(self, i, child):
"""
Equivalent to 'node.children.insert(i, child)'. This method also sets
the child's parent attribute appropriately.
"""
child.parent = self
self.children.insert(i, child)
self.changed()
def append_child(self, child):
"""
Equivalent to 'node.children.append(child)'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children.append(child)
self.changed()
class Leaf(Base):
"""Concrete implementation for leaf nodes."""
# Default values for instance variables
_prefix = "" # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
    column = 0 # Column where this token starts in the input
def __init__(self, type, value,
context=None,
prefix=None,
fixers_applied=[]):
"""
Initializer.
Takes a type constant (a token number < 256), a string value, and an
optional context keyword argument.
"""
assert 0 <= type < 256, type
if context is not None:
self._prefix, (self.lineno, self.column) = context
self.type = type
self.value = value
if prefix is not None:
self._prefix = prefix
self.fixers_applied = fixers_applied[:]
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%r, %r)" % (self.__class__.__name__,
self.type,
self.value)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return self.prefix + str(self.value)
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def clone(self):
"""Return a cloned (deep) copy of self."""
return Leaf(self.type, self.value,
(self.prefix, (self.lineno, self.column)),
fixers_applied=self.fixers_applied)
def leaves(self):
yield self
def post_order(self):
"""Return a post-order iterator for the tree."""
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
def _prefix_getter(self):
"""
The whitespace and comments preceding this token in the input.
"""
return self._prefix
def _prefix_setter(self, prefix):
self.changed()
self._prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node):
"""
Convert raw node information to a Node or Leaf instance.
This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
class BasePattern(object):
"""
A pattern is a tree matching pattern.
It looks for a specific node type (token or symbol), and
optionally for a specific content.
This is an abstract base class. There are three concrete
subclasses:
- LeafPattern matches a single leaf node;
- NodePattern matches a single node (usually non-leaf);
- WildcardPattern matches a sequence of nodes of variable length.
"""
# Defaults for instance variables
type = None # Node type (token if < 256, symbol if >= 256)
content = None # Optional content matching pattern
name = None # Optional name used to store match in results dict
def __new__(cls, *args, **kwds):
"""Constructor that prevents BasePattern from being instantiated."""
assert cls is not BasePattern, "Cannot instantiate BasePattern"
return object.__new__(cls)
def __repr__(self):
args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None:
del args[-1]
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
def optimize(self):
"""
A subclass can define this as a hook for optimizations.
Returns either self or another node with the same effect.
"""
return self
def match(self, node, results=None):
"""
Does this pattern exactly match a node?
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
Default implementation for non-wildcard patterns.
"""
if self.type is not None and node.type != self.type:
return False
if self.content is not None:
r = None
if results is not None:
r = {}
if not self._submatch(node, r):
return False
if r:
results.update(r)
if results is not None and self.name:
results[self.name] = node
return True
def match_seq(self, nodes, results=None):
"""
Does this pattern exactly match a sequence of nodes?
Default implementation for non-wildcard patterns.
"""
if len(nodes) != 1:
return False
return self.match(nodes[0], results)
def generate_matches(self, nodes):
"""
Generator yielding all matches for this pattern.
Default implementation for non-wildcard patterns.
"""
r = {}
if nodes and self.match(nodes[0], r):
yield 1, r
class LeafPattern(BasePattern):
def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name
def match(self, node, results=None):
"""Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
return self.content == node.value
class NodePattern(BasePattern):
wildcards = False
def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
The type, if given, must be a symbol type (>= 256). If the
type is None this matches *any* single node (leaf or not),
        except if content is not None, in which case it only matches
non-leaf nodes that also match the content pattern.
The content, if not None, must be a sequence of Patterns that
must match the node's children exactly. If the content is
given, the type must not be None.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert type >= 256, type
if content is not None:
assert not isinstance(content, str), repr(content)
content = list(content)
for i, item in enumerate(content):
assert isinstance(item, BasePattern), (i, item)
if isinstance(item, WildcardPattern):
self.wildcards = True
self.type = type
self.content = content
self.name = name
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
if self.wildcards:
for c, r in generate_matches(self.content, node.children):
if c == len(node.children):
if results is not None:
results.update(r)
return True
return False
if len(self.content) != len(node.children):
return False
for subpattern, child in zip(self.content, node.children):
if not subpattern.match(child, results):
return False
return True
class WildcardPattern(BasePattern):
"""
A wildcard pattern can match zero or more nodes.
This has all the flexibility needed to implement patterns like:
.* .+ .? .{m,n}
(a b c | d e | f)
(...)* (...)+ (...)? (...){m,n}
except it always uses non-greedy matching.
"""
def __init__(self, content=None, min=0, max=HUGE, name=None):
"""
Initializer.
Args:
content: optional sequence of subsequences of patterns;
if absent, matches one node;
if present, each subsequence is an alternative [*]
min: optional minimum number of times to match, default 0
max: optional maximum number of times to match, default HUGE
name: optional name assigned to this match
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
equivalent to (a b c | d e | f g h); if content is None,
this is equivalent to '.' in regular expression terms.
The min and max parameters work as follows:
min=0, max=maxint: .*
min=1, max=maxint: .+
min=0, max=1: .?
min=1, max=1: .
If content is not None, replace the dot with the parenthesized
list of alternatives, e.g. (a b c | d e | f g h)*
"""
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
content = tuple(map(tuple, content)) # Protect against alterations
# Check sanity of alternatives
assert len(content), repr(content) # Can't have zero alternatives
for alt in content:
                assert len(alt), repr(alt) # Can't have empty alternatives
self.content = content
self.min = min
self.max = max
self.name = name
def optimize(self):
"""Optimize certain stacked wildcard patterns."""
subpattern = None
if (self.content is not None and
len(self.content) == 1 and len(self.content[0]) == 1):
subpattern = self.content[0][0]
if self.min == 1 and self.max == 1:
if self.content is None:
return NodePattern(name=self.name)
if subpattern is not None and self.name == subpattern.name:
return subpattern.optimize()
if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
subpattern.min <= 1 and self.name == subpattern.name):
return WildcardPattern(subpattern.content,
self.min*subpattern.min,
self.max*subpattern.max,
subpattern.name)
return self
def match(self, node, results=None):
"""Does this pattern exactly match a node?"""
return self.match_seq([node], results)
def match_seq(self, nodes, results=None):
"""Does this pattern exactly match a sequence of nodes?"""
for c, r in self.generate_matches(nodes):
if c == len(nodes):
if results is not None:
results.update(r)
if self.name:
results[self.name] = list(nodes)
return True
return False
def generate_matches(self, nodes):
"""
Generator yielding matches for a sequence of nodes.
Args:
nodes: sequence of nodes
Yields:
(count, results) tuples where:
count: the match comprises nodes[:count];
results: dict containing named submatches.
"""
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in range(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]
yield count, r
elif self.name == "bare_name":
yield self._bare_name_matches(nodes)
else:
# The reason for this is that hitting the recursion limit usually
# results in some ugly messages about how RuntimeErrors are being
# ignored. We only have to do this on CPython, though, because other
# implementations don't have this nasty bug in the first place.
if hasattr(sys, "getrefcount"):
save_stderr = sys.stderr
sys.stderr = StringIO()
try:
for count, r in self._recursive_matches(nodes, 0):
if self.name:
r[self.name] = nodes[:count]
yield count, r
except RuntimeError:
# We fall back to the iterative pattern matching scheme if the recursive
# scheme hits the recursion limit.
for count, r in self._iterative_matches(nodes):
if self.name:
r[self.name] = nodes[:count]
yield count, r
finally:
if hasattr(sys, "getrefcount"):
sys.stderr = save_stderr
def _iterative_matches(self, nodes):
"""Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
results = []
# generate matches that use just one alt from self.content
for alt in self.content:
for c, r in generate_matches(alt, nodes):
yield c, r
results.append((c, r))
# for each match, iterate down the nodes
while results:
new_results = []
for c0, r0 in results:
# stop if the entire set of nodes has been matched
if c0 < nodelen and c0 <= self.max:
for alt in self.content:
for c1, r1 in generate_matches(alt, nodes[c0:]):
if c1 > 0:
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
new_results.append((c0 + c1, r))
results = new_results
def _bare_name_matches(self, nodes):
"""Special optimized matcher for bare_name."""
count = 0
r = {}
done = False
max = len(nodes)
while not done and count < max:
done = True
for leaf in self.content:
if leaf[0].match(nodes[count], r):
count += 1
done = False
break
r[self.name] = nodes[:count]
return count, r
def _recursive_matches(self, nodes, count):
"""Helper to recursively yield the matches."""
assert self.content is not None
if count >= self.min:
yield 0, {}
if count < self.max:
for alt in self.content:
for c0, r0 in generate_matches(alt, nodes):
for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
class NegatedPattern(BasePattern):
def __init__(self, content=None):
"""
Initializer.
The argument is either a pattern or None. If it is None, this
only matches an empty sequence (effectively '$' in regex
lingo). If it is not None, this matches whenever the argument
pattern doesn't have any matches.
"""
if content is not None:
assert isinstance(content, BasePattern), repr(content)
self.content = content
def match(self, node):
# We never match a node in its entirety
return False
def match_seq(self, nodes):
# We only match an empty sequence of nodes in its entirety
return len(nodes) == 0
def generate_matches(self, nodes):
if self.content is None:
# Return a match if there is an empty sequence
if len(nodes) == 0:
yield 0, {}
else:
# Return a match if the argument pattern has no matches
for c, r in self.content.generate_matches(nodes):
return
yield 0, {}
def generate_matches(patterns, nodes):
"""
Generator yielding matches for a sequence of patterns and nodes.
Args:
patterns: a sequence of patterns
nodes: a sequence of nodes
Yields:
(count, results) tuples where:
count: the entire sequence of patterns matches nodes[:count];
results: dict containing named submatches.
"""
if not patterns:
yield 0, {}
else:
p, rest = patterns[0], patterns[1:]
for c0, r0 in p.generate_matches(nodes):
if not rest:
yield c0, r0
else:
for c1, r1 in generate_matches(rest, nodes[c0:]):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
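# Minimal matching sketch (illustrative, not part of the original module); token
# numbers follow lib2to3.pgen2.token, where NAME == 1.
#
#   from lib2to3.pgen2 import token
#
#   leaf = Leaf(token.NAME, "x")
#   pat = LeafPattern(token.NAME, name="id")
#   results = {}
#   if pat.match(leaf, results):
#       print(results["id"])         # -> Leaf(1, 'x')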
| lgpl-3.0 | -7,125,816,719,729,900,000 | -4,168,393,343,349,502,000 | 31.760417 | 88 | 0.555238 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/windows_configuration_py3.py | 1 | 2719 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WindowsConfiguration(Model):
"""Specifies Windows operating system settings on the virtual machine.
:param provision_vm_agent: Indicates whether virtual machine agent should
be provisioned on the virtual machine. <br><br> When this property is not
specified in the request body, default behavior is to set it to true.
This will ensure that VM Agent is installed on the VM so that extensions
can be added to the VM later.
:type provision_vm_agent: bool
:param enable_automatic_updates: Indicates whether virtual machine is
enabled for automatic updates.
:type enable_automatic_updates: bool
:param time_zone: Specifies the time zone of the virtual machine. e.g.
"Pacific Standard Time"
:type time_zone: str
:param additional_unattend_content: Specifies additional base-64 encoded
XML formatted information that can be included in the Unattend.xml file,
which is used by Windows Setup.
:type additional_unattend_content:
list[~azure.mgmt.compute.v2016_03_30.models.AdditionalUnattendContent]
:param win_rm: Specifies the Windows Remote Management listeners. This
enables remote Windows PowerShell.
:type win_rm: ~azure.mgmt.compute.v2016_03_30.models.WinRMConfiguration
"""
_attribute_map = {
'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'additional_unattend_content': {'key': 'additionalUnattendContent', 'type': '[AdditionalUnattendContent]'},
'win_rm': {'key': 'winRM', 'type': 'WinRMConfiguration'},
}
def __init__(self, *, provision_vm_agent: bool=None, enable_automatic_updates: bool=None, time_zone: str=None, additional_unattend_content=None, win_rm=None, **kwargs) -> None:
super(WindowsConfiguration, self).__init__(**kwargs)
self.provision_vm_agent = provision_vm_agent
self.enable_automatic_updates = enable_automatic_updates
self.time_zone = time_zone
self.additional_unattend_content = additional_unattend_content
self.win_rm = win_rm
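# Illustrative construction sketch (not part of the generated SDK file); the
# keyword arguments mirror the attributes documented in the class docstring.
#
#   win_config = WindowsConfiguration(
#       provision_vm_agent=True,
#       enable_automatic_updates=True,
#       time_zone="Pacific Standard Time",
#   )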
| mit | -8,239,756,699,175,881,000 | -7,146,858,022,295,624,000 | 49.351852 | 180 | 0.670835 | false |
ppwwyyxx/tensorflow | tensorflow/python/keras/utils/version_utils.py | 3 | 2667 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities for Keras classes with v1 and v2 versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.util import lazy_loader
# TODO(b/134426265): Switch back to single-quotes once the issue
# with copybara is fixed.
# pylint: disable=g-inconsistent-quotes
training = lazy_loader.LazyLoader(
"training", globals(),
"tensorflow.python.keras.engine.training")
training_v1 = lazy_loader.LazyLoader(
"training_v1", globals(),
"tensorflow.python.keras.engine.training_v1")
# pylint: enable=g-inconsistent-quotes
# TODO(omalleyt): Extend to Layer class once Layer class is split.
class VersionSelector(object):
"""Chooses between Keras v1 and v2 Model class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
new_cls = swap_class(cls, training.Model, training_v1.Model)
return object.__new__(new_cls)
def swap_class(cls, v2_cls, v1_cls):
"""Swaps in v2_cls or v1_cls depending on graph mode."""
if cls == object:
return cls
if cls in (v2_cls, v1_cls):
if ops.executing_eagerly_outside_functions():
return v2_cls
return v1_cls
# Recursively search superclasses to swap in the right Keras class.
cls.__bases__ = tuple(
swap_class(base, v2_cls, v1_cls) for base in cls.__bases__)
return cls
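# Hypothetical illustration (not from the original file): a user-defined subclass
# of training.Model gets its base swapped to training_v1.Model when running in
# legacy graph mode, because VersionSelector.__new__ calls swap_class above.
#
#   class MyModel(training.Model):   # assumed user-defined subclass
#     pass
#
#   m = MyModel()   # resolves against the v1 or v2 Model depending on eager mode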
def disallow_legacy_graph(cls_name, method_name):
if not ops.executing_eagerly_outside_functions():
error_msg = (
"Calling `{cls_name}.{method_name}` in graph mode is not supported "
"when the `{cls_name}` instance was constructed with eager mode "
"enabled. Please construct your `{cls_name}` instance in graph mode or"
" call `{cls_name}.{method_name}` with eager mode enabled.")
error_msg = error_msg.format(cls_name=cls_name, method_name=method_name)
raise ValueError(error_msg)
| apache-2.0 | -1,487,040,122,441,087,200 | 8,823,358,285,462,517,000 | 37.1 | 80 | 0.695163 | false |
matiboy/django_safari_notifications | django_safari_notifications/apps.py | 1 | 1111 | # -*- coding: utf-8
from django.apps import AppConfig
import logging
class DjangoSafariNotificationsConfig(AppConfig):
name = 'django_safari_notifications'
verbose_name = 'Safari Push Notifications'
version = 'v1'
service_base = 'push'
userinfo_key = 'userinfo'
logger = logging.getLogger('django_safari_notifications')
# Provide path to a pem file containing the certificate, the key as well as Apple's WWDRCA
cert = 'path/to/your/cert'
passphrase = 'pass:xxxx' # this will be used with -passin in the openssl command so could be with pass, env etc
# If single site, just set these values. Otherwise create Domain entries
website_conf = None
# sample single site: do not include the authenticationToken
"""
website_conf = {
"websiteName": "Bay Airlines",
"websitePushID": "web.com.example.domain",
"allowedDomains": ["http://domain.example.com"],
"urlFormatString": "http://domain.example.com/%@/?flight=%@",
"webServiceURL": "https://example.com/push"
}
"""
iconset_folder = '/path/to/your/iconset'
| mit | -6,018,387,887,189,308,000 | 2,089,127,860,641,072,000 | 38.678571 | 115 | 0.673267 | false |
TomSkelly/MatchAnnot | showAnnot.py | 1 | 2299 | #!/usr/bin/env python
# Read annotation file, print selected stuff in human-readable format.
# AUTHOR: Tom Skelly ([email protected])
import os
import sys
import optparse
import re # regular expressions
import cPickle as pickle
from tt_log import logger
import Annotations as anno
VERSION = '20150417.01'
def main ():
logger.debug('version %s starting' % VERSION)
opt, args = getParms()
if opt.gtfpickle is not None:
handle = open (opt.gtfpickle, 'r')
pk = pickle.Unpickler (handle)
annotList = pk.load()
handle.close()
else:
annotList = anno.AnnotationList (opt.gtf)
geneList = annotList.getGene (opt.gene)
if geneList is None:
print 'gene %s not found in annotations' % opt.gene
elif len(geneList) != 1:
print 'there are %d occurrences of gene %s in annotations' % (len(geneList), opt.gene)
else:
geneEnt = geneList[0]
print 'gene: ',
printEnt (geneEnt)
for transEnt in geneEnt.getChildren():
print '\ntr: ',
printTran (transEnt)
for exonEnt in transEnt.getChildren():
print 'exon: ',
printEnt (exonEnt)
logger.debug('finished')
return
def printEnt (ent):
print '%-15s %9d %9d %6d' % (ent.name, ent.start, ent.end, ent.end-ent.start+1)
return
def printTran (ent):
print '%-15s %9d %9d %6d' % (ent.name, ent.start, ent.end, ent.end-ent.start+1),
if hasattr (ent, 'startcodon'):
print ' start: %9d' % ent.startcodon,
if hasattr (ent, 'stopcodon'):
print ' stop: %9d' % ent.stopcodon,
print
return
def getParms (): # use default input sys.argv[1:]
parser = optparse.OptionParser(usage='%prog [options] <fasta_file> ... ')
parser.add_option ('--gtf', help='annotations in gtf format')
parser.add_option ('--gtfpickle', help='annotations in pickled gtf format')
parser.add_option ('--gene', help='gene to print')
parser.set_defaults (gtf=None,
gtfpickle=None,
gene=None,
)
opt, args = parser.parse_args()
return opt, args
if __name__ == "__main__":
main()
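# Example invocations (hypothetical file and gene names):
#   python showAnnot.py --gtf annotations.gtf --gene TP53
#   python showAnnot.py --gtfpickle annotations.pk --gene TP53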
| gpl-3.0 | -7,587,547,867,571,006,000 | 270,577,330,769,084,400 | 24.263736 | 94 | 0.579382 | false |
phuihock/birtconn | addons/report_birt/wizard/report_birt.py | 2 | 13419 | # -*- encoding: utf-8 -*-
from collections import OrderedDict
from datetime import datetime, date, time
from lxml import etree
from openerp import netsvc, tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.report.interface import report_int
from openerp.tools.translate import _
import openerp.pooler as pooler
import os
import re
import requests
import simplejson as json
def get_report_api(cr, uid, pool, context=None):
ir_config_parameter = pool.get("ir.config_parameter")
api = ir_config_parameter.get_param(cr, uid, "birtconn.api", context=context)
if not api:
# fallback, look in to config file
api = tools.config.get_misc('birtconn', 'api')
if not api:
raise ValueError("System property 'birtconn.api' is not defined.")
return os.path.join(api, 'report')
def serialize(obj):
if isinstance(obj, datetime):
return datetime.strftime(obj, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if isinstance(obj, date):
return datetime.strftime(obj, tools.DEFAULT_SERVER_DATE_FORMAT)
if isinstance(obj, time):
return datetime.strftime(obj, tools.DEFAULT_SERVER_TIME_FORMAT)
raise TypeError(obj)
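# serialize() is intended as the `default` hook for json.dumps (see its use in
# report_birt.create below). Illustrative call with a hypothetical payload:
#
#   json.dumps({'printed_at': datetime.now()}, default=serialize)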
class report_birt_report_wizard(osv.osv_memory):
_name = 'report_birt.report_wizard'
_description = 'BIRT Report Wizard'
_columns = {
'__report_name': fields.char(size=64, string="Report Name"),
'__values': fields.text(string="Values"),
}
def _report_get(self, cr, uid, context=None):
if 'report_name' in context:
report_obj = self.pool.get('ir.actions.report.xml')
found = report_obj.search(cr, uid, [('report_name', '=', context['report_name'])])
if found:
report_id = found[0]
report = report_obj.read(cr, uid, report_id, ['report_file'])
report_api = get_report_api(cr, uid, self.pool, context)
r = requests.get('%s?report_file=%s' % (report_api, report['report_file']))
return r.json()
return {}
def default_get_recursively(self, cr, uid, fields_list, parameters, context=None):
res = {}
for param in parameters:
name = param['name']
if name.startswith('__'):
key = name[2:]
if key in context:
res[name] = context[key]
else:
ptype = param['type'].split('/')
if ptype[0] == "scalar":
fieldType = param['fieldType']
if fieldType == 'time':
# 'time' is not supported by OpenERP by default, but datetime is. So, we treat time as datetime field
# and use our custom timepicker widget.
fieldType = 'datetime'
if fieldType == 'char' and ptype[1] == 'multi-value':
val = param['defaultValue']
elif fieldType in ['boolean']:
                        # unfortunately, boolean field setter converts False to 'False', a truthy value.
# so we override it with the raw value which is in the correct type.
val = param['defaultValue']
else:
conv = getattr(fields, fieldType)(string=param['promptText'])._symbol_set[1]
v1 = param['defaultValue']
if isinstance(v1, (list, tuple)):
val = [conv(v) for v in v1]
else:
val = conv(v1)
res[name] = val
elif ptype[0] == "group":
parameters_g = param['parameters']
res_g = self.default_get_recursively(cr, uid, fields_list, parameters_g, context)
res.update(res_g)
return res
def default_get(self, cr, uid, fields_list, context=None):
res = {}
parameters = self._report_get(cr, uid, context)
return self.default_get_recursively(cr, uid, fields_list, parameters, context)
def fields_get_meta(self, cr, uid, param, context, res, fgroup=None):
name = param['name']
meta = {}
ptype = param['type'].split('/')
if ptype[0] == 'group':
for p in param['parameters']:
self.fields_get_meta(cr, uid, p, context, res, {'name': param['name'], 'string': param['promptText']})
if ptype[0] == 'scalar':
meta['type'] = param['fieldType']
meta['context'] = {
'scalar': ptype[1]
}
# refer to field_to_dict if you don't understand why
if 'selection' in param and param['selection']:
meta['selection'] = param['selection']
meta['type'] = 'selection' # override default input type
meta['string'] = param['promptText']
meta['required'] = param['required']
meta['invisible'] = param['hidden']
meta['help'] = param['helpText']
meta['context']['type'] = param['fieldType'] # actual data type
if fgroup:
meta['context']['fgroup'] = fgroup
res[name] = meta
def fields_get(self, cr, uid, fields_list=None, context=None, write_access=True):
context = context or {}
res = super(report_birt_report_wizard, self).fields_get(cr, uid, fields_list, context, write_access)
for f in self._columns:
for attr in ['invisible', 'readonly']:
res[f][attr] = True
parameters = self._report_get(cr, uid, context)
for param in parameters:
self.fields_get_meta(cr, uid, param, context, res)
def order_by(parameters):
def _wrap(name):
for i, param in enumerate(parameters):
ptype = param['type'].split('/')
if ptype[0] == 'group':
j = order_by(param['parameters'])(name)
if j != -1:
return (i * 10) + j
elif ptype[0] == 'scalar' and param['name'] == name:
return (i * 10)
return -1
return _wrap
if parameters:
res_cur = res
res_new = OrderedDict()
            # What we are trying to achieve here is to order the fields by the order in which they (the
            # parameters) are defined in the rptdesign. Because a parameter may be a group parameter, we
            # also inspect each group by looping over its contents.
for k in sorted(res, cmp, key=order_by(parameters)):
res_new[k] = res_cur[k]
res = res_new
        # if no fields are specified, return all
        if not fields_list:
            return res
        else:
            # return only what is requested
            return dict(filter(lambda (k, v): k in fields_list, res.items()))
def create(self, cr, uid, vals, context=None):
# 'vals' contains all field/value pairs, including report_name, parameters and values.
# But we don't have all the columns since they are dynamically generated
# based on report's parameters.
values = dict(filter(lambda (x, y): x not in ['__report_name', '__values'], vals.items()))
values = json.dumps(values)
report_name = context['report_name']
vals = {
'__report_name': report_name,
'__values': values,
}
return super(report_birt_report_wizard, self).create(cr, uid, vals, context)
def _get_lang_dict(self, cr, uid, context):
lang_obj = self.pool.get('res.lang')
lang = context and context.get('lang', 'en_US') or 'en_US'
lang_ids = lang_obj.search(cr, uid, [('code','=',lang)])
if not lang_ids:
lang_ids = lang_obj.search(cr, uid, [('code','=','en_US')])
lang_obj = lang_obj.browse(cr, uid, lang_ids[0])
return {'date_format': lang_obj.date_format, 'time_format': lang_obj.time_format}
def print_report(self, cr, uid, ids, context=None):
lang_dict = self._get_lang_dict(cr, uid, context)
values = self.read(cr, uid, ids[0], ['__values'], context=context)['__values']
values = json.loads(values)
def conv(v1):
# cast value to the correct type with OpenERP's internal setter
v2 = getattr(fields, t1)(**descriptor)._symbol_set[1](v1)
            # but sometimes we have to override the value with a different type, because
            # OpenERP differs slightly from, or does not support, the param type BIRT expects.
if t1 == 'char' and s1 == 'multi-value':
# our multivalue checkbox widget that sends values in an array
if not isinstance(v1, (list, tuple)):
v1 = [v1]
v2 = v1
elif t1 == 'time':
# NOTE: time is represented as datetime field with custom timepicker widget
time_format = lang_dict['time_format']
cc = time_format.count(':') + 1
v2 = datetime.strptime(':'.join(v1.split(':')[:cc]), time_format)
v2 = getattr(fields, 'datetime')(**descriptor)._symbol_set[1](v1)
return v2
report_name = context['report_name']
fg = self.fields_get(cr, uid, context={'report_name': report_name})
for (name, descriptor) in fg.items():
if name not in self._columns:
s1 = descriptor['context']['scalar']
t1 = descriptor['context']['type']
v1 = values[name]
if isinstance(v1, (list, tuple)):
v2 = [conv(v) for v in v1]
else:
v2 = conv(v1)
values[name] = v2
return {
'type': 'ir.actions.report.xml',
'report_name': context['report_name'],
'datas': values,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(report_birt_report_wizard, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu)
if view_type == 'search':
return res
xarch = etree.XML(res['arch'])
group_values = xarch.xpath('//group[@name="__values"]')[0]
for field, descriptor in self.fields_get(cr, uid, context=context).iteritems():
if 'context' in descriptor and 'fgroup' in descriptor['context']:
fgroup = descriptor['context']['fgroup']
elg = xarch.xpath('//group[@name="%(name)s"][@string="%(string)s"]' % fgroup)
if elg:
_g = elg[0]
else:
_g = etree.SubElement(group_values, 'group', name=fgroup['name'], string=fgroup['string'], colspan="2")
el = etree.SubElement(_g, 'field', name=field)
else:
el = etree.SubElement(group_values, 'field', name=field)
if field == 'time':
# use custom timepicker widget
el.set('widget', 'timepicker')
if 'multi-value' == descriptor.get('context', {}).get('scalar'):
# use custom multiselect widget
el.set('widget', 'multiselect')
xarch, xfields = self._view_look_dom_arch(cr, uid, xarch, view_id, context=context)
res['fields'] = xfields
res['arch'] = xarch
return res
report_birt_report_wizard()
class report_birt(report_int):
def __init__(self, name, table, reportxml_id):
super(report_birt, self).__init__(name)
self.table = table
self.reportxml_id = reportxml_id
def create(self, cr, uid, ids, vals, context):
pool = pooler.get_pool(cr.dbname)
pool_reportxml = pool.get('ir.actions.report.xml')
reportxml = pool_reportxml.browse(cr, uid, self.reportxml_id)
headers = {'Content-type': 'application/json', 'Accept': 'application/octet-stream'}
for k, v in context.items():
akey = '__%s' % k
if akey not in vals:
vals[akey] = v
data = {
'reportFile': reportxml.report_file,
'__values': vals,
}
report_api = get_report_api(cr, uid, pool, context)
req = requests.post(report_api, data=json.dumps(data, default=serialize), headers=headers)
ext = re.search(r'filename=.+\.(.+);?', req.headers.get('content-disposition')).group(1)
return (req.content, ext)
class registry(osv.osv):
_inherit = 'ir.actions.report.xml'
def register(self, cr, uid, ids, context=None):
svccls = netsvc.Service
if ids:
cr.execute("SELECT id, model, report_name FROM ir_act_report_xml WHERE id in (%s)" % (', '.join([str(id) for id in ids]),))
result = cr.dictfetchall()
for r in result:
name = 'report.' + r['report_name']
svccls.remove(name)
report_birt(name, r['model'], r['id'])
def register_all(self, cr):
cr.execute("SELECT id FROM ir_act_report_xml WHERE report_type = 'birt' ORDER BY id")
ids = [row['id'] for row in cr.dictfetchall()]
self.register(cr, SUPERUSER_ID, ids)
registry()
| agpl-3.0 | 4,450,069,242,506,705,000 | 3,156,832,280,179,128,000 | 40.289231 | 136 | 0.54393 | false |
eaas-framework/virtualbox | src/VBox/GuestHost/OpenGL/spu_loader/spucopy.py | 22 | 1634 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE AUTOMATICALLY GENERATED BY spucopy.py SCRIPT */
#include "cr_spu.h"
#include "cr_mem.h"
void crSPUCopyDispatchTable( SPUDispatchTable *dst, SPUDispatchTable *src )
{
"""
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in keys:
print '\tdst->%s = src->%s;' % (func_name, func_name)
# if the destination is already a copy of something, we'd better make sure
# that we take it off its source's copy list first.
print """
if (dst->copy_of != NULL)
{
/*
* dst was already a copy, go back to the original,
* and remove dst from the original's copyList.
*/
struct _copy_list_node *temp, *prior = NULL;
for (temp = dst->copy_of->copyList; temp; prior = temp, temp = temp->next)
{
if (temp->copy == dst)
{
if (prior)
{
prior->next = temp->next;
}
else
{
dst->copy_of->copyList = temp->next;
}
crFree( temp );
break;
}
}
}
/*
* Now that dst->copy_of is unused, set it to point to our
* new original.
*/
if (src->copy_of)
{
dst->copy_of = src->copy_of;
}
else
{
dst->copy_of = src;
}
/*
* Create a new copy node, so the src can keep track of the
* new copy (i.e. dst).
*/
{
struct _copy_list_node *copynode;
copynode = (struct _copy_list_node*)crAlloc( sizeof( *copynode ) );
copynode->copy = dst;
copynode->next = src->copyList;
src->copyList = copynode;
}
}
"""
| gpl-2.0 | 5,432,855,839,055,956,000 | 9,180,553,351,150,756,000 | 19.683544 | 76 | 0.630967 | false |
Midrya/chromium | rietveld.py | 1 | 26054 | # coding: utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines class Rietveld to easily access a rietveld instance.
Security implications:
The following hypotheses are made:
- Rietveld enforces:
  - Nobody other than the issue owner can upload a patch set
  - Verifies the issue owner's credentials when creating new issues
  - An issue's owner can't change once the issue is created
  - A patch set cannot be modified
"""
import copy
import errno
import json
import logging
import re
import socket
import ssl
import sys
import time
import urllib
import urllib2
import urlparse
import patch
from third_party import upload
import third_party.oauth2client.client as oa2client
from third_party import httplib2
# Appengine replies with 302 when authentication fails (sigh.)
oa2client.REFRESH_STATUS_CODES.append(302)
upload.LOGGER.setLevel(logging.WARNING) # pylint: disable=E1103
class Rietveld(object):
"""Accesses rietveld."""
def __init__(
self, url, auth_config, email=None, extra_headers=None, maxtries=None):
self.url = url.rstrip('/')
self.rpc_server = upload.GetRpcServer(self.url, auth_config, email)
self._xsrf_token = None
self._xsrf_token_time = None
self._maxtries = maxtries or 40
def xsrf_token(self):
if (not self._xsrf_token_time or
(time.time() - self._xsrf_token_time) > 30*60):
self._xsrf_token_time = time.time()
self._xsrf_token = self.get(
'/xsrf_token',
extra_headers={'X-Requesting-XSRF-Token': '1'})
return self._xsrf_token
def get_pending_issues(self):
"""Returns an array of dict of all the pending issues on the server."""
# TODO: Convert this to use Rietveld::search(), defined below.
return json.loads(
self.get('/search?format=json&commit=2&closed=3&'
'keys_only=True&limit=1000&order=__key__'))['results']
def close_issue(self, issue):
"""Closes the Rietveld issue for this changelist."""
logging.info('closing issue %d' % issue)
self.post("/%d/close" % issue, [('xsrf_token', self.xsrf_token())])
def get_description(self, issue):
"""Returns the issue's description.
    Converts any CRLF into LF and strips extraneous whitespace.
"""
return '\n'.join(self.get('/%d/description' % issue).strip().splitlines())
def get_issue_properties(self, issue, messages):
"""Returns all the issue's metadata as a dictionary."""
url = '/api/%d' % issue
if messages:
url += '?messages=true'
data = json.loads(self.get(url, retry_on_404=True))
data['description'] = '\n'.join(data['description'].strip().splitlines())
return data
def get_depends_on_patchset(self, issue, patchset):
"""Returns the patchset this patchset depends on if it exists."""
url = '/%d/patchset/%d/get_depends_on_patchset' % (issue, patchset)
resp = None
try:
resp = json.loads(self.post(url, []))
except (urllib2.HTTPError, ValueError):
# The get_depends_on_patchset endpoint does not exist on this Rietveld
# instance yet. Ignore the error and proceed.
# TODO(rmistry): Make this an error when all Rietveld instances have
# this endpoint.
pass
return resp
def get_patchset_properties(self, issue, patchset):
"""Returns the patchset properties."""
url = '/api/%d/%d' % (issue, patchset)
return json.loads(self.get(url))
def get_file_content(self, issue, patchset, item):
"""Returns the content of a new file.
Throws HTTP 302 exception if the file doesn't exist or is not a binary file.
"""
# content = 0 is the old file, 1 is the new file.
content = 1
url = '/%d/binary/%d/%d/%d' % (issue, patchset, item, content)
return self.get(url)
def get_file_diff(self, issue, patchset, item):
"""Returns the diff of the file.
Returns a useless diff for binary files.
"""
url = '/download/issue%d_%d_%d.diff' % (issue, patchset, item)
return self.get(url)
def get_patch(self, issue, patchset):
"""Returns a PatchSet object containing the details to apply this patch."""
props = self.get_patchset_properties(issue, patchset) or {}
out = []
for filename, state in props.get('files', {}).iteritems():
logging.debug('%s' % filename)
# If not status, just assume it's a 'M'. Rietveld often gets it wrong and
# just has status: null. Oh well.
status = state.get('status') or 'M'
if status[0] not in ('A', 'D', 'M', 'R'):
raise patch.UnsupportedPatchFormat(
filename, 'Change with status \'%s\' is not supported.' % status)
svn_props = self.parse_svn_properties(
state.get('property_changes', ''), filename)
if state.get('is_binary'):
if status[0] == 'D':
if status[0] != status.strip():
raise patch.UnsupportedPatchFormat(
filename, 'Deleted file shouldn\'t have property change.')
out.append(patch.FilePatchDelete(filename, state['is_binary']))
else:
content = self.get_file_content(issue, patchset, state['id'])
if not content:
# As a precaution due to a bug in upload.py for git checkout, refuse
# empty files. If it's empty, it's not a binary file.
raise patch.UnsupportedPatchFormat(
filename,
'Binary file is empty. Maybe the file wasn\'t uploaded in the '
'first place?')
out.append(patch.FilePatchBinary(
filename,
content,
svn_props,
is_new=(status[0] == 'A')))
continue
try:
diff = self.get_file_diff(issue, patchset, state['id'])
except urllib2.HTTPError, e:
if e.code == 404:
raise patch.UnsupportedPatchFormat(
filename, 'File doesn\'t have a diff.')
raise
# FilePatchDiff() will detect file deletion automatically.
p = patch.FilePatchDiff(filename, diff, svn_props)
out.append(p)
if status[0] == 'A':
# It won't be set for empty file.
p.is_new = True
if (len(status) > 1 and
status[1] == '+' and
not (p.source_filename or p.svn_properties)):
raise patch.UnsupportedPatchFormat(
filename, 'Failed to process the svn properties')
return patch.PatchSet(out)
@staticmethod
def parse_svn_properties(rietveld_svn_props, filename):
"""Returns a list of tuple [('property', 'newvalue')].
rietveld_svn_props is the exact format from 'svn diff'.
"""
rietveld_svn_props = rietveld_svn_props.splitlines()
svn_props = []
if not rietveld_svn_props:
return svn_props
# 1. Ignore svn:mergeinfo.
# 2. Accept svn:eol-style and svn:executable.
# 3. Refuse any other.
# \n
# Added: svn:ignore\n
# + LF\n
spacer = rietveld_svn_props.pop(0)
if spacer or not rietveld_svn_props:
    # svn diff always puts a spacer between the unified diff and property
# diff
raise patch.UnsupportedPatchFormat(
filename, 'Failed to parse svn properties.')
while rietveld_svn_props:
# Something like 'Added: svn:eol-style'. Note the action is localized.
# *sigh*.
action = rietveld_svn_props.pop(0)
match = re.match(r'^(\w+): (.+)$', action)
if not match or not rietveld_svn_props:
raise patch.UnsupportedPatchFormat(
filename,
'Failed to parse svn properties: %s, %s' % (action, svn_props))
if match.group(2) == 'svn:mergeinfo':
# Silently ignore the content.
rietveld_svn_props.pop(0)
continue
if match.group(1) not in ('Added', 'Modified'):
# Will fail for our French friends.
raise patch.UnsupportedPatchFormat(
filename, 'Unsupported svn property operation.')
if match.group(2) in ('svn:eol-style', 'svn:executable', 'svn:mime-type'):
# ' + foo' where foo is the new value. That's fragile.
content = rietveld_svn_props.pop(0)
match2 = re.match(r'^ \+ (.*)$', content)
if not match2:
raise patch.UnsupportedPatchFormat(
filename, 'Unsupported svn property format.')
svn_props.append((match.group(2), match2.group(1)))
return svn_props
def update_description(self, issue, description):
"""Sets the description for an issue on Rietveld."""
logging.info('new description for issue %d' % issue)
self.post('/%d/description' % issue, [
('description', description),
('xsrf_token', self.xsrf_token())])
def add_comment(self, issue, message, add_as_reviewer=False):
max_message = 10000
tail = '…\n(message too large)'
if len(message) > max_message:
message = message[:max_message-len(tail)] + tail
logging.info('issue %d; comment: %s' % (issue, message.strip()[:300]))
return self.post('/%d/publish' % issue, [
('xsrf_token', self.xsrf_token()),
('message', message),
('message_only', 'True'),
('add_as_reviewer', str(bool(add_as_reviewer))),
('send_mail', 'True'),
('no_redirect', 'True')])
def add_inline_comment(
self, issue, text, side, snapshot, patchset, patchid, lineno):
logging.info('add inline comment for issue %d' % issue)
return self.post('/inline_draft', [
('issue', str(issue)),
('text', text),
('side', side),
('snapshot', snapshot),
('patchset', str(patchset)),
('patch', str(patchid)),
('lineno', str(lineno))])
def set_flag(self, issue, patchset, flag, value):
return self.post('/%d/edit_flags' % issue, [
('last_patchset', str(patchset)),
('xsrf_token', self.xsrf_token()),
(flag, str(value))])
def search(
self,
owner=None, reviewer=None,
base=None,
closed=None, private=None, commit=None,
created_before=None, created_after=None,
modified_before=None, modified_after=None,
per_request=None, keys_only=False,
with_messages=False):
"""Yields search results."""
# These are expected to be strings.
string_keys = {
'owner': owner,
'reviewer': reviewer,
'base': base,
'created_before': created_before,
'created_after': created_after,
'modified_before': modified_before,
'modified_after': modified_after,
}
# These are either None, False or True.
three_state_keys = {
'closed': closed,
'private': private,
'commit': commit,
}
url = '/search?format=json'
# Sort the keys mainly to ease testing.
for key in sorted(string_keys):
value = string_keys[key]
if value:
url += '&%s=%s' % (key, urllib2.quote(value))
for key in sorted(three_state_keys):
value = three_state_keys[key]
if value is not None:
url += '&%s=%d' % (key, int(value) + 1)
if keys_only:
url += '&keys_only=True'
if with_messages:
url += '&with_messages=True'
if per_request:
url += '&limit=%d' % per_request
cursor = ''
while True:
output = self.get(url + cursor)
if output.startswith('<'):
# It's an error message. Return as no result.
break
data = json.loads(output) or {}
if not data.get('results'):
break
for i in data['results']:
yield i
cursor = '&cursor=%s' % data['cursor']
def trigger_try_jobs(
self, issue, patchset, reason, clobber, revision, builders_and_tests,
master=None, category='cq'):
"""Requests new try jobs.
|builders_and_tests| is a map of builders: [tests] to run.
|master| is the name of the try master the builders belong to.
|category| is used to distinguish regular jobs and experimental jobs.
Returns the keys of the new TryJobResult entites.
"""
params = [
('reason', reason),
('clobber', 'True' if clobber else 'False'),
('builders', json.dumps(builders_and_tests)),
('xsrf_token', self.xsrf_token()),
('category', category),
]
if revision:
params.append(('revision', revision))
if master:
# Temporarily allow empty master names for old configurations. The try
# job will not be associated with a master name on rietveld. This is
# going to be deprecated.
params.append(('master', master))
return self.post('/%d/try/%d' % (issue, patchset), params)
def trigger_distributed_try_jobs(
self, issue, patchset, reason, clobber, revision, masters,
category='cq'):
"""Requests new try jobs.
|masters| is a map of masters: map of builders: [tests] to run.
|category| is used to distinguish regular jobs and experimental jobs.
"""
for (master, builders_and_tests) in masters.iteritems():
self.trigger_try_jobs(
issue, patchset, reason, clobber, revision, builders_and_tests,
master, category)
def get_pending_try_jobs(self, cursor=None, limit=100):
"""Retrieves the try job requests in pending state.
Returns a tuple of the list of try jobs and the cursor for the next request.
"""
url = '/get_pending_try_patchsets?limit=%d' % limit
extra = ('&cursor=' + cursor) if cursor else ''
data = json.loads(self.get(url + extra))
return data['jobs'], data['cursor']
def get(self, request_path, **kwargs):
kwargs.setdefault('payload', None)
return self._send(request_path, **kwargs)
def post(self, request_path, data, **kwargs):
ctype, body = upload.EncodeMultipartFormData(data, [])
return self._send(request_path, payload=body, content_type=ctype, **kwargs)
def _send(self, request_path, retry_on_404=False, **kwargs):
"""Sends a POST/GET to Rietveld. Returns the response body."""
# rpc_server.Send() assumes timeout=None by default; make sure it's set
# to something reasonable.
kwargs.setdefault('timeout', 15)
logging.debug('POSTing to %s, args %s.', request_path, kwargs)
try:
# Sadly, upload.py calls ErrorExit() which does a sys.exit(1) on HTTP
# 500 in AbstractRpcServer.Send().
old_error_exit = upload.ErrorExit
def trap_http_500(msg):
"""Converts an incorrect ErrorExit() call into a HTTPError exception."""
m = re.search(r'(50\d) Server Error', msg)
if m:
# Fake an HTTPError exception. Cheezy. :(
raise urllib2.HTTPError(
request_path, int(m.group(1)), msg, None, None)
old_error_exit(msg)
upload.ErrorExit = trap_http_500
for retry in xrange(self._maxtries):
try:
logging.debug('%s' % request_path)
result = self.rpc_server.Send(request_path, **kwargs)
# Sometimes GAE returns a HTTP 200 but with HTTP 500 as the content.
# How nice.
return result
except urllib2.HTTPError, e:
if retry >= (self._maxtries - 1):
raise
flake_codes = [500, 502, 503]
if retry_on_404:
flake_codes.append(404)
if e.code not in flake_codes:
raise
except urllib2.URLError, e:
if retry >= (self._maxtries - 1):
raise
if (not 'Name or service not known' in e.reason and
not 'EOF occurred in violation of protocol' in e.reason and
# On windows we hit weird bug http://crbug.com/537417
# with message '[Errno 10060] A connection attempt failed...'
not (sys.platform.startswith('win') and
isinstance(e.reason, socket.error) and
e.reason.errno == errno.ETIMEDOUT
)
):
# Usually internal GAE flakiness.
raise
except ssl.SSLError, e:
if retry >= (self._maxtries - 1):
raise
if not 'timed out' in str(e):
raise
# If reaching this line, loop again. Uses a small backoff.
time.sleep(min(10, 1+retry*2))
except urllib2.HTTPError as e:
print 'Request to %s failed: %s' % (e.geturl(), e.read())
raise
finally:
upload.ErrorExit = old_error_exit
# DEPRECATED.
Send = get
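# Minimal usage sketch (comments only, not part of the original module),
# assuming `auth_config` is an object accepted by upload.GetRpcServer and
# that the host name and issue number below are hypothetical:
#   remote = Rietveld('https://codereview.example.org', auth_config)
#   props = remote.get_issue_properties(12345, messages=False)
#   remote.add_comment(12345, 'Looks good to me.')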
class OAuthRpcServer(object):
def __init__(self,
host,
client_email,
client_private_key,
private_key_password='notasecret',
user_agent=None,
timeout=None,
extra_headers=None):
"""Wrapper around httplib2.Http() that handles authentication.
client_email: email associated with the service account
client_private_key: encrypted private key, as a string
private_key_password: password used to decrypt the private key
"""
# Enforce https
host_parts = urlparse.urlparse(host)
if host_parts.scheme == 'https': # fine
self.host = host
elif host_parts.scheme == 'http':
upload.logging.warning('Changing protocol to https')
self.host = 'https' + host[4:]
else:
msg = 'Invalid url provided: %s' % host
upload.logging.error(msg)
raise ValueError(msg)
self.host = self.host.rstrip('/')
self.extra_headers = extra_headers or {}
if not oa2client.HAS_OPENSSL:
logging.error("No support for OpenSSL has been found, "
"OAuth2 support requires it.")
logging.error("Installing pyopenssl will probably solve this issue.")
raise RuntimeError('No OpenSSL support')
self.creds = oa2client.SignedJwtAssertionCredentials(
client_email,
client_private_key,
'https://www.googleapis.com/auth/userinfo.email',
private_key_password=private_key_password,
user_agent=user_agent)
self._http = self.creds.authorize(httplib2.Http(timeout=timeout))
def Send(self,
request_path,
payload=None,
content_type='application/octet-stream',
timeout=None,
extra_headers=None,
**kwargs):
"""Send a POST or GET request to the server.
Args:
request_path: path on the server to hit. This is concatenated with the
value of 'host' provided to the constructor.
payload: request is a POST if not None, GET otherwise
timeout: in seconds
extra_headers: (dict)
"""
# This method signature should match upload.py:AbstractRpcServer.Send()
method = 'GET'
headers = self.extra_headers.copy()
headers.update(extra_headers or {})
if payload is not None:
method = 'POST'
headers['Content-Type'] = content_type
prev_timeout = self._http.timeout
try:
if timeout:
self._http.timeout = timeout
# TODO(pgervais) implement some kind of retry mechanism (see upload.py).
url = self.host + request_path
if kwargs:
url += "?" + urllib.urlencode(kwargs)
# This weird loop is there to detect when the OAuth2 token has expired.
# This is specific to appengine *and* rietveld. It relies on the
# assumption that a 302 is triggered only by an expired OAuth2 token. This
# prevents any usage of redirections in pages accessed this way.
# This variable is used to make sure the following loop runs only twice.
redirect_caught = False
while True:
try:
ret = self._http.request(url,
method=method,
body=payload,
headers=headers,
redirections=0)
except httplib2.RedirectLimit:
if redirect_caught or method != 'GET':
logging.error('Redirection detected after logging in. Giving up.')
raise
redirect_caught = True
logging.debug('Redirection detected. Trying to log in again...')
self.creds.access_token = None
continue
break
return ret[1]
finally:
self._http.timeout = prev_timeout
class JwtOAuth2Rietveld(Rietveld):
"""Access to Rietveld using OAuth authentication.
This class is supposed to be used only by bots, since this kind of
access is restricted to service accounts.
"""
  # The parent __init__ is not called on purpose.
# pylint: disable=W0231
def __init__(self,
url,
client_email,
client_private_key_file,
private_key_password=None,
extra_headers=None,
maxtries=None):
if private_key_password is None: # '' means 'empty password'
private_key_password = 'notasecret'
self.url = url.rstrip('/')
bot_url = self.url
if self.url.endswith('googleplex.com'):
bot_url = self.url + '/bots'
with open(client_private_key_file, 'rb') as f:
client_private_key = f.read()
logging.info('Using OAuth login: %s' % client_email)
self.rpc_server = OAuthRpcServer(bot_url,
client_email,
client_private_key,
private_key_password=private_key_password,
extra_headers=extra_headers or {})
self._xsrf_token = None
self._xsrf_token_time = None
self._maxtries = maxtries or 40
class CachingRietveld(Rietveld):
"""Caches the common queries.
Not to be used in long-standing processes, like the commit queue.
"""
def __init__(self, *args, **kwargs):
super(CachingRietveld, self).__init__(*args, **kwargs)
self._cache = {}
def _lookup(self, function_name, args, update):
"""Caches the return values corresponding to the arguments.
It is important that the arguments are standardized, like None vs False.
"""
function_cache = self._cache.setdefault(function_name, {})
if args not in function_cache:
function_cache[args] = update(*args)
return copy.deepcopy(function_cache[args])
def get_description(self, issue):
return self._lookup(
'get_description',
(issue,),
super(CachingRietveld, self).get_description)
def get_issue_properties(self, issue, messages):
"""Returns the issue properties.
Because in practice the presubmit checks often ask without messages first
and then with messages, always ask with messages and strip off if not asked
for the messages.
"""
# It's a tad slower to request with the message but it's better than
# requesting the properties twice.
data = self._lookup(
'get_issue_properties',
(issue, True),
super(CachingRietveld, self).get_issue_properties)
if not messages:
# Assumes self._lookup uses deepcopy.
del data['messages']
return data
def get_patchset_properties(self, issue, patchset):
return self._lookup(
'get_patchset_properties',
(issue, patchset),
super(CachingRietveld, self).get_patchset_properties)
class ReadOnlyRietveld(object):
"""
Only provides read operations, and simulates writes locally.
  Intentionally do not inherit from Rietveld to avoid any write-issuing
  logic being invoked accidentally.
"""
# Dictionary of local changes, indexed by issue number as int.
_local_changes = {}
def __init__(self, *args, **kwargs):
# We still need an actual Rietveld instance to issue reads, just keep
# it hidden.
self._rietveld = Rietveld(*args, **kwargs)
@classmethod
def _get_local_changes(cls, issue):
"""Returns dictionary of local changes for |issue|, if any."""
return cls._local_changes.get(issue, {})
@property
def url(self):
return self._rietveld.url
def get_pending_issues(self):
pending_issues = self._rietveld.get_pending_issues()
# Filter out issues we've closed or unchecked the commit checkbox.
return [issue for issue in pending_issues
if not self._get_local_changes(issue).get('closed', False) and
self._get_local_changes(issue).get('commit', True)]
def close_issue(self, issue): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: closing issue %d' % issue)
ReadOnlyRietveld._local_changes.setdefault(issue, {})['closed'] = True
def get_issue_properties(self, issue, messages):
data = self._rietveld.get_issue_properties(issue, messages)
data.update(self._get_local_changes(issue))
return data
def get_patchset_properties(self, issue, patchset):
return self._rietveld.get_patchset_properties(issue, patchset)
def get_depends_on_patchset(self, issue, patchset):
return self._rietveld.get_depends_on_patchset(issue, patchset)
def get_patch(self, issue, patchset):
return self._rietveld.get_patch(issue, patchset)
def update_description(self, issue, description): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: new description for issue %d: %s' %
(issue, description))
def add_comment(self, # pylint:disable=R0201
issue,
message,
add_as_reviewer=False):
logging.info('ReadOnlyRietveld: posting comment "%s" to issue %d' %
(message, issue))
def set_flag(self, issue, patchset, flag, value): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: setting flag "%s" to "%s" for issue %d' %
(flag, value, issue))
ReadOnlyRietveld._local_changes.setdefault(issue, {})[flag] = value
def trigger_try_jobs( # pylint:disable=R0201
self, issue, patchset, reason, clobber, revision, builders_and_tests,
master=None, category='cq'):
logging.info('ReadOnlyRietveld: triggering try jobs %r for issue %d' %
(builders_and_tests, issue))
def trigger_distributed_try_jobs( # pylint:disable=R0201
self, issue, patchset, reason, clobber, revision, masters,
category='cq'):
logging.info('ReadOnlyRietveld: triggering try jobs %r for issue %d' %
(masters, issue))
| bsd-3-clause | 8,151,900,532,911,018,000 | -5,175,005,861,535,994,000 | 34.253045 | 80 | 0.620183 | false |
0x27/clusterd | src/platform/tomcat/auxiliary/info_dump.py | 6 | 1972 | from src.platform.tomcat.authenticate import checkAuth
from src.platform.tomcat.interfaces import TINTERFACES
from auxiliary import Auxiliary
from log import LOG
import utility
class Auxiliary:
""" The Manager application for Tomcat has a nifty fingerprinting
app that allows us to retrieve host OS, versioning, arch, etc.
which may aid in targeting payloads.
"""
def __init__(self):
self.name = 'Gather Tomcat info'
self.versions = ['Any']
self.flag = 'tc-info'
def check(self, fingerprint):
"""
"""
if fingerprint.title == TINTERFACES.MAN:
return True
return False
def run(self, fingerengine, fingerprint):
utility.Msg("Attempting to retrieve Tomcat info...")
base = "http://{0}:{1}".format(fingerengine.options.ip,
fingerprint.port)
relative = '/manager/serverinfo'
if fingerprint.version in ["7.0", "8.0"]:
relative = '/manager/text/serverinfo'
url = base + relative
response = utility.requests_get(url)
if response.status_code == 401:
utility.Msg("Host %s:%s requires auth, checking..." %
(fingerengine.options.ip, fingerprint.port), LOG.DEBUG)
cookies = checkAuth(fingerengine.options.ip, fingerprint.port,
fingerprint.title, fingerprint.version)
if cookies:
response = utility.requests_get(url, cookies=cookies[0],
auth=cookies[1])
else:
utility.Msg("Could not get auth for %s:%s" %
(fingerengine.options.ip, fingerprint.port), LOG.ERROR)
return
if response.status_code == 200:
info = response.content.split('\n')[1:-1]
for entry in info:
utility.Msg(entry)
| mit | 2,479,195,916,713,872,000 | -4,935,769,705,113,609,000 | 32.423729 | 87 | 0.559838 | false |
josephnoir/RIOT | cpu/esp8266/vendor/esp-idf/partition_table/gen_esp32part.py | 6 | 14010 | #!/usr/bin/env python
#
# ESP32 partition table generation tool
#
# Converts partition tables to/from CSV and binary formats.
#
# See http://esp-idf.readthedocs.io/en/latest/api-guides/partition-tables.html
# for explanation of partition table structure and uses.
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
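# Illustrative sketch (not part of the original tool): a CSV table in the
# format parsed below might look like
#   # Name,     Type, SubType,  Offset,   Size,   Flags
#   nvs,        data, nvs,      0x9000,   0x6000,
#   phy_init,   data, phy,      0xf000,   0x1000,
#   factory,    app,  factory,  0x10000,  1M,
# and could be converted to the binary format with e.g.
#   ./gen_esp32part.py partitions.csv partitions.bin
# The file names and offsets above are hypothetical examples.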
from __future__ import print_function, division
import argparse
import os
import re
import struct
import sys
MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
__version__ = '1.0'
quiet = False
def status(msg):
""" Print status message to stderr """
if not quiet:
critical(msg)
def critical(msg):
""" Print critical message to stderr """
if not quiet:
sys.stderr.write(msg)
sys.stderr.write('\n')
class PartitionTable(list):
def __init__(self):
super(PartitionTable, self).__init__(self)
@classmethod
def from_csv(cls, csv_contents):
res = PartitionTable()
lines = csv_contents.splitlines()
def expand_vars(f):
f = os.path.expandvars(f)
m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
if m:
raise InputError("unknown variable '%s'" % m.group(1))
return f
for line_no in range(len(lines)):
line = expand_vars(lines[line_no]).strip()
if line.startswith("#") or len(line) == 0:
continue
try:
res.append(PartitionDefinition.from_csv(line))
except InputError as e:
raise InputError("Error at line %d: %s" % (line_no+1, e))
except Exception:
critical("Unexpected error parsing line %d: %s" % (line_no+1, line))
raise
# fix up missing offsets & negative sizes
last_end = 0x5000 # first offset after partition table
for e in res:
if e.offset is None:
pad_to = 0x10000 if e.type == PartitionDefinition.APP_TYPE else 4
if last_end % pad_to != 0:
last_end += pad_to - (last_end % pad_to)
e.offset = last_end
if e.size < 0:
e.size = -e.size - e.offset
last_end = e.offset + e.size
return res
def __getitem__(self, item):
""" Allow partition table access via name as well as by
numeric index. """
if isinstance(item, str):
for x in self:
if x.name == item:
return x
raise ValueError("No partition entry named '%s'" % item)
else:
return super(PartitionTable, self).__getitem__(item)
def verify(self):
# verify each partition individually
for p in self:
p.verify()
# check for overlaps
last = None
for p in sorted(self, key=lambda x:x.offset):
if p.offset < 0x5000:
raise InputError("Partition offset 0x%x is below 0x5000" % p.offset)
if last is not None and p.offset < last.offset + last.size:
raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset, last.offset, last.offset+last.size-1))
last = p
@classmethod
def from_binary(cls, b):
result = cls()
for o in range(0,len(b),32):
data = b[o:o+32]
if len(data) != 32:
raise InputError("Partition table length must be a multiple of 32 bytes")
if data == b'\xFF'*32:
return result # got end marker
result.append(PartitionDefinition.from_binary(data))
raise InputError("Partition table is missing an end-of-table marker")
def to_binary(self):
result = b"".join(e.to_binary() for e in self)
        if len(result) >= MAX_PARTITION_LENGTH:
raise InputError("Binary partition table length (%d) longer than max" % len(result))
result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing
return result
def to_csv(self, simple_formatting=False):
rows = [ "# Espressif ESP32 Partition Table",
"# Name, Type, SubType, Offset, Size, Flags" ]
rows += [ x.to_csv(simple_formatting) for x in self ]
return "\n".join(rows) + "\n"
class PartitionDefinition(object):
APP_TYPE = 0x00
DATA_TYPE = 0x01
TYPES = {
"app" : APP_TYPE,
"data" : DATA_TYPE,
}
# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
SUBTYPES = {
APP_TYPE : {
"factory" : 0x00,
"test" : 0x20,
},
DATA_TYPE : {
"ota" : 0x00,
"phy" : 0x01,
"nvs" : 0x02,
"coredump" : 0x03,
"esphttpd" : 0x80,
"fat" : 0x81,
"spiffs" : 0x82,
},
}
MAGIC_BYTES = b"\xAA\x50"
ALIGNMENT = {
APP_TYPE : 0x1000,
DATA_TYPE : 0x04,
}
# dictionary maps flag name (as used in CSV flags list, property name)
# to bit set in flags words in binary format
FLAGS = {
"encrypted" : 0
}
# add subtypes for the 16 OTA slot values ("ota_XXX, etc.")
for ota_slot in range(16):
SUBTYPES[TYPES["app"]]["ota_%d" % ota_slot] = 0x10 + ota_slot
def __init__(self):
self.name = ""
self.type = None
self.subtype = None
self.offset = None
self.size = None
self.encrypted = False
@classmethod
def from_csv(cls, line):
""" Parse a line from the CSV """
line_w_defaults = line + ",,,," # lazy way to support default fields
fields = [ f.strip() for f in line_w_defaults.split(",") ]
res = PartitionDefinition()
res.name = fields[0]
res.type = res.parse_type(fields[1])
res.subtype = res.parse_subtype(fields[2])
res.offset = res.parse_address(fields[3])
res.size = res.parse_address(fields[4])
if res.size is None:
raise InputError("Size field can't be empty")
flags = fields[5].split(":")
for flag in flags:
if flag in cls.FLAGS:
setattr(res, flag, True)
elif len(flag) > 0:
raise InputError("CSV flag column contains unknown flag '%s'" % (flag))
return res
def __eq__(self, other):
return self.name == other.name and self.type == other.type \
and self.subtype == other.subtype and self.offset == other.offset \
and self.size == other.size
def __repr__(self):
def maybe_hex(x):
return "0x%x" % x if x is not None else "None"
return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0,
maybe_hex(self.offset), maybe_hex(self.size))
def __str__(self):
return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type, self.subtype, self.offset or -1, self.size or -1)
def __cmp__(self, other):
return self.offset - other.offset
def parse_type(self, strval):
if strval == "":
raise InputError("Field 'type' can't be left empty.")
return parse_int(strval, self.TYPES)
def parse_subtype(self, strval):
if strval == "":
return 0 # default
return parse_int(strval, self.SUBTYPES.get(self.type, {}))
def parse_address(self, strval):
if strval == "":
return None # PartitionTable will fill in default
return parse_int(strval)
def verify(self):
if self.type is None:
raise ValidationError(self, "Type field is not set")
if self.subtype is None:
raise ValidationError(self, "Subtype field is not set")
if self.offset is None:
raise ValidationError(self, "Offset field is not set")
align = self.ALIGNMENT.get(self.type, 4)
if self.offset % align:
raise ValidationError(self, "Offset 0x%x is not aligned to 0x%x" % (self.offset, align))
if self.size is None:
raise ValidationError(self, "Size field is not set")
STRUCT_FORMAT = "<2sBBLL16sL"
@classmethod
def from_binary(cls, b):
if len(b) != 32:
raise InputError("Partition definition length must be exactly 32 bytes. Got %d bytes." % len(b))
res = cls()
(magic, res.type, res.subtype, res.offset,
res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
if b"\x00" in res.name: # strip null byte padding from name string
res.name = res.name[:res.name.index(b"\x00")]
res.name = res.name.decode()
if magic != cls.MAGIC_BYTES:
raise InputError("Invalid magic bytes (%r) for partition definition" % magic)
for flag,bit in cls.FLAGS.items():
if flags & (1<<bit):
setattr(res, flag, True)
flags &= ~(1<<bit)
if flags != 0:
critical("WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?" % flags)
return res
def get_flags_list(self):
return [ flag for flag in self.FLAGS.keys() if getattr(self, flag) ]
def to_binary(self):
flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
return struct.pack(self.STRUCT_FORMAT,
self.MAGIC_BYTES,
self.type, self.subtype,
self.offset, self.size,
self.name.encode(),
flags)
def to_csv(self, simple_formatting=False):
def addr_format(a, include_sizes):
if not simple_formatting and include_sizes:
for (val, suffix) in [ (0x100000, "M"), (0x400, "K") ]:
if a % val == 0:
return "%d%s" % (a // val, suffix)
return "0x%x" % a
def lookup_keyword(t, keywords):
for k,v in keywords.items():
if simple_formatting == False and t == v:
return k
return "%d" % t
def generate_text_flags():
""" colon-delimited list of flags """
return ":".join(self.get_flags_list())
return ",".join([ self.name,
lookup_keyword(self.type, self.TYPES),
lookup_keyword(self.subtype, self.SUBTYPES.get(self.type, {})),
addr_format(self.offset, False),
addr_format(self.size, True),
generate_text_flags()])
def parse_int(v, keywords={}):
"""Generic parser for integer fields - int(x,0) with provision for
k/m/K/M suffixes and 'keyword' value lookup.
"""
try:
for letter, multiplier in [ ("k",1024), ("m",1024*1024) ]:
if v.lower().endswith(letter):
return parse_int(v[:-1], keywords) * multiplier
return int(v, 0)
except ValueError:
if len(keywords) == 0:
raise InputError("Invalid field value %s" % v)
try:
return keywords[v.lower()]
except KeyError:
raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
def main():
global quiet
parser = argparse.ArgumentParser(description='ESP32 partition table utility')
parser.add_argument('--verify', '-v', help='Verify partition table fields', default=True, action='store_false')
parser.add_argument('--quiet', '-q', help="Don't print status messages to stderr", action='store_true')
parser.add_argument('input', help='Path to CSV or binary file to parse. Will use stdin if omitted.', type=argparse.FileType('rb'), default=sys.stdin)
parser.add_argument('output', help='Path to output converted binary or CSV file. Will use stdout if omitted, unless the --display argument is also passed (in which case only the summary is printed.)',
nargs='?',
default='-')
args = parser.parse_args()
quiet = args.quiet
input = args.input.read()
input_is_binary = input[0:2] == PartitionDefinition.MAGIC_BYTES
if input_is_binary:
status("Parsing binary partition input...")
table = PartitionTable.from_binary(input)
else:
input = input.decode()
status("Parsing CSV input...")
table = PartitionTable.from_csv(input)
if args.verify:
status("Verifying table...")
table.verify()
if input_is_binary:
output = table.to_csv()
with sys.stdout if args.output == '-' else open(args.output, 'w') as f:
f.write(output)
else:
output = table.to_binary()
with sys.stdout.buffer if args.output == '-' else open(args.output, 'wb') as f:
f.write(output)
class InputError(RuntimeError):
def __init__(self, e):
super(InputError, self).__init__(e)
class ValidationError(InputError):
def __init__(self, partition, message):
super(ValidationError, self).__init__(
"Partition %s invalid: %s" % (partition.name, message))
if __name__ == '__main__':
try:
main()
except InputError as e:
print(e, file=sys.stderr)
sys.exit(2)
| lgpl-2.1 | -1,700,023,855,380,943,600 | -523,633,929,552,322,750 | 35.015424 | 204 | 0.561456 | false |
xapi-project/xen-api-sdk | python/samples/vm_start_async.py | 8 | 3389 | #!/usr/bin/env python
# Copyright (c) Citrix Systems, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Simple example to demonstrate how to use an asynchronous operation,
# namely the asynchronous version of the VM start method.
# The example finds a halted VM, starts it, retrieves the task reference
# and queries the task's status.
import pprint
import time
import sys
import XenAPI
def main(session):
print "Looking for a halted VM..."
vms = session.xenapi.VM.get_all_records()
vm = None
for vm_ref in vms:
vm_rec = vms[vm_ref]
if not vm_rec['is_a_template'] and not vm_rec['is_control_domain']\
and vm_rec["power_state"] == "Halted":
print "Found it:"
pprint.pprint(vm_rec)
vm = vm_ref
break
if vm is None:
print "Unable to find a halted VM"
return
print "Attempting to start the halted VM asynchronously"
task = session.xenapi.Async.VM.start(vm, False, True)
task_record = session.xenapi.task.get_record(task)
print "The initial contents of the task record:"
pprint.pprint(task_record)
print "Waiting for the task to complete..."
while session.xenapi.task.get_status(task) == "pending":
time.sleep(1)
task_record = session.xenapi.task.get_record(task)
print "The final contents of the task record:"
pprint.pprint(task_record)
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Usage:"
print sys.argv[0], " <url> <username> <password>"
sys.exit(1)
url = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
# First acquire a valid session by logging in:
new_session = XenAPI.Session(url)
try:
new_session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-vm-start-async.py")
except XenAPI.Failure as f:
print "Failed to acquire a session: %s" % f.details
sys.exit(1)
try:
main(new_session)
finally:
new_session.xenapi.session.logout()
| bsd-2-clause | -272,498,261,901,608,500 | -171,901,627,437,982,940 | 34.673684 | 110 | 0.686633 | false |
D-L/SimpleBookMarks | src/tornado/auth.py | 15 | 57309 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains implementations of various third-party
authentication schemes.
All the classes in this file are class mixins designed to be used with
the `tornado.web.RequestHandler` class. They are used in two ways:
* On a login handler, use methods such as ``authenticate_redirect()``,
``authorize_redirect()``, and ``get_authenticated_user()`` to
establish the user's identity and store authentication tokens to your
database and/or cookies.
* In non-login handlers, use methods such as ``facebook_request()``
or ``twitter_request()`` to use the authentication tokens to make
requests to the respective services.
They all take slightly different arguments due to the fact all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OpenID::
class GoogleLoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("openid.mode", None):
user = yield self.get_authenticated_user()
# Save the user with e.g. set_secure_cookie()
else:
yield self.authenticate_redirect()
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import functools
import hashlib
import hmac
import time
import uuid
from tornado.concurrent import Future, chain_future, return_future
from tornado import gen
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.log import gen_log
from tornado.util import bytes_type, u, unicode_type, ArgReplacer
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
class AuthError(Exception):
pass
def _auth_future_to_callback(callback, future):
try:
result = future.result()
except AuthError as e:
gen_log.warning(str(e))
result = None
callback(result)
def _auth_return_future(f):
"""Similar to tornado.concurrent.return_future, but uses the auth
module's legacy callback interface.
Note that when using this decorator the ``callback`` parameter
inside the function will actually be a future.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = Future()
callback, args, kwargs = replacer.replace(future, args, kwargs)
if callback is not None:
future.add_done_callback(
functools.partial(_auth_future_to_callback, callback))
f(*args, **kwargs)
return future
return wrapper
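# Illustrative note (not part of the original module): a method decorated with
# @_auth_return_future can be consumed either as a Future or with the legacy
# callback interface, e.g. (names here are hypothetical):
#   user_future = mixin.get_authenticated_user()           # yield in a coroutine
#   mixin.get_authenticated_user(callback=on_user_loaded)  # callback style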
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
See `GoogleMixin` below for a customized example (which also
includes OAuth support).
Class attributes:
* ``_OPENID_ENDPOINT``: the identity provider's URI.
"""
@return_future
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Redirects to the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI with additional parameters including ``openid.mode``.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the `authenticate_redirect()` method (which is
often the same as the one that calls it; in that case you would
call `get_authenticated_user` if the ``openid.mode`` parameter
is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items())
args["openid.mode"] = u("check_authentication")
url = self._OPENID_ENDPOINT
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(url, self.async_callback(
self._on_authentication_verified, callback),
method="POST", body=urllib_parse.urlencode(args))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urlparse.urljoin(url, '/'),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, future, response):
if response.error or b"is_valid:true" not in response.body:
future.set_exception(AuthError(
"Invalid OpenID response: %s" % (response.error or
response.body)))
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name in self.request.arguments:
if name.startswith("openid.ns.") and \
self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns:
return u("")
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.keys():
if self.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
return u("")
return self.get_argument(ax_name, u(""))
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u(" ").join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email:
user["email"] = email
if locale:
user["locale"] = locale
if username:
user["username"] = username
claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
future.set_result(user)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuthMixin(object):
"""Abstract implementation of OAuth 1.0 and 1.0a.
See `TwitterMixin` and `FriendFeedMixin` below for example implementations,
or `GoogleMixin` for an OAuth/OpenID hybrid.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
* ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
* ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
advance registration of callbacks.
Subclasses must also override the `_oauth_get_user_future` and
`_oauth_consumer_token` methods.
"""
@return_future
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None, callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For
        some services (including Friendfeed), you must use a
previously-registered callback URI and cannot specify a
callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for
security purposes.
Note that this method is asynchronous, although it calls
`.RequestHandler.finish` for you so it may not be necessary
to pass a callback or use the `.Future` it returns. However,
if this method is called from a function decorated with
`.gen.coroutine`, you must call it with ``yield`` to keep the
response from being closed prematurely.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = self.get_auth_http_client()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
self.async_callback(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
else:
http_client.fetch(
self._oauth_request_token_url(),
self.async_callback(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token.
This method should be called from the handler for your
OAuth callback URL to complete the registration process. We run the
callback with the authenticated user dictionary. This dictionary
will contain an ``access_key`` which can be used to make authorized
requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service
used.
"""
future = callback
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
future.set_exception(AuthError(
"Missing OAuth request token cookie"))
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
future.set_exception(AuthError(
"Request token does not match cookie"))
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri == "oob":
args["oauth_callback"] = "oob"
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params:
args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, callback,
response):
if response.error:
raise Exception("Could not get request token: %s" % response.error)
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
base64.b64encode(escape.utf8(request_token["secret"])))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri == "oob":
self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
return
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(request_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if "verifier" in request_token:
args["oauth_verifier"] = request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_access_token(self, future, response):
if response.error:
future.set_exception(AuthError("Could not fetch access token"))
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback(
self.async_callback(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self):
"""Subclasses must override this to return their OAuth consumer keys.
The return value should be a `dict` with keys ``key`` and ``secret``.
"""
raise NotImplementedError()
@return_future
def _oauth_get_user_future(self, access_token, callback):
"""Subclasses must override this to get basic information about the
user.
Should return a `.Future` whose result is a dictionary
containing information about the user, which may have been
retrieved by using ``access_token`` to make a request to the
service.
The access token will be added to the returned dictionary to make
the result of `get_authenticated_user`.
For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported.
"""
# By default, call the old-style _oauth_get_user, but new code
# should override this method instead.
self._oauth_get_user(access_token, callback)
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, future, user_future):
if user_future.exception() is not None:
future.set_exception(user_future.exception())
return
user = user_future.result()
if not user:
future.set_exception(AuthError("Error getting user"))
return
user["access_token"] = access_token
future.set_result(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(access_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
args = {}
args.update(base_args)
args.update(parameters)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, method, url, args,
access_token)
else:
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = escape.to_basestring(signature)
return base_args
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
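# Illustrative sketch (not part of the original module): a minimal OAuthMixin
# subclass supplies the endpoint class attributes and the two required
# overrides, roughly along these lines (all names and URLs are hypothetical):
#   class ExampleOAuthMixin(OAuthMixin):
#       _OAUTH_REQUEST_TOKEN_URL = "https://example.com/oauth/request_token"
#       _OAUTH_ACCESS_TOKEN_URL = "https://example.com/oauth/access_token"
#       _OAUTH_AUTHORIZE_URL = "https://example.com/oauth/authorize"
#       _OAUTH_VERSION = "1.0a"
#
#       def _oauth_consumer_token(self):
#           return dict(key="consumer-key", secret="consumer-secret")
#
#       @return_future
#       def _oauth_get_user_future(self, access_token, callback):
#           callback({"username": "example"})  # normally fetched from the API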
class OAuth2Mixin(object):
"""Abstract implementation of OAuth 2.0.
See `FacebookGraphMixin` below for an example implementation.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
"""
@return_future
def authorize_redirect(self, redirect_uri=None, client_id=None,
client_secret=None, extra_params=None,
callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a redirect URL with
your application instead of passing one via this method. You
should call this method to log the user in, and then call
``get_authenticated_user`` in the handler for your
redirect URL to complete the authorization process.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
args = {
"redirect_uri": redirect_uri,
"client_id": client_id
}
if extra_params:
args.update(extra_params)
self.redirect(
url_concat(self._OAUTH_AUTHORIZE_URL, args))
callback()
def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
client_secret=None, code=None,
extra_params=None):
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
redirect_uri=redirect_uri,
code=code,
client_id=client_id,
client_secret=client_secret,
)
if extra_params:
args.update(extra_params)
return url_concat(url, args)
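# A minimal (hypothetical) OAuth2Mixin subclass only needs the two class
# attributes named above; the URLs below are placeholders. FacebookGraphMixin
# further down is the complete, real implementation.
#
#     class ExampleOAuth2Mixin(OAuth2Mixin):
#         _OAUTH_AUTHORIZE_URL = "https://example.com/oauth2/authorize"
#         _OAUTH_ACCESS_TOKEN_URL = "https://example.com/oauth2/token"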
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key
and Consumer Secret to the application
`~tornado.web.Application.settings` ``twitter_consumer_key`` and
``twitter_consumer_secret``. Use this mixin on the handler for the
URL you registered as your application's callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with Twitter and get access to their stream::
class TwitterLoginHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
The user object returned by `~OAuthMixin.get_authenticated_user`
includes the attributes ``username``, ``name``, ``access_token``,
and all of the custom Twitter user attributes described at
https://dev.twitter.com/docs/api/1.1/get/users/show
"""
_OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = False
_TWITTER_BASE_URL = "https://api.twitter.com/1.1"
@return_future
def authenticate_redirect(self, callback_uri=None, callback=None):
"""Just like `~OAuthMixin.authorize_redirect`, but
auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
self.async_callback(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
None, callback))
@_auth_return_future
def twitter_request(self, path, callback=None, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
The path should not include the format or API version number.
        (we automatically use JSON format and API version 1.1).
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at http://dev.twitter.com/
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned through that
process includes an 'access_token' attribute that can be used
to make authenticated requests via this method. Example
usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
new_entry = yield self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
"""
if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the
# usual pattern: http://search.twitter.com/search.json
url = path
else:
url = self._TWITTER_BASE_URL + path + ".json"
# Add the OAuth resource request signature if we have credentials
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http_callback = self.async_callback(self._on_twitter_request, callback)
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=http_callback)
else:
http.fetch(url, callback=http_callback)
def _on_twitter_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token):
user = yield self.twitter_request(
"/account/verify_credentials",
access_token=access_token)
if user:
user["username"] = user["screen_name"]
raise gen.Return(user)
class FriendFeedMixin(OAuthMixin):
"""FriendFeed OAuth authentication.
To authenticate with FriendFeed, register your application with
FriendFeed at http://friendfeed.com/api/applications. Then copy
your Consumer Key and Consumer Secret to the application
`~tornado.web.Application.settings` ``friendfeed_consumer_key``
and ``friendfeed_consumer_secret``. Use this mixin on the handler
for the URL you registered as your application's Callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with FriendFeed and get access to their feed::
class FriendFeedLoginHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
The user object returned by `~OAuthMixin.get_authenticated_user()` includes the
attributes ``username``, ``name``, and ``description`` in addition to
``access_token``. You should save the access token with the user;
it is required to make requests on behalf of the user later with
`friendfeed_request()`.
"""
_OAUTH_VERSION = "1.0"
_OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
_OAUTH_NO_CALLBACKS = True
@_auth_return_future
def friendfeed_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned
through that process includes an ``access_token`` attribute that
can be used to make authenticated requests via this
method.
Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
new_entry = yield self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://friendfeed-api.com/v2" + path
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
callback = self.async_callback(self._on_friendfeed_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_friendfeed_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth")
self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth")
return dict(
key=self.settings["friendfeed_consumer_key"],
secret=self.settings["friendfeed_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token, callback):
user = yield self.friendfeed_request(
"/feedinfo/" + access_token["username"],
include="id,name,description", access_token=access_token)
if user:
user["username"] = user["id"]
callback(user)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["id"]
callback(user)
class GoogleMixin(OpenIdMixin, OAuthMixin):
"""Google Open ID / OAuth authentication.
No application registration is necessary to use Google for
authentication or to access Google resources on behalf of a user.
Google implements both OpenID and OAuth in a hybrid mode. If you
just need the user's identity, use
`~OpenIdMixin.authenticate_redirect`. If you need to make
requests to Google on behalf of the user, use
`authorize_redirect`. On return, parse the response with
`~OpenIdMixin.get_authenticated_user`. We send a dict containing
the values for the user, including ``email``, ``name``, and
``locale``.
Example usage::
class GoogleLoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("openid.mode", None):
user = yield self.get_authenticated_user()
# Save the user with e.g. set_secure_cookie()
else:
yield self.authenticate_redirect()
"""
_OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
_OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"
@return_future
def authorize_redirect(self, oauth_scope, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Authenticates and authorizes for the given Google resource.
Some of the available resources which can be used in the ``oauth_scope``
argument are:
* Gmail Contacts - http://www.google.com/m8/feeds/
* Calendar - http://www.google.com/calendar/feeds/
* Finance - http://finance.google.com/finance/feeds/
You can authorize multiple resources by separating the resource
URLs with a space.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
oauth_scope=oauth_scope)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback):
"""Fetches the authenticated user data upon redirect."""
# Look to see if we are doing combined OpenID/OAuth
oauth_ns = ""
for name, values in self.request.arguments.items():
if name.startswith("openid.ns.") and \
values[-1] == b"http://specs.openid.net/extensions/oauth/1.0":
oauth_ns = name[10:]
break
token = self.get_argument("openid." + oauth_ns + ".request_token", "")
if token:
http = self.get_auth_http_client()
token = dict(key=token, secret="")
http.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
else:
chain_future(OpenIdMixin.get_authenticated_user(self),
callback)
def _oauth_consumer_token(self):
self.require_setting("google_consumer_key", "Google OAuth")
self.require_setting("google_consumer_secret", "Google OAuth")
return dict(
key=self.settings["google_consumer_key"],
secret=self.settings["google_consumer_secret"])
def _oauth_get_user_future(self, access_token):
return OpenIdMixin.get_authenticated_user(self)
class FacebookMixin(object):
"""Facebook Connect authentication.
*Deprecated:* New applications should use `FacebookGraphMixin`
below instead of this class. This class does not support the
Future-based interface seen on other classes in this module.
To authenticate with Facebook, register your application with
Facebook at http://www.facebook.com/developers/apps.php. Then
copy your API Key and Application Secret to the application settings
``facebook_api_key`` and ``facebook_secret``.
When your application is set up, you can use this mixin like this
to authenticate the user with Facebook::
class FacebookHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("session", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
yield self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Facebook auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by `get_authenticated_user` includes the
attributes ``facebook_uid`` and ``name`` in addition to session attributes
like ``session_key``. You should save the session key with the user; it is
required to make requests on behalf of the user later with
`facebook_request`.
"""
@return_future
def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
extended_permissions=None, callback=None):
"""Authenticates/installs this app for the current user.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
callback_uri = callback_uri or self.request.uri
args = {
"api_key": self.settings["facebook_api_key"],
"v": "1.0",
"fbconnect": "true",
"display": "page",
"next": urlparse.urljoin(self.request.full_url(), callback_uri),
"return_session": "true",
}
if cancel_uri:
args["cancel_url"] = urlparse.urljoin(
self.request.full_url(), cancel_uri)
if extended_permissions:
if isinstance(extended_permissions, (unicode_type, bytes_type)):
extended_permissions = [extended_permissions]
args["req_perms"] = ",".join(extended_permissions)
self.redirect("http://www.facebook.com/login.php?" +
urllib_parse.urlencode(args))
callback()
def authorize_redirect(self, extended_permissions, callback_uri=None,
cancel_uri=None, callback=None):
"""Redirects to an authorization request for the given FB resource.
The available resource names are listed at
http://wiki.developers.facebook.com/index.php/Extended_permission.
The most common resource types include:
* publish_stream
* read_stream
* email
* sms
extended_permissions can be a single permission name or a list of
names. To get the session secret and session key, call
get_authenticated_user() just as you would with
authenticate_redirect().
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
return self.authenticate_redirect(callback_uri, cancel_uri,
extended_permissions,
callback=callback)
def get_authenticated_user(self, callback):
"""Fetches the authenticated Facebook user.
The authenticated user includes the special Facebook attributes
'session_key' and 'facebook_uid' in addition to the standard
user attributes like 'name'.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
session = escape.json_decode(self.get_argument("session"))
self.facebook_request(
method="facebook.users.getInfo",
callback=self.async_callback(
self._on_get_user_info, callback, session),
session_key=session["session_key"],
uids=session["uid"],
fields="uid,first_name,last_name,name,locale,pic_square,"
"profile_url,username")
def facebook_request(self, method, callback, **args):
"""Makes a Facebook API REST request.
We automatically include the Facebook API key and signature, but
        it is the caller's responsibility to include 'session_key' and any
other required arguments to the method.
The available Facebook methods are documented here:
http://wiki.developers.facebook.com/index.php/API
Here is an example for the stream.get() method::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.facebook_request(
method="stream.get",
callback=self.async_callback(self._on_stream),
session_key=self.current_user["session_key"])
def _on_stream(self, stream):
if stream is None:
# Not authorized to read the stream yet?
self.redirect(self.authorize_redirect("read_stream"))
return
self.render("stream.html", stream=stream)
"""
self.require_setting("facebook_api_key", "Facebook Connect")
self.require_setting("facebook_secret", "Facebook Connect")
if not method.startswith("facebook."):
method = "facebook." + method
args["api_key"] = self.settings["facebook_api_key"]
args["v"] = "1.0"
args["method"] = method
args["call_id"] = str(long(time.time() * 1e6))
args["format"] = "json"
args["sig"] = self._signature(args)
url = "http://api.facebook.com/restserver.php?" + \
urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http.fetch(url, callback=self.async_callback(
self._parse_response, callback))
def _on_get_user_info(self, callback, session, users):
if users is None:
callback(None)
return
callback({
"name": users[0]["name"],
"first_name": users[0]["first_name"],
"last_name": users[0]["last_name"],
"uid": users[0]["uid"],
"locale": users[0]["locale"],
"pic_square": users[0]["pic_square"],
"profile_url": users[0]["profile_url"],
"username": users[0].get("username"),
"session_key": session["session_key"],
"session_expires": session.get("expires"),
})
def _parse_response(self, callback, response):
if response.error:
gen_log.warning("HTTP error from Facebook: %s", response.error)
callback(None)
return
try:
json = escape.json_decode(response.body)
except Exception:
gen_log.warning("Invalid JSON from Facebook: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
gen_log.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
callback(json)
def _signature(self, args):
parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
body = "".join(parts) + self.settings["facebook_secret"]
if isinstance(body, unicode_type):
body = body.encode("utf-8")
return hashlib.md5(body).hexdigest()
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2."""
_OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
_OAUTH_AUTHORIZE_URL = "https://graph.facebook.com/oauth/authorize?"
_OAUTH_NO_CALLBACKS = False
_FACEBOOK_BASE_URL = "https://graph.facebook.com"
@_auth_return_future
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
"""Handles the login for the Facebook user, returning a user object.
Example usage::
class FacebookGraphLoginHandler(LoginHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("code", False):
user = yield self.get_authenticated_user(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"})
"""
http = self.get_auth_http_client()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['id', 'name', 'first_name', 'last_name',
'locale', 'picture', 'link'])
if extra_fields:
fields.update(extra_fields)
http.fetch(self._oauth_request_token_url(**args),
self.async_callback(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
future, fields, response):
if response.error:
future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
return
args = escape.parse_qs_bytes(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
"expires": args.get("expires")
}
self.facebook_request(
path="/me",
callback=self.async_callback(
self._on_get_user_info, future, session, fields),
access_token=session["access_token"],
fields=",".join(fields)
)
def _on_get_user_info(self, future, session, fields, user):
if user is None:
future.set_result(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")})
future.set_result(fieldmap)
@_auth_return_future
def facebook_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can
obtain through `~OAuth2Mixin.authorize_redirect` and
`get_authenticated_user`. The user returned through that
process includes an ``access_token`` attribute that can be
used to make authenticated requests via this method.
Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
new_entry = yield self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
The given path is relative to ``self._FACEBOOK_BASE_URL``,
by default "https://graph.facebook.com".
.. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``.
"""
url = self._FACEBOOK_BASE_URL + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib_parse.urlencode(all_args)
callback = self.async_callback(self._on_facebook_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_facebook_request(self, future, response):
if response.error:
future.set_exception(AuthError("Error response %s fetching %s" %
(response.error, response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
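# Hedged usage sketch for the helper above; the consumer/token values and the
# URL are made up for illustration only.
#
#     consumer = dict(key="my-app-key", secret="my-app-secret")
#     token = dict(key="user-token", secret="user-token-secret")
#     args = dict(oauth_consumer_key=consumer["key"], oauth_token=token["key"],
#                 oauth_signature_method="HMAC-SHA1", oauth_version="1.0",
#                 oauth_timestamp="1300000000", oauth_nonce="abc123")
#     sig = _oauth_signature(consumer, "GET", "https://example.com/resource",
#                            args, token)  # base64-encoded HMAC-SHA1 digest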
def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
if isinstance(val, unicode_type):
val = val.encode("utf-8")
return urllib_parse.quote(val, safe="~")
def _oauth_parse_response(body):
# I can't find an officially-defined encoding for oauth responses and
# have never seen anyone use non-ascii. Leave the response in a byte
# string for python 2, and use utf8 on python 3.
body = escape.native_str(body)
p = urlparse.parse_qs(body, keep_blank_values=False)
token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
# Add the extra parameters the Provider included to the token
special = ("oauth_token", "oauth_token_secret")
token.update((k, p[k][0]) for k in p if k not in special)
return token
| gpl-2.0 | 984,096,605,336,370,700 | -1,072,816,479,030,398,000 | 40.588534 | 109 | 0.595997 | false |
planetserver/webclient | js/proj4js/tools/mergejs.py | 161 | 7973 | #!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# or (ideally) within a class comment definition
#
# /**
# * @class
# *
# * @requires OpenLayers/Layer.js
# */
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2007 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
import glob
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
def usage(filename):
"""
Displays a usage message.
"""
print "%s [-c <config file>] <output.js> <directory> [...]" % filename
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
[last]
core/api.js
[exclude]
3rd/logger.js
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
The files list in the `exclude` section will not be imported.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [line.strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip()] # Skip blank lines
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def run (sourceDirectory, outputFilename = None, configFile = None):
cfg = None
if configFile:
cfg = Config(configFile)
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
include = False
for included in cfg.include:
if glob.fnmatch.fnmatch(filepath, included):
include = True
if include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (filepath not in cfg.exclude):
exclude = False
for excluded in cfg.exclude:
if glob.fnmatch.fnmatch(filepath, excluded):
exclude = True
if not exclude:
allFiles.append(filepath)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
order = [] # List of filepaths to output, in a dependency satisfying order
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
print
from toposort import toposort
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
print "Resolution pass %s... " % resolution_pass
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
print
## Move forced first and last files to the required position
if cfg:
print "Re-ordering files..."
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
print
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
print "Exporting: ", f.filepath
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
print "\nTotal files merged: %d " % len(files)
if outputFilename:
print "\nGenerating: %s" % (outputFilename)
open(outputFilename, "w").write("".join(result))
return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
try:
outputFilename = args[0]
except IndexError:
usage(sys.argv[0])
raise SystemExit
else:
sourceDirectory = args[1]
if not sourceDirectory:
usage(sys.argv[0])
raise SystemExit
configFile = None
if options and options[0][0] == "-c":
configFile = options[0][1]
print "Parsing configuration file: %s" % filename
run( sourceDirectory, outputFilename, configFile )
| gpl-3.0 | -3,711,416,626,234,582,500 | 2,849,754,584,693,026,000 | 28.861423 | 90 | 0.578829 | false |
Mazecreator/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 79 | 2464 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 | -4,288,006,582,208,079,000 | -6,151,184,247,190,824,000 | 63.842105 | 106 | 0.787744 | false |
jmhodges/letsencrypt | letsencrypt-nginx/letsencrypt_nginx/parser.py | 26 | 16596 | """NginxParser is a member object of the NginxConfigurator class."""
import glob
import logging
import os
import pyparsing
import re
from letsencrypt import errors
from letsencrypt_nginx import obj
from letsencrypt_nginx import nginxparser
logger = logging.getLogger(__name__)
class NginxParser(object):
"""Class handles the fine details of parsing the Nginx Configuration.
:ivar str root: Normalized abosulte path to the server root
directory. Without trailing slash.
:ivar dict parsed: Mapping of file paths to parsed trees
"""
def __init__(self, root, ssl_options):
self.parsed = {}
self.root = os.path.abspath(root)
self.loc = self._set_locations(ssl_options)
# Parse nginx.conf and included files.
# TODO: Check sites-available/ as well. For now, the configurator does
# not enable sites from there.
self.load()
def load(self):
"""Loads Nginx files into a parsed tree.
"""
self.parsed = {}
self._parse_recursively(self.loc["root"])
def _parse_recursively(self, filepath):
"""Parses nginx config files recursively by looking at 'include'
directives inside 'http' and 'server' blocks. Note that this only
reads Nginx files that potentially declare a virtual host.
:param str filepath: The path to the files to parse, as a glob
"""
filepath = self.abs_path(filepath)
trees = self._parse_files(filepath)
for tree in trees:
for entry in tree:
if _is_include_directive(entry):
# Parse the top-level included file
self._parse_recursively(entry[1])
elif entry[0] == ['http'] or entry[0] == ['server']:
# Look for includes in the top-level 'http'/'server' context
for subentry in entry[1]:
if _is_include_directive(subentry):
self._parse_recursively(subentry[1])
elif entry[0] == ['http'] and subentry[0] == ['server']:
# Look for includes in a 'server' context within
# an 'http' context
for server_entry in subentry[1]:
if _is_include_directive(server_entry):
self._parse_recursively(server_entry[1])
def abs_path(self, path):
"""Converts a relative path to an absolute path relative to the root.
Does nothing for paths that are already absolute.
:param str path: The path
:returns: The absolute path
:rtype: str
"""
if not os.path.isabs(path):
return os.path.join(self.root, path)
else:
return path
def get_vhosts(self):
# pylint: disable=cell-var-from-loop
"""Gets list of all 'virtual hosts' found in Nginx configuration.
Technically this is a misnomer because Nginx does not have virtual
hosts, it has 'server blocks'.
:returns: List of :class:`~letsencrypt_nginx.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
enabled = True # We only look at enabled vhosts for now
vhosts = []
servers = {}
for filename in self.parsed:
tree = self.parsed[filename]
servers[filename] = []
srv = servers[filename] # workaround undefined loop var in lambdas
# Find all the server blocks
_do_for_subarray(tree, lambda x: x[0] == ['server'],
lambda x: srv.append(x[1]))
# Find 'include' statements in server blocks and append their trees
for i, server in enumerate(servers[filename]):
new_server = self._get_included_directives(server)
servers[filename][i] = new_server
for filename in servers:
for server in servers[filename]:
# Parse the server block into a VirtualHost object
parsed_server = _parse_server(server)
vhost = obj.VirtualHost(filename,
parsed_server['addrs'],
parsed_server['ssl'],
enabled,
parsed_server['names'],
server)
vhosts.append(vhost)
return vhosts
def _get_included_directives(self, block):
"""Returns array with the "include" directives expanded out by
concatenating the contents of the included file to the block.
:param list block:
:rtype: list
"""
result = list(block) # Copy the list to keep self.parsed idempotent
for directive in block:
if _is_include_directive(directive):
included_files = glob.glob(
self.abs_path(directive[1]))
for incl in included_files:
try:
result.extend(self.parsed[incl])
except KeyError:
pass
return result
def _parse_files(self, filepath, override=False):
"""Parse files from a glob
:param str filepath: Nginx config file path
:param bool override: Whether to parse a file that has been parsed
:returns: list of parsed tree structures
:rtype: list
"""
files = glob.glob(filepath)
trees = []
for item in files:
if item in self.parsed and not override:
continue
try:
with open(item) as _file:
parsed = nginxparser.load(_file)
self.parsed[item] = parsed
trees.append(parsed)
except IOError:
logger.warn("Could not open file: %s", item)
except pyparsing.ParseException:
logger.debug("Could not parse file: %s", item)
return trees
def _set_locations(self, ssl_options):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
root = self._find_config_root()
default = root
nginx_temp = os.path.join(self.root, "nginx_ports.conf")
if os.path.isfile(nginx_temp):
listen = nginx_temp
name = nginx_temp
else:
listen = default
name = default
return {"root": root, "default": default, "listen": listen,
"name": name, "ssl_options": ssl_options}
def _find_config_root(self):
"""Find the Nginx Configuration Root file."""
location = ['nginx.conf']
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError(
"Could not find configuration root")
def filedump(self, ext='tmp'):
"""Dumps parsed configurations into files.
:param str ext: The file extension to use for the dumped files. If
empty, this overrides the existing conf files.
"""
for filename in self.parsed:
tree = self.parsed[filename]
if ext:
filename = filename + os.path.extsep + ext
try:
with open(filename, 'w') as _file:
nginxparser.dump(tree, _file)
except IOError:
logger.error("Could not open file for writing: %s", filename)
def _has_server_names(self, entry, names):
"""Checks if a server block has the given set of server_names. This
is the primary way of identifying server blocks in the configurator.
Returns false if 'entry' doesn't look like a server block at all.
..todo :: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param list entry: The block to search
:param set names: The names to match
:rtype: bool
"""
if len(names) == 0:
# Nothing to identify blocks with
return False
if not isinstance(entry, list):
# Can't be a server block
return False
new_entry = self._get_included_directives(entry)
server_names = set()
for item in new_entry:
if not isinstance(item, list):
# Can't be a server block
return False
if item[0] == 'server_name':
server_names.update(_get_servernames(item[1]))
return server_names == names
def add_server_directives(self, filename, names, directives,
replace=False):
"""Add or replace directives in the first server block with names.
..note :: If replace is True, this raises a misconfiguration error
if the directive does not already exist.
..todo :: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param str filename: The absolute filename of the config file
:param set names: The server_name to match
:param list directives: The directives to add
:param bool replace: Whether to only replace existing directives
"""
_do_for_subarray(self.parsed[filename],
lambda x: self._has_server_names(x, names),
lambda x: _add_directives(x, directives, replace))
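    # Hedged usage sketch (file path, names and certificate paths below are
    # placeholders): the configurator typically uses this to inject TLS
    # directives into the server block matching a set of server_names, e.g.
    #
    #     parser.add_server_directives(
    #         '/etc/nginx/sites-enabled/example.conf', {'example.test'},
    #         [['ssl_certificate', '/path/to/cert.pem'],
    #          ['ssl_certificate_key', '/path/to/key.pem']])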
def add_http_directives(self, filename, directives):
"""Adds directives to the first encountered HTTP block in filename.
:param str filename: The absolute filename of the config file
:param list directives: The directives to add
"""
_do_for_subarray(self.parsed[filename],
lambda x: x[0] == ['http'],
lambda x: _add_directives(x[1], [directives], False))
def get_all_certs_keys(self):
"""Gets all certs and keys in the nginx config.
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: set
"""
c_k = set()
vhosts = self.get_vhosts()
for vhost in vhosts:
tup = [None, None, vhost.filep]
if vhost.ssl:
for directive in vhost.raw:
if directive[0] == 'ssl_certificate':
tup[0] = directive[1]
elif directive[0] == 'ssl_certificate_key':
tup[1] = directive[1]
if tup[0] is not None and tup[1] is not None:
c_k.add(tuple(tup))
return c_k
def _do_for_subarray(entry, condition, func):
"""Executes a function for a subarray of a nested array if it matches
the given condition.
:param list entry: The list to iterate over
:param function condition: Returns true iff func should be executed on item
:param function func: The function to call for each matching item
"""
if isinstance(entry, list):
if condition(entry):
func(entry)
else:
for item in entry:
_do_for_subarray(item, condition, func)
def get_best_match(target_name, names):
"""Finds the best match for target_name out of names using the Nginx
name-matching rules (exact > longest wildcard starting with * >
longest wildcard ending with * > regex).
:param str target_name: The name to match
:param set names: The candidate server names
:returns: Tuple of (type of match, the name that matched)
:rtype: tuple
"""
exact = []
wildcard_start = []
wildcard_end = []
regex = []
for name in names:
if _exact_match(target_name, name):
exact.append(name)
elif _wildcard_match(target_name, name, True):
wildcard_start.append(name)
elif _wildcard_match(target_name, name, False):
wildcard_end.append(name)
elif _regex_match(target_name, name):
regex.append(name)
if len(exact) > 0:
# There can be more than one exact match; e.g. eff.org, .eff.org
match = min(exact, key=len)
return ('exact', match)
if len(wildcard_start) > 0:
# Return the longest wildcard
match = max(wildcard_start, key=len)
return ('wildcard_start', match)
if len(wildcard_end) > 0:
# Return the longest wildcard
match = max(wildcard_end, key=len)
return ('wildcard_end', match)
if len(regex) > 0:
# Just return the first one for now
match = regex[0]
return ('regex', match)
return (None, None)
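# Quick illustration of the precedence implemented above (names are examples):
#
#     get_best_match('www.eff.org', {'www.eff.org', '*.eff.org', '~^www'})
#     # -> ('exact', 'www.eff.org')
#     get_best_match('irc.eff.org', {'*.eff.org', '~^irc'})
#     # -> ('wildcard_start', '*.eff.org')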
def _exact_match(target_name, name):
return target_name == name or '.' + target_name == name
def _wildcard_match(target_name, name, start):
# Degenerate case
if name == '*':
return True
parts = target_name.split('.')
match_parts = name.split('.')
# If the domain ends in a wildcard, do the match procedure in reverse
if not start:
parts.reverse()
match_parts.reverse()
# The first part must be a wildcard or blank, e.g. '.eff.org'
first = match_parts.pop(0)
if first != '*' and first != '':
return False
target_name = '.'.join(parts)
name = '.'.join(match_parts)
# Ex: www.eff.org matches *.eff.org, eff.org does not match *.eff.org
return target_name.endswith('.' + name)
def _regex_match(target_name, name):
# Must start with a tilde
if len(name) < 2 or name[0] != '~':
return False
# After tilde is a perl-compatible regex
try:
regex = re.compile(name[1:])
if re.match(regex, target_name):
return True
else:
return False
except re.error:
# perl-compatible regexes are sometimes not recognized by python
return False
def _is_include_directive(entry):
"""Checks if an nginx parsed entry is an 'include' directive.
:param list entry: the parsed entry
:returns: Whether it's an 'include' directive
:rtype: bool
"""
return (isinstance(entry, list) and
entry[0] == 'include' and len(entry) == 2 and
isinstance(entry[1], str))
def _get_servernames(names):
"""Turns a server_name string into a list of server names
:param str names: server names
:rtype: list
"""
whitespace_re = re.compile(r'\s+')
names = re.sub(whitespace_re, ' ', names)
return names.split(' ')
def _parse_server(server):
"""Parses a list of server directives.
:param list server: list of directives in a server block
:rtype: dict
"""
parsed_server = {}
parsed_server['addrs'] = set()
parsed_server['ssl'] = False
parsed_server['names'] = set()
for directive in server:
if directive[0] == 'listen':
addr = obj.Addr.fromstring(directive[1])
parsed_server['addrs'].add(addr)
if not parsed_server['ssl'] and addr.ssl:
parsed_server['ssl'] = True
elif directive[0] == 'server_name':
parsed_server['names'].update(
_get_servernames(directive[1]))
return parsed_server
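# Rough sketch of the expected shape (representations are illustrative only;
# 'addrs' really holds obj.Addr instances, not strings):
#
#     _parse_server([['listen', '443 ssl'], ['server_name', 'a.example.test']])
#     # -> {'addrs': {<Addr for '443 ssl'>}, 'ssl': True,
#     #     'names': {'a.example.test'}}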
def _add_directives(block, directives, replace=False):
"""Adds or replaces directives in a block. If the directive doesn't exist in
the entry already, raises a misconfiguration error.
..todo :: Find directives that are in included files.
:param list block: The block to replace in
:param list directives: The new directives.
"""
if replace:
for directive in directives:
changed = False
if len(directive) == 0:
continue
for index, line in enumerate(block):
if len(line) > 0 and line[0] == directive[0]:
block[index] = directive
changed = True
if not changed:
raise errors.MisconfigurationError(
'LetsEncrypt expected directive for %s in the Nginx '
'config but did not find it.' % directive[0])
else:
block.extend(directives)
| apache-2.0 | -312,035,385,448,733,100 | -3,839,233,078,804,097,000 | 32.663286 | 80 | 0.567908 | false |
redhatrises/freeipa | ipatests/test_webui/ui_driver.py | 2 | 60841 | # Authors:
# Petr Vobornik <[email protected]>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base class for UI integration tests.
Contains browser driver and common tasks.
"""
from __future__ import print_function
import nose
from datetime import datetime
import time
import re
import os
from functools import wraps
from nose.plugins.skip import SkipTest
# pylint: disable=import-error
from six.moves.urllib.error import URLError
# pylint: enable=import-error
try:
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support.ui import Select
NO_SELENIUM = False
except ImportError:
NO_SELENIUM = True
try:
import yaml
NO_YAML = False
except ImportError:
NO_YAML = True
from ipaplatform.paths import paths
ENV_MAP = {
'MASTER': 'ipa_server',
'ADMINID': 'ipa_admin',
'ADMINPW': 'ipa_password',
'DOMAIN': 'ipa_domain',
'IPA_REALM': 'ipa_realm',
'IPA_IP': 'ipa_ip',
'IPA_NO_CA': 'no_ca',
'IPA_NO_DNS': 'no_dns',
'IPA_HAS_TRUSTS': 'has_trusts',
'IPA_HAS_KRA': 'has_kra',
'IPA_HOST_CSR_PATH': 'host_csr_path',
'IPA_SERVICE_CSR_PATH': 'service_csr_path',
'AD_DOMAIN': 'ad_domain',
'AD_DC': 'ad_dc',
'AD_ADMIN': 'ad_admin',
'AD_PASSWORD': 'ad_password',
'AD_DC_IP': 'ad_dc_ip',
'TRUST_SECRET': 'trust_secret',
'SEL_TYPE': 'type',
'SEL_BROWSER': 'browser',
'SEL_HOST': 'host',
'FF_PROFILE': 'ff_profile',
}
DEFAULT_BROWSER = 'firefox'
DEFAULT_PORT = 4444
DEFAULT_TYPE = 'local'
def screenshot(fn):
"""
Decorator for saving screenshot on exception (test fail)
Should be applied on methods of UI_driver subclasses
"""
@wraps(fn)
def screenshot_wrapper(*args):
try:
return fn(*args)
except SkipTest:
raise
except Exception:
self = args[0]
name = '%s_%s_%s' % (
datetime.now().isoformat(),
self.__class__.__name__,
fn.__name__)
self.take_screenshot(name)
raise
return screenshot_wrapper
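# Typical (illustrative) use on a test method of a UI_driver subclass:
#
#     class test_example(UI_driver):
#         @screenshot
#         def test_something(self):
#             self.init_app()
#             ...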
class UI_driver(object):
"""
Base class for all UI integration tests
"""
@classmethod
def setup_class(cls):
if NO_SELENIUM:
raise nose.SkipTest('Selenium not installed')
def setup(self, driver=None, config=None):
self.request_timeout = 30
self.driver = driver
self.config = config
if not config:
self.load_config()
if not self.driver:
self.driver = self.get_driver()
self.driver.maximize_window()
def teardown(self):
self.driver.quit()
def load_config(self):
"""
Load configuration
1) From ~/.ipa/ui_test.conf
2) From environmental variables
"""
# load config file
path = os.path.join(os.path.expanduser("~"), ".ipa/ui_test.conf")
if not NO_YAML and os.path.isfile(path):
try:
with open(path, 'r') as conf:
self.config = yaml.load(conf)
except yaml.YAMLError as e:
raise nose.SkipTest("Invalid Web UI config.\n%s" % e)
except IOError as e:
raise nose.SkipTest("Can't load Web UI test config: %s" % e)
else:
self.config = {}
c = self.config
# override with environmental variables
for k, v in ENV_MAP.items():
val = os.environ.get(k)
if val is not None:
c[v] = val
# apply defaults
if 'port' not in c:
c['port'] = DEFAULT_PORT
if 'browser' not in c:
c['browser'] = DEFAULT_BROWSER
if 'type' not in c:
c['type'] = DEFAULT_TYPE
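    # A minimal ~/.ipa/ui_test.conf is sketched below (host name and password
    # are placeholders); the keys mirror the values in ENV_MAP, plus 'type',
    # 'browser' and 'port' which default as above.
    #
    #     ipa_server: master.ipa.test
    #     ipa_admin: admin
    #     ipa_password: Secret123
    #     browser: firefox
    #     type: local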
def get_driver(self):
"""
Get WebDriver according to configuration
"""
browser = self.config["browser"]
port = self.config["port"]
driver_type = self.config["type"]
options = None
if browser == 'chromium':
options = ChromeOptions()
options.binary_location = paths.CHROMIUM_BROWSER
if driver_type == 'remote':
if 'host' not in self.config:
raise nose.SkipTest('Selenium server host not configured')
host = self.config["host"]
if browser == 'chrome':
capabilities = DesiredCapabilities.CHROME
elif browser == 'chromium':
capabilities = options.to_capabilities()
elif browser == 'ie':
capabilities = DesiredCapabilities.INTERNETEXPLORER
else:
capabilities = DesiredCapabilities.FIREFOX
try:
driver = webdriver.Remote(
command_executor='http://%s:%d/wd/hub' % (host, port),
desired_capabilities=capabilities)
except URLError as e:
raise nose.SkipTest('Error connecting to selenium server: %s' % e)
except RuntimeError as e:
raise nose.SkipTest('Error while establishing webdriver: %s' % e)
else:
try:
if browser == 'chrome' or browser == 'chromium':
driver = webdriver.Chrome(chrome_options=options)
elif browser == 'ie':
driver = webdriver.Ie()
else:
fp = None
if "ff_profile" in self.config:
fp = webdriver.FirefoxProfile(self.config["ff_profile"])
driver = webdriver.Firefox(fp)
except URLError as e:
raise nose.SkipTest('Error connecting to selenium server: %s' % e)
except RuntimeError as e:
raise nose.SkipTest('Error while establishing webdriver: %s' % e)
return driver
def find(self, expression, by='id', context=None, many=False, strict=False):
"""
Helper which calls selenium find_element_by_xxx methods.
expression: search expression
by: selenium.webdriver.common.by
context: element to search on. Default: driver
many: all matching elements
strict: error out when element is not found
Returns None instead of raising exception when element is not found.
"""
assert expression, 'expression is missing'
if context is None:
context = self.driver
if not many:
method_name = 'find_element'
else:
method_name = 'find_elements'
try:
func = getattr(context, method_name)
result = func(by, expression)
except NoSuchElementException:
if strict:
raise
else:
result = None
return result
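    # Illustrative calls (selectors are examples only):
    #     btn = self.find("button[name='add']", By.CSS_SELECTOR)  # None if absent
    #     rows = self.find("//tbody/tr", By.XPATH, context=table, many=True)
    #     self.find("rcue-login-screen", By.ID, strict=True)  # raises if missing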
def files_loaded(self):
"""
Test if dependencies were loaded. (Checks if UI has been rendered)
"""
indicator = self.find(".global-activity-indicator", By.CSS_SELECTOR)
return indicator is not None
def has_ca(self):
"""
FreeIPA server was installed with CA.
"""
return not self.config.get('no_ca')
def has_dns(self):
"""
FreeIPA server was installed with DNS.
"""
return not self.config.get('no_dns')
def has_trusts(self):
"""
FreeIPA server was installed with Trusts.
"""
return self.config.get('has_trusts')
def has_kra(self):
"""
FreeIPA server was installed with Kra.
"""
return self.config.get('has_kra')
def has_active_request(self):
"""
Check if there is running AJAX request
"""
global_indicators = self.find(".global-activity-indicator", By.CSS_SELECTOR, many=True)
for el in global_indicators:
try:
if not self.has_class(el, 'closed'):
return True
except StaleElementReferenceException:
# we don't care. Happens when indicator is part of removed dialog.
continue
return False
def wait(self, seconds=0.2):
"""
        Wait for the given number of seconds
"""
time.sleep(seconds)
def wait_for_request(self, implicit=0.2, n=1, d=0):
"""
Wait for AJAX request to finish
"""
runner = self
for _i in range(n):
self.wait(implicit)
            WebDriverWait(self.driver, self.request_timeout).until_not(lambda _: runner.has_active_request())
self.wait()
self.wait(d)
def xpath_has_val(self, attr, val):
"""
        Create an XPath expression that matches the presence of an item in an
        attribute whose value is a space-separated list of items.
"""
return "contains(concat(' ',normalize-space(@%s), ' '),' %s ')" % (attr, val)
def init_app(self, login=None, password=None):
"""
Load and login
"""
self.load()
self.wait(0.5)
self.login(login, password)
# metadata + default page
self.wait_for_request(n=5)
def load(self):
"""
Navigate to Web UI first page and wait for loading of all dependencies.
"""
self.driver.get(self.get_base_url())
runner = self
WebDriverWait(self.driver, 10).until(lambda d: runner.files_loaded())
def login(self, login=None, password=None, new_password=None):
"""
Log in if user is not logged in.
"""
self.wait_for_request(n=2)
if not self.logged_in():
if not login:
login = self.config['ipa_admin']
if not password:
password = self.config['ipa_password']
if not new_password:
new_password = password
auth = self.get_login_screen()
login_tb = self.find("//input[@type='text'][@name='username']", 'xpath', auth, strict=True)
psw_tb = self.find("//input[@type='password'][@name='password']", 'xpath', auth, strict=True)
login_tb.send_keys(login)
psw_tb.send_keys(password)
psw_tb.send_keys(Keys.RETURN)
self.wait(0.5)
self.wait_for_request(n=2)
# reset password if needed
newpw_tb = self.find("//input[@type='password'][@name='new_password']", 'xpath', auth)
verify_tb = self.find("//input[@type='password'][@name='verify_password']", 'xpath', auth)
if newpw_tb and newpw_tb.is_displayed():
newpw_tb.send_keys(new_password)
verify_tb.send_keys(new_password)
verify_tb.send_keys(Keys.RETURN)
self.wait(0.5)
self.wait_for_request(n=2)
def logged_in(self):
"""
Check if user is logged in
"""
login_as = self.find('loggedinas', 'class name')
visible_name = len(login_as.text) > 0
logged_in = not self.login_screen_visible() and visible_name
return logged_in
def logout(self):
self.profile_menu_action('logout')
def get_login_screen(self):
"""
Get reference of login screen
"""
return self.find('rcue-login-screen', 'id')
def login_screen_visible(self):
"""
Check if login screen is visible
"""
screen = self.get_login_screen()
return screen and screen.is_displayed()
def take_screenshot(self, name):
if self.config.get('save_screenshots'):
scr_dir = self.config.get('screenshot_dir')
path = name + '.png'
if scr_dir:
path = os.path.join(scr_dir, path)
self.driver.get_screenshot_as_file(path)
def navigate_to_entity(self, entity, facet=None):
self.driver.get(self.get_url(entity, facet))
self.wait_for_request(n=3, d=0.4)
def navigate_by_menu(self, item, complete=True):
"""
Navigate by using menu
"""
if complete:
parts = item.split('/')
if len(parts) > 1:
parent = parts[0:-1]
self.navigate_by_menu('/'.join(parent), complete)
s = ".navbar a[href='#%s']" % item
link = self.find(s, By.CSS_SELECTOR, strict=True)
assert link.is_displayed(), 'Navigation link is not displayed: %s' % item
link.click()
self.wait_for_request()
self.wait_for_request(0.4)
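    # Illustrative navigate_by_menu() call (the menu path is an assumption and
    # depends on the deployed Web UI navigation tree):
    #   self.navigate_by_menu('identity/users')  # opens parent items first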
def navigate_by_breadcrumb(self, item):
"""
Navigate by breadcrumb navigation
"""
facet = self.get_facet()
nav = self.find('.breadcrumb', By.CSS_SELECTOR, facet, strict=True)
a = self.find(item, By.LINK_TEXT, nav, strict=True)
a.click()
self.wait_for_request()
self.wait_for_request(0.4)
def switch_to_facet(self, name):
"""
Click on tab with given name
"""
facet = self.get_facet()
tabs = "div.facet-tabs"
sidebar = "div.sidebar-pf"
facets_container = self.find(tabs, By.CSS_SELECTOR, facet)
# handle sidebar instead of facet-tabs
# the webui facet can have only the facet-tabs OR sidebar, not both
if not facets_container:
facets_container = self.find(sidebar, By.CSS_SELECTOR, facet)
s = "li[name='%s'] a" % name
link = self.find(s, By.CSS_SELECTOR, facets_container, strict=True)
link.click()
# double wait because of facet's paging
self.wait_for_request(0.5)
self.wait_for_request()
def get_url(self, entity, facet=None):
"""
Create entity url
"""
url = [self.get_base_url(), '#', 'e', entity]
if facet:
url.append(facet)
return '/'.join(url)
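    # Example of a URL built by get_url() (hostname is hypothetical):
    #   self.get_url('user', 'details')
    #   -> 'https://ipa.example.test/ipa/ui/#/e/user/details'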
def get_base_url(self):
"""
Get FreeIPA Web UI url
"""
host = self.config.get('ipa_server')
if not host:
self.skip('FreeIPA server hostname not configured')
return 'https://%s/ipa/ui' % host
def get_facet(self):
"""
Get currently displayed facet
"""
facet = self.find('.active-facet', By.CSS_SELECTOR)
assert facet is not None, "Current facet not found"
return facet
def get_facet_info(self, facet=None):
"""
Get information of currently displayed facet
"""
info = {}
# get facet
if facet is None:
facet = self.get_facet()
info["element"] = facet
        # get facet name and entity
info["name"] = facet.get_attribute('data-name')
info["entity"] = facet.get_attribute('data-entity')
# get facet title
el = self.find(".facet-header h3 *:first-child", By.CSS_SELECTOR, facet)
if el:
info["title"] = el.text
# get facet pkey
el = self.find(".facet-header h3 span.facet-pkey", By.CSS_SELECTOR, facet)
if el:
info["pkey"] = el.text
return info
def get_dialogs(self, strict=False, name=None):
"""
Get all dialogs in DOM
"""
s = '.modal-dialog'
if name:
s += "[data-name='%s']" % name
dialogs = self.find(s, By.CSS_SELECTOR, many=True)
if strict:
assert dialogs, "No dialogs found"
return dialogs
def get_dialog(self, strict=False, name=None):
"""
Get last opened dialog
"""
dialogs = self.get_dialogs(strict, name)
dialog = None
if len(dialogs):
dialog = dialogs[-1]
return dialog
def get_last_error_dialog(self, dialog_name='error_dialog'):
"""
Get last opened error dialog or None.
"""
s = ".modal-dialog[data-name='%s']" % dialog_name
dialogs = self.find(s, By.CSS_SELECTOR, many=True)
dialog = None
if dialogs:
dialog = dialogs[-1]
return dialog
def get_dialog_info(self):
"""
Get last open dialog info: name, text if any.
Returns None if no dialog is open.
"""
dialog = self.get_dialog()
info = None
if dialog:
body = self.find('.modal-body', By.CSS_SELECTOR, dialog, strict=True)
info = {
'name': dialog.get_attribute('data-name'),
'text': body.text,
}
return info
def execute_api_from_ui(self, method, args, options, timeout=30):
"""
Executes FreeIPA API command/method from Web UI
"""
script = """
var method = arguments[0];
var args = arguments[1];
var options = arguments[2];
var callback = arguments[arguments.length - 1];
var rpc = require('freeipa/rpc');
var cmd = rpc.command({
method: method,
args: args,
options: options,
on_success: callback,
on_error: callback
});
cmd.execute();
"""
self.driver.set_script_timeout(timeout)
result = self.driver.execute_async_script(script, *[method, args, options])
return result
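    # Illustrative execute_api_from_ui() call; the method name, args and
    # options are assumptions -- any registered FreeIPA API method can be used:
    #   result = self.execute_api_from_ui('user_show', ['admin'], {'all': True})
    #   # 'result' is the raw RPC response handed to the success/error callback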
def click_on_link(self, text, parent=None):
"""
Click on link with given text and parent.
"""
if not parent:
parent = self.get_form()
link = self.find(text, By.LINK_TEXT, parent, strict=True)
link.click()
def facet_button_click(self, name):
"""
Click on facet button with given name
"""
facet = self.get_facet()
s = ".facet-controls button[name=%s]" % name
self._button_click(s, facet, name)
def dialog_button_click(self, name, dialog=None):
"""
Click on dialog button with given name
Chooses last dialog if none is supplied
"""
if not dialog:
dialog = self.get_dialog(strict=True)
s = ".rcue-dialog-buttons button[name='%s']" % name
self._button_click(s, dialog, name)
def action_button_click(self, name, parent):
"""
Click on .action-button
"""
if not parent:
parent = self.get_form()
s = "a[name='%s'].action-button" % name
self._button_click(s, parent, name)
def button_click(self, name, parent=None,
parents_css_sel=None):
"""
Click on .ui-button
"""
if not parent:
if parents_css_sel:
parent = self.find(parents_css_sel, By.CSS_SELECTOR,
strict=True)
else:
parent = self.get_form()
s = "[name='%s'].btn" % name
self._button_click(s, parent, name)
def _button_click(self, selector, parent, name=''):
btn = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
ActionChains(self.driver).move_to_element(btn).perform()
disabled = btn.get_attribute("disabled")
assert btn.is_displayed(), 'Button is not displayed: %s' % name
assert not disabled, 'Invalid button state: disabled. Button: %s' % name
btn.click()
self.wait_for_request()
def profile_menu_action(self, name):
"""
Execute action from profile menu
"""
menu_toggle = self.find('[name=profile-menu] > a', By.CSS_SELECTOR)
menu_toggle.click()
s = "[name=profile-menu] a[href='#%s']" % name
btn = self.find(s, By.CSS_SELECTOR, strict=True)
btn.click()
        # the action is usually followed by opening a dialog; wait to compensate
        # for a possible dialog transition effect
self.wait(0.5)
def get_form(self):
"""
Get last dialog or visible facet
"""
form = self.get_dialog()
if not form:
form = self.get_facet()
return form
def select(self, selector, value, parent=None):
"""
Select option with given value in select element
"""
if not parent:
parent = self.get_form()
el = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
Select(el).select_by_value(value)
def fill_text(self, selector, value, parent=None):
"""
Clear and enter text into input defined by selector.
Use for non-standard fields.
"""
if not parent:
parent = self.get_form()
tb = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
try:
tb.clear()
tb.send_keys(value)
except InvalidElementStateException as e:
msg = "Invalid Element State, el: %s, value: %s, error: %s" % (selector, value, e)
assert False, msg
def fill_input(self, name, value, input_type="text", parent=None):
"""
Type into input element specified by name and type.
"""
s = "div[name='%s'] input[type='%s'][name='%s']" % (name, input_type, name)
self.fill_text(s, value, parent)
def fill_textarea(self, name, value, parent=None):
"""
Clear and fill textarea.
"""
s = "textarea[name='%s']" % (name)
self.fill_text(s, value, parent)
def fill_textbox(self, name, value, parent=None):
"""
Clear and fill textbox.
"""
self.fill_input(name, value, "text", parent)
def fill_password(self, name, value, parent=None):
"""
Clear and fill input[type=password]
"""
self.fill_input(name, value, "password", parent)
def add_multivalued(self, name, value, parent=None):
"""
Add new value to multivalued textbox
"""
if not parent:
parent = self.get_form()
s = "div[name='%s'].multivalued-widget" % name
w = self.find(s, By.CSS_SELECTOR, parent, strict=True)
add_btn = self.find("button[name=add]", By.CSS_SELECTOR, w, strict=True)
add_btn.click()
s = "div[name=value] input"
inputs = self.find(s, By.CSS_SELECTOR, w, many=True)
last = inputs[-1]
last.send_keys(value)
def del_multivalued(self, name, value, parent=None):
"""
Mark value in multivalued textbox as deleted.
"""
if not parent:
parent = self.get_form()
s = "div[name='%s'].multivalued-widget" % name
w = self.find(s, By.CSS_SELECTOR, parent, strict=True)
s = "div[name=value] input"
inputs = self.find(s, By.CSS_SELECTOR, w, many=True)
clicked = False
for i in inputs:
val = i.get_attribute('value')
n = i.get_attribute('name')
if val == value:
s = "input[name='%s'] ~ .input-group-btn button[name=remove]" % n
link = self.find(s, By.CSS_SELECTOR, w, strict=True)
link.click()
self.wait()
clicked = True
assert clicked, 'Value was not removed: %s' % value
def fill_multivalued(self, name, instructions, parent=None):
"""
Add or delete a value from multivalued field
"""
for instruction in instructions:
t = instruction[0]
value = instruction[1]
if t == 'add':
self.add_multivalued(name, value, parent)
else:
self.del_multivalued(name, value, parent)
def check_option(self, name, value=None, parent=None):
"""
        Find a checkbox or radio whose name is NAME or NAME followed by digits,
        and check it by clicking on its label.
"""
if not parent:
parent = self.get_form()
s = "//input[@type='checkbox' or 'radio'][contains(@name, '%s')]" % name
if value is not None:
s += "[@value='%s']" % value
opts = self.find(s, "xpath", parent, many=True)
label = None
# Select only the one which matches exactly the name
for o in opts:
n = o.get_attribute("name")
            if n == name or re.match(r"^%s\d+$" % name, n):
s = "label[for='%s']" % o.get_attribute("id")
label = self.find(s, By.CSS_SELECTOR, parent, strict=True)
break
assert label is not None, "Option not found: %s" % name
label.click()
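    # Illustrative check_option() calls (field names are hypothetical):
    #   self.check_option('usercategory', 'all')  # radio group, pick value 'all'
    #   self.check_option('random')               # single checkbox, no value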
def select_combobox(self, name, value, parent=None, combobox_input=None):
"""
Select value in a combobox. Search if not found.
"""
if not parent:
parent = self.get_form()
s = "[name='%s'].combobox-widget" % name
cb = self.find(s, By.CSS_SELECTOR, parent, strict=True)
open_btn = self.find('a[name=open] i', By.CSS_SELECTOR, cb, strict=True)
open_btn.click()
self.wait()
self.wait_for_request()
list_cnt = self.find('.combobox-widget-list', By.CSS_SELECTOR, cb, strict=True)
search_btn = self.find('a[name=search] i', By.CSS_SELECTOR, cb, strict=True)
opt_s = "select[name=list] option[value='%s']" % value
option = self.find(opt_s, By.CSS_SELECTOR, cb)
if combobox_input:
if not option:
self.fill_textbox(combobox_input, value, cb)
else:
if not option:
# try to search
self.fill_textbox('filter', value, cb)
search_btn.click()
self.wait_for_request()
option = self.find(opt_s, By.CSS_SELECTOR, cb, strict=True)
option.click()
# Chrome does not close search area on click
if list_cnt.is_displayed():
self.driver.switch_to_active_element().send_keys(Keys.RETURN)
self.wait()
def get_text(self, selector, parent=None):
if not parent:
parent = self.get_form()
el = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
return el.text
def get_value(self, selector, parent=None):
if not parent:
parent = self.get_form()
el = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
value = el.get_attribute('value')
return value
def get_field_text(self, name, parent=None, element='p'):
s = ".controls %s[name='%s']" % (element, name)
return self.get_text(s, parent)
def get_field_value(self, name, parent=None, element='input'):
s = ".controls %s[name='%s']" % (element, name)
return self.get_value(s, parent)
def get_multivalued_value(self, name, parent=None):
s = "div[name='%s'] div[name='value'] input[name^='%s']" % (name, name)
els = self.find(s, By.CSS_SELECTOR, parent, many=True)
values = []
for el in els:
values.append(el.get_attribute('value'))
return values
def get_field_checked(self, name, parent=None):
if not parent:
parent = self.get_form()
s = "div[name='%s'] input[name^='%s']" % (name, name)
els = self.find(s, By.CSS_SELECTOR, parent, strict=True, many=True)
values = []
for el in els:
if el.is_selected():
values.append(el.get_attribute('value'))
return values
def get_field_selected(self, name, parent=None):
if not parent:
parent = self.get_form()
s = "div[name='%s'] select[name='%s']" % (name, name)
el = self.find(s, By.CSS_SELECTOR, parent, strict=True)
select = Select(el)
selected = select.all_selected_options
values = []
for opt in selected:
values.append(opt.get_attribute('value'))
return values
def get_undo_buttons(self, field, parent):
"""
Get field undo button
"""
if not parent:
parent = self.get_form()
s = ".controls div[name='%s'] .btn.undo" % (field)
undos = self.find(s, By.CSS_SELECTOR, parent, strict=True, many=True)
return undos
def get_rows(self, parent=None, name=None):
"""
Return all rows of search table.
"""
if not parent:
parent = self.get_form()
# select table rows
s = self.get_table_selector(name)
s += ' tbody tr'
rows = self.find(s, By.CSS_SELECTOR, parent, many=True)
return rows
def get_row(self, pkey, parent=None, name=None):
"""
Get row element of search table with given pkey. None if not found.
"""
rows = self.get_rows(parent, name)
s = "input[value='%s']" % pkey
for row in rows:
has = self.find(s, By.CSS_SELECTOR, row)
if has:
return row
return None
def navigate_to_row_record(self, row, pkey_column=None):
"""
Navigate to record by clicking on a link.
"""
s = 'a'
if pkey_column:
s = "div[name='%s'] a" % pkey_column
link = self.find(s, By.CSS_SELECTOR, row, strict=True)
link.click()
self.wait_for_request(0.4)
self.wait_for_request()
def get_table_selector(self, name=None):
"""
Construct table selector
"""
s = "table"
if name:
s += "[name='%s']" % name
s += '.table'
return s
def select_record(self, pkey, parent=None, table_name=None):
"""
Select record with given pkey in search table.
"""
if not parent:
parent = self.get_form()
s = self.get_table_selector(table_name)
input_s = s + " tbody td input[value='%s']" % pkey
checkbox = self.find(input_s, By.CSS_SELECTOR, parent, strict=True)
try:
ActionChains(self.driver).move_to_element(checkbox).click().perform()
except WebDriverException as e:
assert False, 'Can\'t click on checkbox label: %s \n%s' % (s, e)
self.wait()
assert checkbox.is_selected(), 'Record was not checked: %s' % input_s
self.wait()
def get_record_value(self, pkey, column, parent=None, table_name=None):
"""
Get table column's text value
"""
row = self.get_row(pkey, parent, table_name)
s = "div[name=%s]" % column
val = None
if row:
el = self.find(s, By.CSS_SELECTOR, row)
val = el.text
return val
def has_record(self, pkey, parent=None, table_name=None):
"""
Check if table contains specific record.
"""
if not parent:
parent = self.get_form()
s = self.get_table_selector(table_name)
s += " tbody td input[value='%s']" % pkey
checkbox = self.find(s, By.CSS_SELECTOR, parent)
return checkbox is not None
def navigate_to_record(self, pkey, parent=None, table_name=None, entity=None, facet='search'):
"""
        Click on the record with the given pkey in the search table, which
        navigates to the record's page.
"""
if entity:
self.navigate_to_entity(entity, facet)
if not parent:
parent = self.get_facet()
s = self.get_table_selector(table_name)
s += " tbody"
table = self.find(s, By.CSS_SELECTOR, parent, strict=True)
link = self.find(pkey, By.LINK_TEXT, table, strict=True)
link.click()
self.wait_for_request()
def delete_record(
self, pkeys, fields=None, parent=None, table_name=None,
facet_btn='remove'):
"""
Delete records with given pkeys in currently opened search table.
"""
if type(pkeys) is not list:
pkeys = [pkeys]
# select
selected = False
for pkey in pkeys:
delete = self.has_record(pkey, parent, table_name)
if delete:
self.select_record(pkey, parent, table_name)
selected = True
# exec and confirm
if selected:
if table_name and parent:
s = self.get_table_selector(table_name)
table = self.find(s, By.CSS_SELECTOR, parent, strict=True)
self.button_click(facet_btn, table)
else:
self.facet_button_click(facet_btn)
if fields:
self.fill_fields(fields)
self.dialog_button_click('ok')
self.wait_for_request(n=2)
self.wait()
def delete(self, entity, data_list, facet='search', navigate=True):
"""
        Delete entity records.
"""
if navigate:
self.navigate_to_entity(entity, facet)
for data in data_list:
pkey = data.get('pkey')
fields = data.get('del')
self.delete_record(pkey, fields)
def fill_fields(
self, fields, parent=None, undo=False, combobox_input=None):
"""
        Fill dialog or facet inputs with given data.
Expected format:
[
('widget_type', 'key', value'),
('widget_type', 'key2', value2'),
]
"""
if not parent:
parent = self.get_form()
for field in fields:
widget_type = field[0]
key = field[1]
val = field[2]
if undo and not hasattr(key, '__call__'):
self.assert_undo_button(key, False, parent)
if widget_type == 'textbox':
self.fill_textbox(key, val, parent)
elif widget_type == 'textarea':
self.fill_textarea(key, val, parent)
elif widget_type == 'password':
self.fill_password(key, val, parent)
elif widget_type == 'radio':
self.check_option(key, val, parent)
elif widget_type == 'checkbox':
self.check_option(key, val, parent=parent)
elif widget_type == 'selectbox':
self.select('select[name=%s]' % key, val, parent)
elif widget_type == 'combobox':
self.select_combobox(
key, val, parent, combobox_input=combobox_input)
elif widget_type == 'add_table_record':
self.add_table_record(key, val, parent)
elif widget_type == 'add_table_association':
self.add_table_associations(key, val, parent)
elif widget_type == 'multivalued':
self.fill_multivalued(key, val, parent)
elif widget_type == 'table':
self.select_record(val, parent, key)
# this meta field specifies a function, to extend functionality of
# field checking
elif widget_type == 'callback':
if hasattr(key, '__call__'):
key(val)
self.wait()
if undo and not hasattr(key, '__call__'):
self.assert_undo_button(key, True, parent)
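    # Example of the 'fields' structure consumed by fill_fields() (keys and
    # values are illustrative):
    #   fields = [
    #       ('textbox', 'uid', 'tuser1'),
    #       ('password', 'userpassword', 'Secret123'),
    #       ('combobox', 'manager', 'admin'),
    #       ('checkbox', 'random', None),
    #   ]
    #   self.fill_fields(fields, undo=True)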
def validate_fields(self, fields, parent=None):
"""
Validate that fields on a page or dialog have desired values.
"""
if not fields:
return
if not parent:
parent = self.get_form()
for field in fields:
ftype = field[0]
key = field[1]
expected = field[2]
actual = None
if ftype == 'label':
actual = self.get_field_text(key, parent)
elif ftype in ('textbox', 'password', 'combobox'):
actual = self.get_field_value(key, parent, 'input')
elif ftype == 'textarea':
actual = self.get_field_value(key, parent, 'textarea')
elif ftype == 'radio':
actual = self.get_field_checked(key, parent)
elif ftype == 'checkbox':
actual = self.get_field_checked(key, parent)
elif ftype == 'multivalued':
actual = self.get_multivalued_value(key, parent)
elif ftype == 'table_record':
if self.has_record(expected, parent, key):
actual = expected
valid = False
if type(expected) == list:
valid = type(actual) == list and sorted(expected) == sorted(actual)
else:
# compare other values, usually strings:
valid = actual == expected
assert valid, "Values don't match. Expected: '%s', Got: '%s'" % (expected, actual)
def find_record(self, entity, data, facet='search', dummy='XXXXXXX'):
"""
Test search functionality of search facet.
1. search for non-existent value and test if result set is empty.
2. search for specific pkey and test if it's present on the page
3. reset search page by not using search criteria
"""
self.assert_facet(entity, facet)
facet = self.get_facet()
search_field_s = '.search-filter input[name=filter]'
key = data.get('pkey')
self.fill_text(search_field_s, dummy, facet)
self.action_button_click('find', facet)
self.wait_for_request(n=2)
self.assert_record(key, negative=True)
self.fill_text(search_field_s, key, facet)
self.action_button_click('find', facet)
self.wait_for_request(n=2)
self.assert_record(key)
self.fill_text(search_field_s, '', facet)
self.action_button_click('find', facet)
self.wait_for_request(n=2)
def add_record(self, entity, data, facet='search', facet_btn='add',
dialog_btn='add', delete=False, pre_delete=True,
dialog_name='add', navigate=True, combobox_input=None):
"""
Add records.
Expected data format:
{
'pkey': 'key',
add: [
('widget_type', 'key', 'value'),
('widget_type', 'key2', 'value2'),
],
}
"""
pkey = data['pkey']
if navigate:
self.navigate_to_entity(entity, facet)
# check facet
self.assert_facet(entity, facet)
# delete if exists, ie. from previous test fail
if pre_delete:
self.delete_record(pkey, data.get('del'))
# current row count
self.wait_for_request(0.5)
count = len(self.get_rows())
# open add dialog
self.assert_no_dialog()
self.facet_button_click(facet_btn)
self.assert_dialog(dialog_name)
# fill dialog
self.fill_fields(data['add'], combobox_input=combobox_input)
# confirm dialog
self.dialog_button_click(dialog_btn)
self.wait_for_request()
self.wait_for_request()
# check expected error/warning/info
expected = ['error_4304_info']
dialog_info = self.get_dialog_info()
if dialog_info and dialog_info['name'] in expected:
self.dialog_button_click('ok')
self.wait_for_request()
# check for error
self.assert_no_error_dialog()
self.wait_for_request()
self.wait_for_request(0.4)
# check if table has more rows
new_count = len(self.get_rows())
# adjust because of paging
expected = count + 1
if count == 20:
expected = 20
self.assert_row_count(expected, new_count)
# delete record
if delete:
self.delete_record(pkey)
new_count = len(self.get_rows())
self.assert_row_count(count, new_count)
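    # Example of the 'data' structure consumed by add_record() (entity and
    # values are illustrative):
    #   data = {
    #       'pkey': 'tuser1',
    #       'add': [
    #           ('textbox', 'uid', 'tuser1'),
    #           ('textbox', 'givenname', 'Test'),
    #           ('textbox', 'sn', 'User'),
    #       ],
    #   }
    #   self.add_record('user', data)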
def mod_record(self, entity, data, facet='details', facet_btn='save'):
"""
Mod record
Assumes that it is already on details page.
"""
self.assert_facet(entity, facet)
# TODO assert pkey
self.assert_facet_button_enabled(facet_btn, enabled=False)
self.fill_fields(data['mod'], undo=True)
self.assert_facet_button_enabled(facet_btn)
self.facet_button_click(facet_btn)
self.wait_for_request()
self.wait_for_request()
self.assert_facet_button_enabled(facet_btn, enabled=False)
def basic_crud(self, entity, data,
parent_entity=None,
details_facet='details',
search_facet='search',
default_facet='details',
add_facet_btn='add',
add_dialog_btn='add',
add_dialog_name='add',
update_btn='save',
breadcrumb=None,
navigate=True,
delete=True):
"""
Basic CRUD operation sequence.
Expected data format:
{
'pkey': 'key',
'add': [
('widget_type', 'key', 'value'),
('widget_type', 'key2', 'value2'),
],
'mod': [
('widget_type', 'key', 'value'),
('widget_type', 'key2', 'value2'),
],
}
"""
        # important for nested entities, e.g. automount maps
if not parent_entity:
parent_entity = entity
pkey = data['pkey']
# 1. Open Search Facet
if navigate:
self.navigate_to_entity(parent_entity)
self.assert_facet(parent_entity, search_facet)
self.wait_for_request()
# 2. Add record
self.add_record(parent_entity, data, facet=search_facet, navigate=False,
facet_btn=add_facet_btn, dialog_name=add_dialog_name,
dialog_btn=add_dialog_btn
)
# Find
self.find_record(parent_entity, data, search_facet)
# 3. Navigate to details facet
self.navigate_to_record(pkey)
self.assert_facet(entity, default_facet)
self.wait_for_request(0.5)
if default_facet != details_facet:
self.switch_to_facet(details_facet)
self.assert_facet(entity, details_facet)
self.validate_fields(data.get('add_v'))
# 4. Mod values
if data.get('mod'):
self.mod_record(entity, data, details_facet, update_btn)
self.validate_fields(data.get('mod_v'))
if not breadcrumb:
self.navigate_to_entity(entity, search_facet)
else:
self.navigate_by_breadcrumb(breadcrumb)
# 5. Delete record
if delete:
self.delete_record(pkey, data.get('del'))
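    # Minimal illustrative basic_crud() call (entity and field values are
    # assumptions made for the example):
    #   self.basic_crud('group', {
    #       'pkey': 'tgroup1',
    #       'add': [('textbox', 'cn', 'tgroup1')],
    #       'mod': [('textarea', 'description', 'changed')],
    #   })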
def add_table_record(self, name, data, parent=None):
"""
Add record to dnsrecord table, association table and similar
"""
if not parent:
parent = self.get_form()
s = self.get_table_selector(name)
table = self.find(s, By.CSS_SELECTOR, parent, strict=True)
s = ".btn[name=%s]" % 'add'
btn = self.find(s, By.CSS_SELECTOR, table, strict=True)
btn.click()
self.wait()
self.fill_fields(data['fields'])
self.dialog_button_click('add')
self.wait_for_request()
def prepare_associations(
self, pkeys, facet=None, facet_btn='add', member_pkeys=None):
"""
Helper function for add_associations and delete_associations
"""
if facet:
self.switch_to_facet(facet)
self.facet_button_click(facet_btn)
self.wait()
self.wait_for_request()
for key in pkeys:
self.select_record(key, table_name='available')
self.button_click('add')
self.dialog_button_click('add')
self.wait_for_request()
if member_pkeys:
check_pkeys = member_pkeys
else:
check_pkeys = pkeys
return check_pkeys
def add_associations(
self, pkeys, facet=None, delete=False, facet_btn='add',
member_pkeys=None):
"""
Add associations
"""
check_pkeys = self.prepare_associations(
pkeys, facet, facet_btn, member_pkeys)
for key in check_pkeys:
self.assert_record(key)
if delete:
self.delete_record(key)
self.assert_record(key, negative=True)
def delete_associations(
self, pkeys, facet=None, facet_btn='remove', member_pkeys=None):
"""
Remove associations
"""
check_pkeys = self.prepare_associations(
pkeys, facet, facet_btn, member_pkeys)
for key in check_pkeys:
self.assert_record(key, negative=True)
    def add_table_associations(self, table_name, pkeys, parent=None, delete=False):
"""
Add value to table (association|rule|...)
"""
if not parent:
parent = self.get_form()
s = self.get_table_selector(table_name)
table = self.find(s, By.CSS_SELECTOR, parent, strict=True)
s = "button[name='%s']" % 'add'
btn = self.find(s, By.CSS_SELECTOR, table, strict=True)
btn.click()
self.wait_for_request(0.4)
for key in pkeys:
self.select_record(key, table_name='available')
self.button_click('add')
self.wait()
self.dialog_button_click('add')
self.wait_for_request(n=2)
for key in pkeys:
self.assert_record(key, parent, table_name)
if delete:
self.delete_record(pkeys, None, parent, table_name)
for key in pkeys:
self.assert_record(key, parent, table_name, negative=True)
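    # Illustrative add_table_associations() call (table name and keys are
    # hypothetical):
    #   self.add_table_associations('memberof_group', ['editors'], delete=True)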
def action_list_action(self, name, confirm=True, confirm_btn="ok",
parents_css_sel=None):
"""
Execute action list action
"""
context = None
if not parents_css_sel:
context = self.find(".active-facet .facet-actions",
By.CSS_SELECTOR, strict=True)
else:
context = self.find(parents_css_sel, By.CSS_SELECTOR,
strict=True)
expand = self.find(".dropdown-toggle", By.CSS_SELECTOR, context,
strict=True)
expand.click()
action_link = self.find("li[data-name=%s] a" % name, By.CSS_SELECTOR,
context, strict=True)
action_link.click()
if confirm:
self.wait(0.5) # wait for dialog
self.dialog_button_click(confirm_btn)
self.wait()
def action_panel_action(self, panel_name, action):
"""
Execute action from action panel with given name.
"""
s = "div[data-name='%s'].action-panel" % panel_name
s += " a[data-name='%s']" % action
link = self.find(s, By.CSS_SELECTOR, strict=True)
link.click()
self.wait()
def enable_action(self):
"""
        Execute and test the 'enable' action from the facet action list.
"""
title = self.find('.active-facet div.facet-title', By.CSS_SELECTOR, strict=True)
self.action_list_action('enable')
self.wait_for_request(n=2)
self.assert_no_error_dialog()
self.assert_class(title, 'disabled', negative=True)
def disable_action(self):
"""
        Execute and test the 'disable' action from the facet action list.
"""
title = self.find('.active-facet div.facet-title', By.CSS_SELECTOR, strict=True)
self.action_list_action('disable')
self.wait_for_request(n=2)
self.assert_no_error_dialog()
self.assert_class(title, 'disabled')
def delete_action(self, entity, pkey, action='delete', facet='search'):
"""
        Execute and test the 'delete' action from the facet action list.
"""
self.action_list_action(action)
self.wait_for_request(n=4)
self.assert_no_error_dialog()
self.assert_facet(entity, facet)
self.assert_record(pkey, negative=True)
def mod_rule_tables(self, tables, categories, no_categories):
"""
Test functionality of rule table widgets in a facet
"""
def get_t_vals(t):
table = t[0]
k = t[1]
e = []
if len(t) > 2:
e = t[2]
return table, k, e
t_list = [t[0] for t in tables if t[0] not in no_categories]
# add values
for t in tables:
table, keys, _exts = get_t_vals(t)
# add one by one to test for #3711
for key in keys:
self.add_table_associations(table, [key])
        # disable tables
for cat in categories:
self.check_option(cat, 'all')
# update
self.assert_rule_tables_enabled(t_list, False)
self.facet_button_click('save')
self.wait_for_request(n=3, d=0.3)
self.assert_rule_tables_enabled(t_list, False)
p = self.get_form()
# now tables in categories should be empty, check it
for t in tables:
table, keys, _exts = get_t_vals(t)
if table in no_categories:
# clear the rest
self.delete_record(keys, None, p, table)
continue
for key in keys:
self.assert_record(key, p, table, negative=True)
# enable tables
for cat in categories:
self.check_option(cat, '')
self.assert_rule_tables_enabled(t_list, True)
self.facet_button_click('save')
self.wait_for_request(n=3, d=0.3)
self.assert_rule_tables_enabled(t_list, True)
for t in tables:
table, keys, _exts = get_t_vals(t)
# add multiple at once and test table delete button
self.add_table_associations(table, keys, delete=True)
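    # Illustrative mod_rule_tables() arguments for an HBAC-like rule facet
    # (table names, keys and categories are assumptions):
    #   tables = [
    #       ('memberuser_user', ['tuser1']),
    #       ('memberhost_host', ['host1.example.test']),
    #   ]
    #   categories = ['usercategory', 'hostcategory']
    #   self.mod_rule_tables(tables, categories, no_categories=[])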
def has_class(self, el, cls):
"""
Check if el has CSS class
"""
return cls in el.get_attribute("class").split()
def skip(self, reason):
"""
Skip tests
"""
raise nose.SkipTest(reason)
def assert_text(self, selector, value, parent=None):
"""
Assert read-only text value in details page or in a form
"""
text = self.get_text(selector, parent)
text = text.strip()
value = value.strip()
assert text == value, "Invalid value: '%s' Expected: %s" % (text, value)
def assert_text_field(self, name, value, parent=None, element='label'):
"""
Assert read-only text value in details page or in a form
"""
s = "div[name='%s'] %s[name='%s']" % (name, element, name)
self.assert_text(s, value, parent)
def assert_empty_value(self, selector, parent=None, negative=False):
"""
Assert empty value of some field in details page or in a form
"""
value = self.get_value(selector, parent)
if negative:
assert not value == ''
else:
assert value == ''
def assert_no_dialog(self):
"""
Assert that no dialog is opened
"""
dialogs = self.get_dialogs()
assert not dialogs, 'Invalid state: dialog opened'
def assert_dialog(self, name=None):
"""
        Assert that exactly one dialog is open, optionally with the given name
        """
        dialogs = self.get_dialogs(name=name)
        assert len(dialogs) == 1, 'Expected exactly one open dialog'
def assert_no_error_dialog(self):
"""
Assert that no error dialog is opened
"""
dialog = self.get_last_error_dialog()
ok = dialog is None
if not ok:
msg = self.find('p', By.CSS_SELECTOR, dialog).text
assert ok, 'Unexpected error: %s' % msg
def assert_row_count(self, expected, current):
"""
Assert that row counts match
"""
assert expected == current, "Rows don't match. Expected: %d, Got: %d" % (expected, current)
def assert_button_enabled(self, name, context_selector=None, enabled=True):
"""
Assert that button is enabled or disabled (expects that element will be
<button>)
"""
s = ""
if context_selector:
s = context_selector
s += "button[name=%s]" % name
facet = self.get_facet()
btn = self.find(s, By.CSS_SELECTOR, facet, strict=True)
valid = enabled == btn.is_enabled()
assert btn.is_displayed(), 'Button is not displayed'
assert valid, 'Button (%s) has incorrect enabled state (enabled==%s).' % (s, enabled)
def assert_facet_button_enabled(self, name, enabled=True):
"""
Assert that facet button is enabled or disabled
"""
self.assert_button_enabled(name, ".facet-controls ", enabled)
def assert_table_button_enabled(self, name, table_name, enabled=True):
"""
Assert that button in table is enabled/disabled
"""
s = "table[name='%s'] " % table_name
self.assert_button_enabled(name, s, enabled)
def assert_facet(self, entity, facet=None):
"""
Assert that current facet is correct
"""
info = self.get_facet_info()
        if facet is not None:
assert info["name"] == facet, "Invalid facet. Expected: %s, Got: %s " % (facet, info["name"])
assert info["entity"] == entity, "Invalid entity. Expected: %s, Got: %s " % (entity, info["entity"])
def assert_undo_button(self, field, visible=True, parent=None):
"""
Assert that undo button is or is not visible
"""
undos = self.get_undo_buttons(field, parent)
state = False
for undo in undos:
if undo.is_displayed():
state = True
break
if visible:
assert state, "Undo button not visible. Field: %s" % field
else:
assert not state, "Undo button visible. Field: %s" % field
def assert_visible(self, selector, parent=None, negative=False):
"""
Assert that element defined by selector is visible
"""
if not parent:
parent = self.get_form()
el = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
visible = el.is_displayed()
if negative:
assert not visible, "Element visible: %s" % selector
else:
assert visible, "Element not visible: %s" % selector
def assert_disabled(self, selector, parent=None, negative=False):
"""
Assert that element defined by selector is disabled
"""
if not parent:
parent = self.get_form()
self.find(selector, By.CSS_SELECTOR, parent, strict=True)
dis = self.find(selector+"[disabled]", By.CSS_SELECTOR, parent)
if negative:
assert dis is None, "Element is disabled: %s" % selector
else:
assert dis, "Element is not disabled: %s" % selector
def assert_record(self, pkey, parent=None, table_name=None, negative=False):
"""
Assert that record is in current search table
"""
has = self.has_record(pkey, parent, table_name)
has |= self.has_record(pkey.lower(), parent, table_name)
if negative:
assert not has, "Record exists when it shouldn't: %s" % pkey
else:
assert has, 'Record does not exist: %s' % pkey
def assert_indirect_record(self, pkey, entity, facet, negative=False, switch=True):
"""
Switch to indirect facet and assert record.
        Matching is effectively case-insensitive (assert_record also tries the
        lower-cased key).
"""
if switch:
self.switch_to_facet(facet)
radio_name = "%s-%s-type-radio" % (entity, facet.replace('_', '-'))
self.check_option(radio_name, 'indirect')
self.wait_for_request(n=2)
        self.assert_record(pkey, negative=negative)
def assert_record_value(self, expected, pkey, column, parent=None, table_name=None):
"""
Assert that column's value of record defined by pkey equals expected value.
"""
val = self.get_record_value(pkey, column, parent, table_name)
assert expected == val, "Invalid value: '%s'. Expected: '%s'." % (val, expected)
def assert_class(self, element, cls, negative=False):
"""
Assert that element has certain class
"""
valid = self.has_class(element, cls)
if negative:
assert not valid, "Element contains unwanted class: %s" % cls
else:
assert valid, "Element doesn't contain required class: %s" % cls
def assert_rule_tables_enabled(self, tables, enabled):
"""
Assert that rule table is editable - values can be added and removed.
"""
for table in tables:
self.assert_table_button_enabled('add', table, enabled)
def assert_menu_item(self, path, present=True):
"""
        Assert that a menu link is (or is not, per 'present') rendered and visible
"""
s = ".navigation a[href='#%s']" % path
link = self.find(s, By.CSS_SELECTOR)
is_present = link is not None and link.is_displayed()
assert present == is_present, ('Invalid state of navigation item: %s. '
'Presence expected: %s') % (path, str(present))
def assert_action_panel_action(self, panel_name, action, visible=True, enabled=True):
"""
Assert that action panel action is visible/hidden, and enabled/disabled
Enabled is checked only if action is visible.
"""
s = "div[data-name='%s'].action-panel" % panel_name
s += " a[data-name='%s']" % action
link = self.find(s, By.CSS_SELECTOR)
is_visible = link is not None and link.is_displayed()
is_enabled = False
if is_visible:
is_enabled = not self.has_class(link, 'disabled')
assert is_visible == visible, ('Invalid visibility of action button: %s. '
'Expected: %s') % (action, str(visible))
if is_visible:
            assert is_enabled == enabled, ('Invalid enabled state of action button %s. '
                                           'Expected: %s') % (action, str(enabled))
def assert_action_list_action(self, action, visible=True, enabled=True,
parent=None, parents_css_sel=None,
facet_actions=True):
"""
Assert that action dropdown action is visible/hidden, and enabled/disabled
Enabled is checked only if action is visible.
"""
li_s = " li[data-name='%s']" % action
if not parent:
parent = self.get_form()
if facet_actions:
li_s = ".facet-actions" + li_s
else:
li_s = parents_css_sel + li_s
li = self.find(li_s, By.CSS_SELECTOR, parent)
link = self.find("a", By.CSS_SELECTOR, li)
is_visible = li is not None and link is not None
is_enabled = False
assert is_visible == visible, ('Invalid visibility of action item: %s. '
'Expected: %s') % (action, str(visible))
if is_visible:
is_enabled = not self.has_class(li, 'disabled')
            assert is_enabled == enabled, ('Invalid enabled state of action item %s. '
                                           'Expected: %s') % (action, str(enabled))